├── DATA2NPY ├── dicom2npy.py └── nii2npy.py ├── DiceLossLayer ├── dice_loss_layer.cpp └── dice_loss_layer.hpp ├── OrganSegRSTN ├── Crop.py ├── Crop_old.py ├── Data.py ├── Uncrop.py ├── _fast_functions.so ├── coarse2fine_testing.py ├── coarse_fusion.py ├── coarse_testing.py ├── fast_functions.py ├── indiv_training.py ├── init.py ├── joint_training.py ├── oracle_fusion.py ├── oracle_testing.py ├── prototxts │ ├── deploy_C3.prototxt │ ├── deploy_F3.prototxt │ ├── deploy_O3.prototxt │ ├── training_I3x10.prototxt │ ├── training_J3x10.prototxt │ └── training_S3x10.prototxt ├── run.sh ├── surgery.py └── utils.py ├── README.md ├── icon.png └── logs ├── FD0%3AXI3_1_20180715_091940.txt ├── FD0%3AXJ3_1_20180715_203512.txt ├── FD0%3AYI3_1_20180715_091940.txt ├── FD0%3AYJ3_1_20180715_203512.txt ├── FD0%3AZI3_1_20180715_091940.txt ├── FD0%3AZJ3_1_20180715_203512.txt ├── FD1%3AXI3_1_20180715_091943.txt ├── FD1%3AXJ3_1_20180715_203131.txt ├── FD1%3AYI3_1_20180715_091943.txt ├── FD1%3AYJ3_1_20180715_203131.txt ├── FD1%3AZI3_1_20180715_091943.txt ├── FD1%3AZJ3_1_20180715_203131.txt ├── FD2%3AXI3_1_20180715_091946.txt ├── FD2%3AXJ3_1_20180715_202402.txt ├── FD2%3AYI3_1_20180715_091946.txt ├── FD2%3AYJ3_1_20180715_202402.txt ├── FD2%3AZI3_1_20180715_091946.txt ├── FD2%3AZJ3_1_20180715_202402.txt ├── FD3%3AXI3_1_20180715_091948.txt ├── FD3%3AXJ3_1_20180715_202726.txt ├── FD3%3AYI3_1_20180715_091948.txt ├── FD3%3AYJ3_1_20180715_202726.txt ├── FD3%3AZI3_1_20180715_091948.txt └── FD3%3AZJ3_1_20180715_202726.txt /DATA2NPY/dicom2npy.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import dicom 4 | 5 | 6 | N = 82 7 | W = 512 8 | H = 512 9 | path1 = 'DOI' 10 | path2 = 'images' 11 | if not os.path.exists(path2): 12 | os.makedirs(path2) 13 | 14 | for n in range(N): 15 | volumeID = '{:0>4}'.format(n + 1) 16 | print 'Processing File ' + volumeID 17 | filename1 = 'PANCREAS_' + volumeID 18 | directory1 = os.path.join(path1, filename1) 19 | filename2 = volumeID + '.npy' 20 | for path_, _, file_ in os.walk(directory1): 21 | L = len(file_) 22 | if L > 0: 23 | print ' ' + str(L) + ' slices along the axial view.' 24 | data = np.zeros((W, H, L), dtype = np.int16) 25 | for f in sorted(file_): 26 | file1 = os.path.abspath(os.path.join(path_, f)) 27 | image = dicom.read_file(file1) 28 | sliceID = image.data_element("InstanceNumber").value - 1 29 | if image.pixel_array.shape[0] <> 512 or image.pixel_array.shape[1] <> 512: 30 | exit(' Error: DICOM image does not fit ' + str(W) + 'x' + str(H) + ' size!') 31 | data[:, :, sliceID] = image.pixel_array 32 | file2 = os.path.join(path2, filename2) 33 | np.save(file2, data) 34 | print 'File ' + volumeID + ' is saved in ' + file2 + ' .' 
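# A minimal usage sketch, assuming dicom2npy.py has already been run on the NIH pancreas-CT
# 'DOI' folder described above: reload one converted volume and check the invariants that the
# script enforces. The name '0001.npy' follows the '{:0>4}' pattern used above.
import numpy as np
import os

volume = np.load(os.path.join('images', '0001.npy'))
print 'Shape: ' + str(volume.shape)    # expected (512, 512, number_of_axial_slices)
print 'Dtype: ' + str(volume.dtype)    # expected int16, raw DICOM pixel values
print 'Range: [' + str(volume.min()) + ', ' + str(volume.max()) + ']'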
35 | 36 | -------------------------------------------------------------------------------- /DATA2NPY/nii2npy.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import nibabel 4 | 5 | 6 | N = 82 7 | W = 512 8 | H = 512 9 | path1 = 'TCIA_pancreas_labels-02-05-2017' 10 | path2 = 'labels' 11 | if not os.path.exists(path2): 12 | os.makedirs(path2) 13 | 14 | for n in range(N): 15 | volumeID = '{:0>4}'.format(n + 1) 16 | print 'Processing File ' + volumeID 17 | filename1 = 'label' + volumeID + '.nii.gz' 18 | directory1 = os.path.join(path1, filename1) 19 | filename2 = volumeID + '.npy' 20 | file1 = os.path.join(path1, filename1) 21 | data = nibabel.load(file1).get_data().transpose(1, 0, 2) 22 | print ' Data shape is ' + str(data.shape) + ' .' 23 | file2 = os.path.join(path2, filename2) 24 | np.save(file2, data) 25 | print 'File ' + volumeID + ' is saved in ' + file2 + ' .' 26 | 27 |
-------------------------------------------------------------------------------- /DiceLossLayer/dice_loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <algorithm> 2 | #include <cfloat> 3 | #include <vector> 4 | 5 | #include "caffe/layers/dice_loss_layer.hpp" 6 | #include "caffe/util/math_functions.hpp" 7 | 8 | #define FLT_EPSILON 0.000001 9 | 10 | namespace caffe { 11 | 12 | template <typename Dtype> 13 | void DiceLossLayer<Dtype>::LayerSetUp( 14 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { 15 | LossLayer<Dtype>::LayerSetUp(bottom, top); 16 | sigmoid_bottom_vec_.clear(); 17 | sigmoid_bottom_vec_.push_back(bottom[0]); 18 | sigmoid_top_vec_.clear(); 19 | sigmoid_top_vec_.push_back(sigmoid_output_.get()); 20 | sigmoid_layer_->SetUp(sigmoid_bottom_vec_, sigmoid_top_vec_); 21 | }
22 | 23 | template <typename Dtype> 24 | void DiceLossLayer<Dtype>::Reshape( 25 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { 26 | LossLayer<Dtype>::Reshape(bottom, top); 27 | CHECK_EQ(bottom[0]->count(), bottom[1]->count()) << 28 | "DICE_LOSS layer inputs must have the same count."; 29 | sigmoid_layer_->Reshape(sigmoid_bottom_vec_, sigmoid_top_vec_); 30 | } 31 |
32 | template <typename Dtype> 33 | void DiceLossLayer<Dtype>::Forward_cpu( 34 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { 35 | sigmoid_bottom_vec_[0] = bottom[0]; 36 | sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); 37 | const int count = bottom[0]->count(); 38 | const Dtype* target = bottom[1]->cpu_data(); 39 | Dtype loss = 1; 40 | Dtype up = (Dtype) FLT_EPSILON; 41 | Dtype down = (Dtype) FLT_EPSILON; 42 | const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data(); 43 | for (int i = 0; i < count; ++i) { 44 | up += 2 * target[i] * sigmoid_output_data[i]; 45 | down += target[i] + sigmoid_output_data[i]; 46 | } 47 | loss -= up / down; 48 | top[0]->mutable_cpu_data()[0] = loss; 49 | } 50 |
51 | template <typename Dtype> 52 | void DiceLossLayer<Dtype>::Backward_cpu( 53 | const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, 54 | const vector<Blob<Dtype>*>& bottom) { 55 | if (propagate_down[1]) { 56 | LOG(FATAL) << this->type() 57 | << " Layer cannot backpropagate to label inputs."; 58 | } 59 | if (propagate_down[0]) { 60 | const int count = bottom[0]->count(); 61 | const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data(); 62 | const Dtype* target = bottom[1]->cpu_data(); 63 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 64 | Dtype intersection = 0; 65 | Dtype Union = 0; 66 | for (int i = 0; i < count; ++i) { 67 | intersection += target[i] * sigmoid_output_data[i]; 68 | Union += target[i] + sigmoid_output_data[i]; 69 | } 70 | Dtype down = (Union + (Dtype) FLT_EPSILON)
* (Union + (Dtype) FLT_EPSILON); 71 | for (int i = 0; i < count; ++i) { 72 | Dtype up = 2 * target[i] * (Union + (Dtype) FLT_EPSILON) - 73 | 2 * intersection - (Dtype) FLT_EPSILON; 74 | bottom_diff[i] = - (up / down) * sigmoid_output_data[i] * 75 | (1 - sigmoid_output_data[i]); 76 | } 77 | const Dtype loss_weight = top[0]->cpu_diff()[0]; 78 | caffe_scal(count, loss_weight, bottom_diff); 79 | } 80 | } 81 | 82 | #ifdef CPU_ONLY 83 | // STUB_GPU_BACKWARD(SigmoidCrossEntropyLossLayer, Backward); 84 | #endif 85 | 86 | INSTANTIATE_CLASS(DiceLossLayer); 87 | REGISTER_LAYER_CLASS(DiceLoss); 88 | 89 | } // namespace caffe 90 |
-------------------------------------------------------------------------------- /DiceLossLayer/dice_loss_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_DICE_LOSS_LAYER_HPP_ 2 | #define CAFFE_DICE_LOSS_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/loss_layer.hpp" 11 | #include "caffe/layers/sigmoid_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | 16 | template <typename Dtype> 17 | class DiceLossLayer : public LossLayer<Dtype> { 18 | public: 19 | explicit DiceLossLayer(const LayerParameter& param) 20 | : LossLayer<Dtype>(param), 21 | sigmoid_layer_(new SigmoidLayer<Dtype>(param)), 22 | sigmoid_output_(new Blob<Dtype>()) {}
23 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 24 | const vector<Blob<Dtype>*>& top); 25 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 26 | const vector<Blob<Dtype>*>& top); 27 | 28 | virtual inline const char* type() const { return "DiceLoss"; } 29 | 30 | protected: 31 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, 32 | const vector<Blob<Dtype>*>& top); 33 | 34 | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, 35 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 36 | 37 | shared_ptr<SigmoidLayer<Dtype> > sigmoid_layer_; 38 | shared_ptr<Blob<Dtype> > sigmoid_output_; 39 | vector<Blob<Dtype>*> sigmoid_bottom_vec_; 40 | vector<Blob<Dtype>*> sigmoid_top_vec_; 41 | }; 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_DICE_LOSS_HPP_ 46 |
-------------------------------------------------------------------------------- /OrganSegRSTN/Crop.py: -------------------------------------------------------------------------------- 1 | import caffe 2 | import numpy as np 3 | import random 4 | 5 | 6 | class CropLayer(caffe.Layer): 7 | 8 | def setup(self, bottom, top): 9 | self.margin = 0 10 | self.prob = 0 11 | self.batch = 0 12 | self.left = 0 13 | self.right = 0 14 | self.top = 0 15 | self.bottom = 0 16 | params = eval(self.param_str) 17 | self.TEST = params["TEST"] 18 | 19 | 20 | def reshape(self, bottom, top): 21 | (N, C, W, H) = bottom[0].data.shape 22 | data = bottom[1].data 23 | binary_mask = (bottom[0].data >= 0.5).astype(np.uint8) 24 | if len(bottom) == 6 and np.sum(binary_mask) == 0: 25 | binary_mask = (bottom[5].data >= 0.5).astype(np.uint8) 26 | self.margin = int(bottom[2].data) 27 | self.prob = float(bottom[3].data) 28 | self.batch = int(bottom[4].data)
29 | if self.TEST == 1: 30 | self.left = self.margin 31 | self.right = self.margin 32 | self.top = self.margin 33 | self.bottom = self.margin 34 | else: 35 | self.update_margin() 36 | if np.sum(binary_mask) == 0: 37 | minA = 0 38 | maxA = W 39 | minB = 0 40 | maxB = H 41 | self.no_forward = True 42 | else: 43 | if N > 1: 44 | mask = np.zeros(shape = (N, C, W, H)) 45 | for n in range(N): 46 | cur_mask = binary_mask[n, :, :, :] 47 | arr = np.nonzero(cur_mask) 48 | minA = min(arr[1]) 49 | maxA = max(arr[1]) 50 | minB = min(arr[2]) 51 | maxB =
max(arr[2]) 52 | bbox = [int(max(minA - self.left, 0)), int(min(maxA + self.right + 1, W)), \ 53 | int(max(minB - self.top, 0)), int(min(maxB + self.bottom + 1, H))] 54 | mask[n, :, bbox[0]: bbox[1], bbox[2]: bbox[3]] = 1 55 | data = data * mask 56 | 57 | arr = np.nonzero(binary_mask) 58 | minA = min(arr[2]) 59 | maxA = max(arr[2]) 60 | minB = min(arr[3]) 61 | maxB = max(arr[3]) 62 | self.no_forward = False 63 | self.bbox = [int(max(minA - self.left, 0)), int(min(maxA + self.right + 1, W)), \ 64 | int(max(minB - self.top, 0)), int(min(maxB + self.bottom + 1, H))] 65 | self.cropped_image = data[:, :, self.bbox[0]: self.bbox[1], \ 66 | self.bbox[2]: self.bbox[3]].copy().astype(np.float32) 67 | top[0].reshape(*self.cropped_image.shape) 68 | top[1].reshape(1, 2, 4) 69 | 70 | 71 | def forward(self, bottom, top): 72 | if self.no_forward == True and self.TEST == 1: 73 | top[0].data[...] = 0. 74 | else: 75 | top[0].data[...] = self.cropped_image 76 | top[1].data[...] = np.zeros((1, 2, 4), dtype = np.int16) 77 | top[1].data[0][0] = self.bbox 78 | top[1].data[0][1] = bottom[0].data.shape 79 | 80 | 81 | def backward(self, top, propagate_down, bottom): 82 | diff = np.zeros(bottom[0].data.shape) 83 | diff[:, :, self.bbox[0]: self.bbox[1], self.bbox[2]: self.bbox[3]] = top[0].diff 84 | bottom[1].diff[...] = diff 85 | 86 | 87 | def update_margin(self): 88 | MAX_INT = 256 89 | if random.randint(0, MAX_INT - 1) >= MAX_INT * self.prob: 90 | self.left = self.margin 91 | self.right = self.margin 92 | self.top = self.margin 93 | self.bottom = self.margin 94 | else: 95 | a = np.zeros(self.batch * 4, dtype = np.uint8) 96 | for i in range(self.batch * 4): 97 | a[i] = random.randint(0, self.margin * 2) 98 | self.left = int(a[0: self.batch].sum() / self.batch) 99 | self.right = int(a[self.batch: self.batch * 2].sum() / self.batch) 100 | self.top = int(a[self.batch * 2: self.batch * 3].sum() / self.batch) 101 | self.bottom = int(a[self.batch * 3: self.batch * 4].sum() / self.batch) 102 | -------------------------------------------------------------------------------- /OrganSegRSTN/Crop_old.py: -------------------------------------------------------------------------------- 1 | import caffe 2 | import numpy as np 3 | import random 4 | 5 | 6 | class CropLayer(caffe.Layer): 7 | 8 | def setup(self, bottom, top): 9 | self.margin = 0 10 | self.prob = 0 11 | self.batch = 0 12 | self.left = 0 13 | self.right = 0 14 | self.top = 0 15 | self.bottom = 0 16 | params = eval(self.param_str) 17 | self.TEST = params["TEST"] 18 | 19 | 20 | def reshape(self, bottom, top): 21 | (N, C, W, H) = bottom[0].data.shape 22 | data = bottom[1].data 23 | binary_mask = (bottom[0].data >= 0.5).astype(np.uint8) 24 | if len(bottom) == 6 and np.sum(binary_mask) == 0: 25 | binary_mask = (bottom[5].data >= 0.5).astype(np.uint8) 26 | self.margin = int(bottom[2].data) 27 | self.prob = float(bottom[3].data) 28 | self.batch = int(bottom[4].data) 29 | if self.TEST == 1: 30 | self.left = self.margin 31 | self.right = self.margin 32 | self.top = self.margin 33 | self.bottom = self.margin 34 | else: 35 | self.update_margin() 36 | if np.sum(binary_mask) == 0: 37 | minA = 0 38 | maxA = W 39 | minB = 0 40 | maxB = H 41 | self.no_forward = True 42 | else: 43 | arr = np.nonzero(binary_mask) 44 | minA = min(arr[2]) 45 | maxA = max(arr[2]) 46 | minB = min(arr[3]) 47 | maxB = max(arr[3]) 48 | self.no_forward = False 49 | self.bbox = [int(max(minA - self.left, 0)), int(min(maxA + self.right + 1, W)), \ 50 | int(max(minB - self.top, 0)), int(min(maxB + self.bottom + 1, 
H))] 51 | self.cropped_image = data[:, :, self.bbox[0]: self.bbox[1], \ 52 | self.bbox[2]: self.bbox[3]].copy().astype(np.float32) 53 | top[0].reshape(*self.cropped_image.shape) 54 | top[1].reshape(1, 2, 4) 55 | 56 | 57 | def forward(self, bottom, top): 58 | if self.no_forward == True and self.TEST == 1: 59 | top[0].data[...] = 0. 60 | else: 61 | top[0].data[...] = self.cropped_image 62 | top[1].data[...] = np.zeros((1, 2, 4), dtype = np.int16) 63 | top[1].data[0][0] = self.bbox 64 | top[1].data[0][1] = bottom[0].data.shape 65 | 66 | 67 | def backward(self, top, propagate_down, bottom): 68 | diff = np.zeros(bottom[0].data.shape) 69 | diff[:, :, self.bbox[0]: self.bbox[1], self.bbox[2]: self.bbox[3]] = top[0].diff 70 | bottom[1].diff[...] = diff 71 | 72 | 73 | def update_margin(self): 74 | MAX_INT = 256 75 | if random.randint(0, MAX_INT - 1) >= MAX_INT * self.prob: 76 | self.left = self.margin 77 | self.right = self.margin 78 | self.top = self.margin 79 | self.bottom = self.margin 80 | else: 81 | a = np.zeros(self.batch * 4, dtype = np.uint8) 82 | for i in range(self.batch * 4): 83 | a[i] = random.randint(0, self.margin * 2) 84 | self.left = int(a[0: self.batch].sum() / self.batch) 85 | self.right = int(a[self.batch: self.batch * 2].sum() / self.batch) 86 | self.top = int(a[self.batch * 2: self.batch * 3].sum() / self.batch) 87 | self.bottom = int(a[self.batch * 3: self.batch * 4].sum() / self.batch) 88 | -------------------------------------------------------------------------------- /OrganSegRSTN/Data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | import math 4 | import caffe 5 | import random 6 | from PIL import Image 7 | from joint_training import * 8 | 9 | 10 | sys.path.insert(0, CAFFE_root + 'python') 11 | 12 | 13 | class DataLayer(caffe.Layer): 14 | 15 | def setup(self, bottom, top): 16 | self.random = True 17 | image_list = open(training_set_filename(current_fold), 'r').read().splitlines() 18 | self.training_image_set = np.zeros((len(image_list)), dtype = np.int) 19 | for i in range(len(image_list)): 20 | s = image_list[i].split(' ') 21 | self.training_image_set[i] = int(s[0]) 22 | slice_list = open(list_training[plane], 'r').read().splitlines() 23 | self.slices = len(slice_list) 24 | self.image_ID = np.zeros((self.slices), dtype = np.int) 25 | self.slice_ID = np.zeros((self.slices), dtype = np.int) 26 | self.image_filename = ['' for l in range(self.slices)] 27 | self.label_filename = ['' for l in range(self.slices)] 28 | self.average = np.zeros((self.slices)) 29 | self.pixels = np.zeros((self.slices), dtype = np.int) 30 | for l in range(self.slices): 31 | s = slice_list[l].split(' ') 32 | self.image_ID[l] = s[0] 33 | self.slice_ID[l] = s[1] 34 | self.image_filename[l] = s[2] 35 | self.label_filename[l] = s[3] 36 | self.average[l] = float(s[4]) 37 | self.pixels[l] = int(s[organ_ID * 5]) 38 | if slice_threshold <= 1: 39 | pixels_index = sorted(range(self.slices), key = lambda l: self.pixels[l]) 40 | last_index = int(math.floor((self.pixels > 0).sum() * slice_threshold)) 41 | min_pixels = self.pixels[pixels_index[-last_index]] 42 | else: 43 | min_pixels = slice_threshold 44 | self.active_index = [l for l, p in enumerate(self.pixels) if p >= min_pixels] 45 | self.index_ = -1 46 | self.next_slice_index() 47 | 48 | 49 | def next_slice_index(self): 50 | while True: 51 | if self.random: 52 | self.index_ = random.randint(0, len(self.active_index) - 1) 53 | else: 54 | self.index_ += 1 55 | if self.index_ == 
len(self.active_index): 56 | self.index_ = 0 57 | self.index1 = self.active_index[self.index_] 58 | if self.image_ID[self.index1] in self.training_image_set: 59 | break 60 | self.index0 = self.index1 - 1 61 | if self.index1 == 0 or self.slice_ID[self.index0] <> self.slice_ID[self.index1] - 1: 62 | self.index0 = self.index1 63 | self.index2 = self.index1 + 1 64 | if self.index1 == self.slices - 1 or \ 65 | self.slice_ID[self.index2] <> self.slice_ID[self.index1] + 1: 66 | self.index2 = self.index1 67 | 68 | 69 | def reshape(self, bottom, top): 70 | self.data, self.label = self.load_data() 71 | top[0].reshape(1, *self.data.shape) 72 | top[1].reshape(1, *self.label.shape) 73 | top[2].reshape((1)) 74 | top[3].reshape((1)) 75 | top[4].reshape((1)) 76 | 77 | 78 | def forward(self, bottom, top): 79 | top[0].data[...] = self.data 80 | top[1].data[...] = self.label 81 | top[2].data[...] = margin 82 | top[3].data[...] = prob 83 | top[4].data[...] = sample_batch 84 | self.next_slice_index() 85 | 86 | 87 | def backward(self, top, propagate_down, bottom): 88 | pass 89 | 90 | 91 | def load_data(self): 92 | if slice_thickness == 1: 93 | image1 = np.load(self.image_filename[self.index1]).astype(np.float32) 94 | label1 = np.load(self.label_filename[self.index1]) 95 | width = label1.shape[0] 96 | height = label1.shape[1] 97 | image = np.repeat(image1.reshape(1, width, height), 3, axis = 0) 98 | label = label1.reshape(1, width, height) 99 | elif slice_thickness == 3: 100 | image0 = np.load(self.image_filename[self.index0]) 101 | width = image0.shape[0] 102 | height = image0.shape[1] 103 | image = np.zeros((3, width, height), dtype = np.float32) 104 | image[0, ...] = image0 105 | image[1, ...] = np.load(self.image_filename[self.index1]) 106 | image[2, ...] = np.load(self.image_filename[self.index2]) 107 | label = np.zeros((3, width, height), dtype = np.uint8) 108 | label[0, ...] = np.load(self.label_filename[self.index0]) 109 | label[1, ...] = np.load(self.label_filename[self.index1]) 110 | label[2, ...] = np.load(self.label_filename[self.index2]) 111 | np.minimum(np.maximum(image, low_range, image), high_range, image) 112 | image -= low_range 113 | image /= (high_range - low_range) 114 | label = is_organ(label, organ_ID).astype(np.uint8) 115 | return image, label 116 | -------------------------------------------------------------------------------- /OrganSegRSTN/Uncrop.py: -------------------------------------------------------------------------------- 1 | import caffe 2 | import numpy as np 3 | 4 | 5 | class UncropLayer(caffe.Layer): 6 | 7 | def setup(self, bottom, top): 8 | if len(bottom) == 4: 9 | self.no_forward = True 10 | else: 11 | self.no_forward = False 12 | 13 | 14 | def reshape(self, bottom, top): 15 | top[0].reshape(*bottom[2].data.shape) 16 | 17 | 18 | def forward(self, bottom, top): 19 | if self.no_forward == True and np.sum(bottom[3].data) == 0: 20 | top[0].data[...] = MIN_VALUE 21 | else: 22 | top[0].data[...] = np.ones(bottom[2].data.shape, dtype = np.float32) 23 | top[0].data[...] *= (-9999999) 24 | shape_ = bottom[0].data[0][0].astype(np.int16) 25 | top[0].data[:, :, shape_[0]: shape_[1], shape_[2]: shape_[3]] = \ 26 | bottom[1].data.astype(np.float32) 27 | 28 | 29 | def backward(self, top, propagate_down, bottom): 30 | shape_ = bottom[0].data[0][0].astype(np.int16) 31 | bottom[1].diff[...] 
= top[0].diff[:, :, shape_[0]: shape_[1], shape_[2]: shape_[3]] 32 | -------------------------------------------------------------------------------- /OrganSegRSTN/_fast_functions.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/198808xc/OrganSegRSTN/e78e44c5ed85e5871a6a74dc7d2845de166908f7/OrganSegRSTN/_fast_functions.so -------------------------------------------------------------------------------- /OrganSegRSTN/coarse2fine_testing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import shutil 5 | import time 6 | import caffe 7 | from utils import * 8 | import scipy.io 9 | 10 | 11 | data_path = sys.argv[1] 12 | current_fold = int(sys.argv[2]) 13 | organ_number = int(sys.argv[3]) 14 | low_range = int(sys.argv[4]) 15 | high_range = int(sys.argv[5]) 16 | slice_threshold = float(sys.argv[6]) 17 | slice_thickness = int(sys.argv[7]) 18 | organ_ID = int(sys.argv[8]) 19 | GPU_ID = int(sys.argv[9]) 20 | learning_rate1 = float(sys.argv[10]) 21 | learning_rate_m1 = int(sys.argv[11]) 22 | learning_rate2 = float(sys.argv[12]) 23 | learning_rate_m2 = int(sys.argv[13]) 24 | margin = int(sys.argv[14]) 25 | fine_snapshot_path1 = os.path.join(snapshot_path, 'indiv:' + \ 26 | sys.argv[10] + 'x' + str(learning_rate_m1)) 27 | fine_snapshot_path2 = os.path.join(snapshot_path, 'joint:' + \ 28 | sys.argv[10] + 'x' + str(learning_rate_m1) + ',' + \ 29 | sys.argv[12] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 30 | coarse_result_path = os.path.join(result_path, 'coarse:' + \ 31 | sys.argv[10] + 'x' + str(learning_rate_m1) + ',' + \ 32 | sys.argv[12] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 33 | coarse2fine_result_path = os.path.join(result_path, 'coarse2fine:' + \ 34 | sys.argv[10] + 'x' + str(learning_rate_m1) + ',' + \ 35 | sys.argv[12] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 36 | coarse_starting_iterations = int(sys.argv[15]) 37 | coarse_step = int(sys.argv[16]) 38 | coarse_max_iterations = int(sys.argv[17]) 39 | coarse_iteration = range(coarse_starting_iterations, coarse_max_iterations + 1, coarse_step) 40 | coarse_threshold = float(sys.argv[18]) 41 | fine_starting_iterations = int(sys.argv[19]) 42 | fine_step = int(sys.argv[20]) 43 | fine_max_iterations = int(sys.argv[21]) 44 | fine_iteration = range(fine_starting_iterations, fine_max_iterations + 1, fine_step) 45 | fine_threshold = float(sys.argv[22]) 46 | max_rounds = int(sys.argv[23]) 47 | timestamp1 = {} 48 | timestamp1['X'] = sys.argv[24] 49 | timestamp1['Y'] = sys.argv[25] 50 | timestamp1['Z'] = sys.argv[26] 51 | timestamp2 = {} 52 | timestamp2['X'] = sys.argv[27] 53 | timestamp2['Y'] = sys.argv[28] 54 | timestamp2['Z'] = sys.argv[29] 55 | volume_list = open(testing_set_filename(current_fold), 'r').read().splitlines() 56 | while volume_list[len(volume_list) - 1] == '': 57 | volume_list.pop() 58 | 59 | print 'Looking for snapshots:' 60 | fine_snapshot_ = {} 61 | fine_snapshot_name_ = {} 62 | for plane in ['X', 'Y', 'Z']: 63 | [fine_snapshot_name1, fine_snapshot_name2] = snapshot_name_from_timestamp_2( \ 64 | fine_snapshot_path1, fine_snapshot_path2, current_fold, plane, 'I', 'J', \ 65 | slice_thickness, organ_ID, fine_iteration, timestamp1[plane], timestamp2[plane]) 66 | if fine_snapshot_name1 == '' or fine_snapshot_name2 == '': 67 | exit(' Error: no valid snapshot directories are detected!') 68 | fine_snapshot_directory1 = os.path.join(fine_snapshot_path1, 
fine_snapshot_name1) 69 | print ' Snapshot directory 1 for plane ' + plane + ': ' + fine_snapshot_directory1 + ' .' 70 | fine_snapshot_directory2 = os.path.join(fine_snapshot_path2, fine_snapshot_name2) 71 | print ' Snapshot directory 2 for plane ' + plane + ': ' + fine_snapshot_directory2 + ' .' 72 | fine_snapshot = [] 73 | for t in range(len(fine_iteration)): 74 | fine_snapshot_file1 = snapshot_filename(fine_snapshot_directory1, fine_iteration[t]) 75 | fine_snapshot_file2 = snapshot_filename(fine_snapshot_directory2, fine_iteration[t]) 76 | if os.path.isfile(fine_snapshot_file1): 77 | fine_snapshot.append(fine_snapshot_file1) 78 | else: 79 | fine_snapshot.append(fine_snapshot_file2) 80 | print ' ' + str(len(fine_snapshot)) + ' snapshots are to be evaluated.' 81 | for t in range(len(fine_snapshot)): 82 | print ' Snapshot #' + str(t + 1) + ': ' + fine_snapshot[t] + ' .' 83 | fine_snapshot_[plane] = fine_snapshot 84 | fine_snapshot_name2 = fine_snapshot_name2.split(':')[1] 85 | fine_snapshot_name_[plane] = fine_snapshot_name2.split(',') 86 | timestamp1[plane] = fine_snapshot_name_[plane][0][-15: ] 87 | timestamp2[plane] = fine_snapshot_name_[plane][1][-15: ] 88 | 89 | print 'In the coarse stage:' 90 | coarse_result_name_ = {} 91 | coarse_result_directory_ = {} 92 | for plane in ['X', 'Y', 'Z']: 93 | coarse_result_name__ = result_name_from_timestamp_2(coarse_result_path, \ 94 | current_fold, plane, 'I', 'J', slice_thickness, organ_ID, \ 95 | coarse_iteration, volume_list, timestamp1[plane], timestamp2[plane]) 96 | if coarse_result_name__ == '': 97 | exit(' Error: no valid result directories are detected!') 98 | coarse_result_directory__ = os.path.join(coarse_result_path, coarse_result_name__, 'volumes') 99 | print ' Result directory for plane ' + plane + ': ' + coarse_result_directory__ + ' .' 
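# Naming convention used by the snapshot/result lookups here (inferred from the snapshot_name
# construction in indiv_training.py and from the filenames under logs/): directories are named
# 'FD<fold>:<plane><stage><thickness>_<organ_ID>_<timestamp>', where the stage letter is I for
# indiv_training and J for joint_training, e.g. 'FD0:XI3_1_20180715_091940' for fold 0, plane X,
# individual training, 3-slice input and organ 1. The 'FD<fold>:' prefix is stripped before the
# per-plane names are recombined into the coarse-to-fine result name.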
100 | if coarse_result_name__.startswith('FD'): 101 | index_ = coarse_result_name__.find(':') 102 | coarse_result_name__ = coarse_result_name__[index_ + 1: ] 103 | coarse_result_name_[plane] = coarse_result_name__ 104 | coarse_result_directory_[plane] = coarse_result_directory__ 105 | 106 | coarse2fine_result_name = 'FD' + str(current_fold) + ':' + \ 107 | fine_snapshot_name_['X'][0] + ',' + fine_snapshot_name_['X'][1] + ',' + \ 108 | fine_snapshot_name_['Y'][0] + ',' + fine_snapshot_name_['Y'][1] + ',' + \ 109 | fine_snapshot_name_['Z'][0] + ',' + fine_snapshot_name_['Z'][1] + ':' + \ 110 | str(coarse_starting_iterations) + '_' + str(coarse_step) + '_' + \ 111 | str(coarse_max_iterations) + ',' + str(coarse_threshold) + ':' + \ 112 | str(fine_starting_iterations) + '_' + str(fine_step) + '_' + \ 113 | str(fine_max_iterations) + ',' + str(fine_threshold) + ',' + str(max_rounds) 114 | coarse2fine_result_directory = os.path.join( \ 115 | coarse2fine_result_path, coarse2fine_result_name, 'volumes') 116 | finished = np.ones((len(volume_list)), dtype = np.int) 117 | for i in range(len(volume_list)): 118 | for r in range(max_rounds + 1): 119 | volume_file = volume_filename_coarse2fine(coarse2fine_result_directory, r, i) 120 | if not os.path.isfile(volume_file): 121 | finished[i] = 0 122 | break 123 | finished_all = (finished.sum() == len(volume_list)) 124 | if finished_all: 125 | exit() 126 | else: 127 | deploy_filename = 'deploy_' + 'F' + str(slice_thickness) + '.prototxt' 128 | deploy_file = os.path.join(prototxt_path, deploy_filename) 129 | deploy_file_ = os.path.join('prototxts', deploy_filename) 130 | shutil.copyfile(deploy_file_, deploy_file) 131 | 132 | sys.path.insert(0, os.path.join(CAFFE_root, 'python')) 133 | caffe.set_device(GPU_ID) 134 | caffe.set_mode_gpu() 135 | net_ = {} 136 | for plane in ['X', 'Y', 'Z']: 137 | net_[plane] = [] 138 | for t in range(len(fine_iteration)): 139 | net = caffe.Net(deploy_file, fine_snapshot_[plane][t], caffe.TEST) 140 | net_[plane].append(net) 141 | DSC = np.zeros((max_rounds + 1, len(volume_list))) 142 | DSC_90 = np.zeros((len(volume_list))) 143 | DSC_95 = np.zeros((len(volume_list))) 144 | DSC_98 = np.zeros((len(volume_list))) 145 | DSC_99 = np.zeros((len(volume_list))) 146 | coarse2fine_result_directory = os.path.join(coarse2fine_result_path, \ 147 | coarse2fine_result_name, 'volumes') 148 | if not os.path.exists(coarse2fine_result_directory): 149 | os.makedirs(coarse2fine_result_directory) 150 | coarse2fine_result_file = os.path.join(coarse2fine_result_path, \ 151 | coarse2fine_result_name, 'results.txt') 152 | output = open(coarse2fine_result_file, 'w') 153 | output.close() 154 | output = open(coarse2fine_result_file, 'a+') 155 | output.write('Fusing results of ' + str(len(coarse_iteration)) + \ 156 | ' and ' + str(len(fine_iteration)) + ' snapshots:\n') 157 | output.close() 158 | 159 | for i in range(len(volume_list)): 160 | start_time = time.time() 161 | print 'Testing ' + str(i + 1) + ' out of ' + str(len(volume_list)) + ' testcases.' 
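# Outline of the per-testcase block below: round 0 averages the saved coarse-stage probability
# maps over the three planes (and all coarse snapshots); every later round r >= 1 feeds the
# previous round's mask and score back into the fine-stage network through the 'label' and
# 'prob' blobs so that the cropping layer can zoom in around that mask, then again averages the
# predictions over the three planes and all fine snapshots. DSC_90/95/98/99 record the DSC of
# the first round whose inter-round DSC reaches the corresponding threshold (or of the final
# round, after max_rounds iterations).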
162 | output = open(coarse2fine_result_file, 'a+') 163 | output.write(' Testcase ' + str(i + 1) + ':\n') 164 | output.close() 165 | s = volume_list[i].split(' ') 166 | label = np.load(s[2]) 167 | label = is_organ(label, organ_ID).astype(np.uint8) 168 | finished = True 169 | for r in range(max_rounds + 1): 170 | volume_file = volume_filename_coarse2fine(coarse2fine_result_directory, r, i) 171 | if not os.path.isfile(volume_file): 172 | finished = False 173 | break 174 | if not finished: 175 | image = np.load(s[1]).astype(np.float32) 176 | np.minimum(np.maximum(image, low_range, image), high_range, image) 177 | image -= low_range 178 | image /= (high_range - low_range) 179 | imageX = image 180 | imageY = image.transpose(1, 0, 2).copy() 181 | imageZ = image.transpose(2, 0, 1).copy() 182 | print ' Data loading is finished: ' + str(time.time() - start_time) + ' second(s) elapsed.' 183 | for r in range(max_rounds + 1): 184 | print ' Iteration round ' + str(r) + ':' 185 | volume_file = volume_filename_coarse2fine(coarse2fine_result_directory, r, i) 186 | if not finished: 187 | if r == 0: 188 | pred_ = np.zeros(label.shape, dtype = np.float32) 189 | for plane in ['X', 'Y', 'Z']: 190 | for t in range(len(coarse_iteration)): 191 | volume_file_ = volume_filename_testing( \ 192 | coarse_result_directory_[plane], coarse_iteration[t], i) 193 | pred_ += np.load(volume_file_)['volume'] 194 | pred_ /= (255 * len(coarse_iteration) * 3) 195 | print ' Fusion is finished: ' + \ 196 | str(time.time() - start_time) + ' second(s) elapsed.' 197 | else: 198 | mask_sumX = np.sum(mask, axis = (1, 2)) 199 | if mask_sumX.sum() == 0: 200 | continue 201 | mask_sumY = np.sum(mask, axis = (0, 2)) 202 | mask_sumZ = np.sum(mask, axis = (0, 1)) 203 | scoreX = score 204 | scoreY = score.transpose(1, 0, 2).copy() 205 | scoreZ = score.transpose(2, 0, 1).copy() 206 | maskX = mask 207 | maskY = mask.transpose(1, 0, 2).copy() 208 | maskZ = mask.transpose(2, 0, 1).copy() 209 | pred_ = np.zeros(label.shape, dtype = np.float32) 210 | for plane in ['X', 'Y', 'Z']: 211 | for t in range(len(fine_iteration)): 212 | net = net_[plane][t] 213 | minR = 0 214 | if plane == 'X': 215 | maxR = label.shape[0] 216 | shape_ = (1, 3, image.shape[1], image.shape[2]) 217 | pred__ = np.zeros((image.shape[0], image.shape[1], image.shape[2]), \ 218 | dtype = np.float32) 219 | elif plane == 'Y': 220 | maxR = label.shape[1] 221 | shape_ = (1, 3, image.shape[0], image.shape[2]) 222 | pred__ = np.zeros((image.shape[1], image.shape[0], image.shape[2]), \ 223 | dtype = np.float32) 224 | elif plane == 'Z': 225 | maxR = label.shape[2] 226 | shape_ = (1, 3, image.shape[0], image.shape[1]) 227 | pred__ = np.zeros((image.shape[2], image.shape[0], image.shape[1]), \ 228 | dtype = np.float32) 229 | first = True 230 | for j in range(minR, maxR): 231 | if slice_thickness == 1: 232 | sID = [j, j, j] 233 | elif slice_thickness == 3: 234 | sID = [max(minR, j - 1), j, min(maxR - 1, j + 1)] 235 | if (plane == 'X' and mask_sumX[sID].sum() == 0) or \ 236 | (plane == 'Y' and mask_sumY[sID].sum() == 0) or \ 237 | (plane == 'Z' and mask_sumZ[sID].sum() == 0): 238 | continue 239 | if first: 240 | net.blobs['data'].reshape(*shape_) 241 | net.blobs['prob'].reshape(*shape_) 242 | net.blobs['label'].reshape(*shape_) 243 | net.blobs['crop_margin'].reshape((1)) 244 | net.blobs['crop_margin'].data[...] = margin 245 | net.blobs['crop_prob'].reshape((1)) 246 | net.blobs['crop_prob'].data[...] 
= 0 247 | net.blobs['crop_sample_batch'].reshape((1)) 248 | net.blobs['crop_sample_batch'].data[...] = 0 249 | first = False 250 | if plane == 'X': 251 | net.blobs['data'].data[0, ...] = imageX[sID, :, :] 252 | net.blobs['prob'].data[0, ...] = scoreX[sID, :, :] 253 | net.blobs['label'].data[0, ...] = maskX[sID, :, :] 254 | elif plane == 'Y': 255 | net.blobs['data'].data[0, ...] = imageY[sID, :, :] 256 | net.blobs['prob'].data[0, ...] = scoreY[sID, :, :] 257 | net.blobs['label'].data[0, ...] = maskY[sID, :, :] 258 | elif plane == 'Z': 259 | net.blobs['data'].data[0, ...] = imageZ[sID, :, :] 260 | net.blobs['prob'].data[0, ...] = scoreZ[sID, :, :] 261 | net.blobs['label'].data[0, ...] = maskZ[sID, :, :] 262 | net.forward() 263 | out = net.blobs['prob-R'].data[0, :, :, :] 264 | if slice_thickness == 1: 265 | pred__[j, :, :] = out 266 | elif slice_thickness == 3: 267 | if j == minR: 268 | pred__[minR: minR + 2, :, :] += out[1: 3, :, :] 269 | elif j == maxR - 1: 270 | pred__[maxR - 2: maxR, :, :] += out[0: 2, :, :] 271 | else: 272 | pred__[j - 1: j + 2, :, :] += out 273 | if slice_thickness == 3: 274 | pred__[minR, :, :] /= 2 275 | pred__[minR + 1: maxR - 1, :, :] /= 3 276 | pred__[maxR - 1, :, :] /= 2 277 | print ' Testing on plane ' + plane + ' and snapshot ' + str(t + 1) + \ 278 | ' is finished: ' + str(time.time() - start_time) + \ 279 | ' second(s) elapsed.' 280 | if plane == 'X': 281 | pred_ += pred__ 282 | elif plane == 'Y': 283 | pred_ += pred__.transpose(1, 0, 2) 284 | elif plane == 'Z': 285 | pred_ += pred__.transpose(1, 2, 0) 286 | pred__ = None 287 | pred_ /= (len(fine_iteration) * 3) 288 | print ' Testing is finished: ' + \ 289 | str(time.time() - start_time) + ' second(s) elapsed.' 290 | pred = (pred_ >= fine_threshold).astype(np.uint8) 291 | if r > 0: 292 | pred = post_processing(pred, pred, 0.5, organ_ID) 293 | np.savez_compressed(volume_file, volume = pred) 294 | else: 295 | pred = np.load(volume_file)['volume'].astype(np.uint8) 296 | print ' Testing result is loaded: ' + \ 297 | str(time.time() - start_time) + ' second(s) elapsed.' 298 | DSC[r, i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred) 299 | print ' DSC = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 300 | str(label_sum) + ') = ' + str(DSC[r, i]) + ' .' 301 | output = open(coarse2fine_result_file, 'a+') 302 | output.write(' Round ' + str(r) + ', ' + 'DSC = 2 * ' + str(inter_sum) + ' / (' + \ 303 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC[r, i]) + ' .\n') 304 | output.close() 305 | if pred_sum == 0 and label_sum == 0: 306 | DSC[r, i] = 0 307 | if r > 0: 308 | inter_DSC, inter_sum, pred_sum, label_sum = DSC_computation(mask, pred) 309 | if pred_sum == 0 and label_sum == 0: 310 | inter_DSC = 1 311 | print ' Inter-iteration DSC = 2 * ' + str(inter_sum) + ' / (' + \ 312 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(inter_DSC) + ' .' 
313 | output = open(coarse2fine_result_file, 'a+') 314 | output.write(' Inter-iteration DSC = 2 * ' + str(inter_sum) + ' / (' + \ 315 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(inter_DSC) + ' .\n') 316 | output.close() 317 | if DSC_90[i] == 0 and (r == max_rounds or inter_DSC >= 0.90): 318 | DSC_90[i] = DSC[r, i] 319 | if DSC_95[i] == 0 and (r == max_rounds or inter_DSC >= 0.95): 320 | DSC_95[i] = DSC[r, i] 321 | if DSC_98[i] == 0 and (r == max_rounds or inter_DSC >= 0.98): 322 | DSC_98[i] = DSC[r, i] 323 | if DSC_99[i] == 0 and (r == max_rounds or inter_DSC >= 0.99): 324 | DSC_99[i] = DSC[r, i] 325 | if r <= max_rounds: 326 | if not finished: 327 | score = pred_ 328 | mask = pred 329 | for r in range(max_rounds + 1): 330 | print 'Round ' + str(r) + ', ' + 'Average DSC = ' + str(np.mean(DSC[r, :])) + ' .' 331 | output = open(coarse2fine_result_file, 'a+') 332 | output.write('Round ' + str(r) + ', ' + 'Average DSC = ' + str(np.mean(DSC[r, :])) + ' .\n') 333 | output.close() 334 | print 'DSC threshold = 0.90, ' + 'Average DSC = ' + str(np.mean(DSC_90)) + ' .' 335 | print 'DSC threshold = 0.95, ' + 'Average DSC = ' + str(np.mean(DSC_95)) + ' .' 336 | print 'DSC threshold = 0.98, ' + 'Average DSC = ' + str(np.mean(DSC_98)) + ' .' 337 | print 'DSC threshold = 0.99, ' + 'Average DSC = ' + str(np.mean(DSC_99)) + ' .' 338 | output = open(coarse2fine_result_file, 'a+') 339 | output.write('DSC threshold = 0.90, ' + 'Average DSC = ' + str(np.mean(DSC_90)) + ' .\n') 340 | output.write('DSC threshold = 0.95, ' + 'Average DSC = ' + str(np.mean(DSC_95)) + ' .\n') 341 | output.write('DSC threshold = 0.98, ' + 'Average DSC = ' + str(np.mean(DSC_98)) + ' .\n') 342 | output.write('DSC threshold = 0.99, ' + 'Average DSC = ' + str(np.mean(DSC_99)) + ' .\n') 343 | output.close() 344 | print 'The coarse-to-fine testing process is finished.' 
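# The DSC values above are produced by the DSC_computation helper (exported by fast_functions.py
# from the compiled _fast_functions extension). A hedged pure-NumPy reference of the quantity
# being reported, written only to document the formula behind the printed
# 'DSC = 2 * inter / (pred + label)' lines (the return order mirrors the call sites):
import numpy as np

def dsc_reference(label, pred):
    # label, pred: binary masks (0/1) of identical shape
    inter_sum = int(np.logical_and(label, pred).sum())
    pred_sum = int(pred.sum())
    label_sum = int(label.sum())
    if pred_sum + label_sum == 0:
        return float('nan'), inter_sum, pred_sum, label_sum
    return 2.0 * inter_sum / (pred_sum + label_sum), inter_sum, pred_sum, label_sum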
345 | -------------------------------------------------------------------------------- /OrganSegRSTN/coarse_fusion.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import time 5 | from utils import * 6 | 7 | 8 | data_path = sys.argv[1] 9 | current_fold = int(sys.argv[2]) 10 | organ_number = int(sys.argv[3]) 11 | low_range = int(sys.argv[4]) 12 | high_range = int(sys.argv[5]) 13 | slice_threshold = float(sys.argv[6]) 14 | slice_thickness = int(sys.argv[7]) 15 | organ_ID = int(sys.argv[8]) 16 | GPU_ID = int(sys.argv[9]) 17 | learning_rate1 = float(sys.argv[10]) 18 | learning_rate_m1 = int(sys.argv[11]) 19 | learning_rate2 = float(sys.argv[12]) 20 | learning_rate_m2 = int(sys.argv[13]) 21 | margin = int(sys.argv[14]) 22 | result_path = os.path.join(result_path, 'coarse:' + \ 23 | sys.argv[10] + 'x' + str(learning_rate_m1) + ',' + \ 24 | sys.argv[12] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 25 | starting_iterations = int(sys.argv[15]) 26 | step = int(sys.argv[16]) 27 | max_iterations = int(sys.argv[17]) 28 | iteration = range(starting_iterations, max_iterations + 1, step) 29 | threshold = float(sys.argv[18]) 30 | timestamp1 = {} 31 | timestamp1['X'] = sys.argv[19] 32 | timestamp1['Y'] = sys.argv[20] 33 | timestamp1['Z'] = sys.argv[21] 34 | timestamp2 = {} 35 | timestamp2['X'] = sys.argv[22] 36 | timestamp2['Y'] = sys.argv[23] 37 | timestamp2['Z'] = sys.argv[24] 38 | volume_list = open(testing_set_filename(current_fold), 'r').read().splitlines() 39 | while volume_list[len(volume_list) - 1] == '': 40 | volume_list.pop() 41 | result_name_ = {} 42 | result_directory_ = {} 43 | for plane in ['X', 'Y', 'Z']: 44 | result_name__ = result_name_from_timestamp_2(result_path, \ 45 | current_fold, plane, 'I', 'J', slice_thickness, organ_ID, \ 46 | iteration, volume_list, timestamp1[plane], timestamp2[plane]) 47 | if result_name__ == '': 48 | exit('Error: no valid result directories are detected!') 49 | result_directory__ = os.path.join(result_path, result_name__, 'volumes') 50 | print 'Result directory for plane ' + plane + ': ' + result_directory__ + ' .' 
51 | if result_name__.startswith('FD'): 52 | index_ = result_name__.find(':') 53 | result_name__ = result_name__[index_ + 1: ] 54 | result_name_[plane] = result_name__ 55 | result_directory_[plane] = result_directory__ 56 | 57 | DSC_X = np.zeros((len(volume_list))) 58 | DSC_Y = np.zeros((len(volume_list))) 59 | DSC_Z = np.zeros((len(volume_list))) 60 | DSC_F1 = np.zeros((len(volume_list))) 61 | DSC_F2 = np.zeros((len(volume_list))) 62 | DSC_F3 = np.zeros((len(volume_list))) 63 | DSC_F1P = np.zeros((len(volume_list))) 64 | DSC_F2P = np.zeros((len(volume_list))) 65 | DSC_F3P = np.zeros((len(volume_list))) 66 | result_name = 'FD' + str(current_fold) + ':' + 'fusion:' + result_name_['X'] + ',' + \ 67 | result_name_['Y'] + ',' + result_name_['Z'] + ':' + str(starting_iterations) + '_' + \ 68 | str(step) + '_' + str(max_iterations) + ',' + str(threshold) 69 | result_directory = os.path.join(result_path, result_name, 'volumes') 70 | if not os.path.exists(result_directory): 71 | os.makedirs(result_directory) 72 | result_file = os.path.join(result_path, result_name, 'results.txt') 73 | output = open(result_file, 'w') 74 | output.close() 75 | output = open(result_file, 'a+') 76 | output.write('Fusing results of ' + str(len(iteration)) + ' snapshots:\n') 77 | output.close() 78 | for i in range(len(volume_list)): 79 | start_time = time.time() 80 | print 'Testing ' + str(i + 1) + ' out of ' + str(len(volume_list)) + ' testcases.' 81 | output = open(result_file, 'a+') 82 | output.write(' Testcase ' + str(i + 1) + ':\n') 83 | output.close() 84 | s = volume_list[i].split(' ') 85 | label = np.load(s[2]) 86 | label = is_organ(label, organ_ID).astype(np.uint8) 87 | for plane in ['X', 'Y', 'Z']: 88 | volume_file = volume_filename_fusion(result_directory, plane, i) 89 | pred = np.zeros(label.shape, dtype = np.float32) 90 | for t in range(len(iteration)): 91 | volume_file_ = volume_filename_testing(result_directory_[plane], iteration[t], i) 92 | pred += np.load(volume_file_)['volume'] 93 | pred_ = (pred >= threshold * 255 * len(iteration)) 94 | if not os.path.isfile(volume_file): 95 | np.savez_compressed(volume_file, volume = pred_) 96 | DSC_, inter_sum, pred_sum, label_sum = DSC_computation(label, pred_) 97 | print ' DSC_' + plane + ' = 2 * ' + str(inter_sum) + ' / (' + \ 98 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_) + ' .' 
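# Scaling note: coarse_testing.py saves each snapshot's prediction as
# np.around(pred * 255).astype(np.uint8), so the accumulated 'pred' above lies in
# [0, 255 * len(iteration)]. Thresholding at threshold * 255 * len(iteration) is therefore
# equivalent to thresholding the average per-voxel probability at 'threshold', and the division
# by (255 * len(iteration)) below recovers that average probability in [0, 1] for the
# cross-plane fusion.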
99 | output = open(result_file, 'a+') 100 | output.write(' DSC_' + plane + ' = 2 * ' + str(inter_sum) + ' / (' + \ 101 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_) + ' .\n') 102 | output.close() 103 | if pred_sum == 0 and label_sum == 0: 104 | DSC_ = 0 105 | pred /= (255 * len(iteration)) 106 | if plane == 'X': 107 | pred_X = pred 108 | DSC_X[i] = DSC_ 109 | elif plane == 'Y': 110 | pred_Y = pred 111 | DSC_Y[i] = DSC_ 112 | elif plane == 'Z': 113 | pred_Z = pred 114 | DSC_Z[i] = DSC_ 115 | volume_file_F1 = volume_filename_fusion(result_directory, 'F1', i) 116 | volume_file_F2 = volume_filename_fusion(result_directory, 'F2', i) 117 | volume_file_F3 = volume_filename_fusion(result_directory, 'F3', i) 118 | if not os.path.isfile(volume_file_F1) or not os.path.isfile(volume_file_F2) or \ 119 | not os.path.isfile(volume_file_F3): 120 | pred_total = pred_X + pred_Y + pred_Z 121 | if os.path.isfile(volume_file_F1): 122 | pred_F1 = np.load(volume_file_F1)['volume'].astype(np.uint8) 123 | else: 124 | pred_F1 = (pred_total >= 0.5).astype(np.uint8) 125 | np.savez_compressed(volume_file_F1, volume = pred_F1) 126 | DSC_F1[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F1) 127 | print ' DSC_F1 = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' \ 128 | + str(label_sum) + ') = ' + str(DSC_F1[i]) + ' .' 129 | output = open(result_file, 'a+') 130 | output.write(' DSC_F1 = 2 * ' + str(inter_sum) + ' / (' + \ 131 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F1[i]) + ' .\n') 132 | output.close() 133 | if pred_sum == 0 and label_sum == 0: 134 | DSC_F1[i] = 0 135 | if os.path.isfile(volume_file_F2): 136 | pred_F2 = np.load(volume_file_F2)['volume'].astype(np.uint8) 137 | else: 138 | pred_F2 = (pred_total >= 1.5).astype(np.uint8) 139 | np.savez_compressed(volume_file_F2, volume = pred_F2) 140 | DSC_F2[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F2) 141 | print ' DSC_F2 = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 142 | str(label_sum) + ') = ' + str(DSC_F2[i]) + ' .' 143 | output = open(result_file, 'a+') 144 | output.write(' DSC_F2 = 2 * ' + str(inter_sum) + ' / (' + \ 145 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F2[i]) + ' .\n') 146 | output.close() 147 | if pred_sum == 0 and label_sum == 0: 148 | DSC_F2[i] = 0 149 | if os.path.isfile(volume_file_F3): 150 | pred_F3 = np.load(volume_file_F3)['volume'].astype(np.uint8) 151 | else: 152 | pred_F3 = (pred_total >= 2.5).astype(np.uint8) 153 | np.savez_compressed(volume_file_F3, volume = pred_F3) 154 | DSC_F3[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F3) 155 | print ' DSC_F3 = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 156 | str(label_sum) + ') = ' + str(DSC_F3[i]) + ' .' 
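# Fusion semantics: pred_X, pred_Y and pred_Z are average probability maps in [0, 1], so
# pred_total lies in [0, 3]; thresholding it at 0.5, 1.5 and 2.5 acts roughly as
# "at least one plane", "majority of planes" and "all three planes" voting, giving the
# increasingly conservative masks F1, F2 and F3. The F1P/F2P/F3P variants below apply the
# compiled post_processing routine on top of these masks.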
157 | output = open(result_file, 'a+') 158 | output.write(' DSC_F3 = 2 * ' + str(inter_sum) + ' / (' + \ 159 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F3[i]) + ' .\n') 160 | output.close() 161 | if pred_sum == 0 and label_sum == 0: 162 | DSC_F3[i] = 0 163 | volume_file_F1P = volume_filename_fusion(result_directory, 'F1P', i) 164 | volume_file_F2P = volume_filename_fusion(result_directory, 'F2P', i) 165 | volume_file_F3P = volume_filename_fusion(result_directory, 'F3P', i) 166 | S = pred_F3 167 | if (S.sum() == 0): 168 | S = pred_F2 169 | if (S.sum() == 0): 170 | S = pred_F1 171 | if os.path.isfile(volume_file_F1P): 172 | pred_F1P = np.load(volume_file_F1P)['volume'].astype(np.uint8) 173 | else: 174 | pred_F1P = post_processing(pred_F1, S, 0.5, organ_ID) 175 | np.savez_compressed(volume_file_F1P, volume = pred_F1P) 176 | DSC_F1P[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F1P) 177 | print ' DSC_F1P = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 178 | str(label_sum) + ') = ' + str(DSC_F1P[i]) + ' .' 179 | output = open(result_file, 'a+') 180 | output.write(' DSC_F1P = 2 * ' + str(inter_sum) + ' / (' + \ 181 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F1P[i]) + ' .\n') 182 | output.close() 183 | if pred_sum == 0 and label_sum == 0: 184 | DSC_F1P[i] = 0 185 | if os.path.isfile(volume_file_F2P): 186 | pred_F2P = np.load(volume_file_F2P)['volume'].astype(np.uint8) 187 | else: 188 | pred_F2P = post_processing(pred_F2, S, 0.5, organ_ID) 189 | np.savez_compressed(volume_file_F2P, volume = pred_F2P) 190 | DSC_F2P[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F2P) 191 | print ' DSC_F2P = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 192 | str(label_sum) + ') = ' + str(DSC_F2P[i]) + ' .' 193 | output = open(result_file, 'a+') 194 | output.write(' DSC_F2P = 2 * ' + str(inter_sum) + ' / (' + \ 195 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F2P[i]) + ' .\n') 196 | output.close() 197 | if pred_sum == 0 and label_sum == 0: 198 | DSC_F2P[i] = 0 199 | if os.path.isfile(volume_file_F3P): 200 | pred_F3P = np.load(volume_file_F3P)['volume'].astype(np.uint8) 201 | else: 202 | pred_F3P = post_processing(pred_F3, S, 0.5, organ_ID) 203 | np.savez_compressed(volume_file_F3P, volume = pred_F3P) 204 | DSC_F3P[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F3P) 205 | print ' DSC_F3P = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 206 | str(label_sum) + ') = ' + str(DSC_F3P[i]) + ' .' 207 | output = open(result_file, 'a+') 208 | output.write(' DSC_F3P = 2 * ' + str(inter_sum) + ' / (' + \ 209 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F3P[i]) + ' .\n') 210 | output.close() 211 | if pred_sum == 0 and label_sum == 0: 212 | DSC_F3P[i] = 0 213 | pred_X = None 214 | pred_Y = None 215 | pred_Z = None 216 | pred_F1 = None 217 | pred_F2 = None 218 | pred_F3 = None 219 | pred_F1P = None 220 | pred_F2P = None 221 | pred_F3P = None 222 | output = open(result_file, 'a+') 223 | print 'Average DSC_X = ' + str(np.mean(DSC_X)) + ' .' 224 | output.write('Average DSC_X = ' + str(np.mean(DSC_X)) + ' .\n') 225 | print 'Average DSC_Y = ' + str(np.mean(DSC_Y)) + ' .' 226 | output.write('Average DSC_Y = ' + str(np.mean(DSC_Y)) + ' .\n') 227 | print 'Average DSC_Z = ' + str(np.mean(DSC_Z)) + ' .' 228 | output.write('Average DSC_Z = ' + str(np.mean(DSC_Z)) + ' .\n') 229 | print 'Average DSC_F1 = ' + str(np.mean(DSC_F1)) + ' .' 
230 | output.write('Average DSC_F1 = ' + str(np.mean(DSC_F1)) + ' .\n') 231 | print 'Average DSC_F2 = ' + str(np.mean(DSC_F2)) + ' .' 232 | output.write('Average DSC_F2 = ' + str(np.mean(DSC_F2)) + ' .\n') 233 | print 'Average DSC_F3 = ' + str(np.mean(DSC_F3)) + ' .' 234 | output.write('Average DSC_F3 = ' + str(np.mean(DSC_F3)) + ' .\n') 235 | print 'Average DSC_F1P = ' + str(np.mean(DSC_F1P)) + ' .' 236 | output.write('Average DSC_F1P = ' + str(np.mean(DSC_F1P)) + ' .\n') 237 | print 'Average DSC_F2P = ' + str(np.mean(DSC_F2P)) + ' .' 238 | output.write('Average DSC_F2P = ' + str(np.mean(DSC_F2P)) + ' .\n') 239 | print 'Average DSC_F3P = ' + str(np.mean(DSC_F3P)) + ' .' 240 | output.write('Average DSC_F3P = ' + str(np.mean(DSC_F3P)) + ' .\n') 241 | output.close() 242 | print 'The fusion process is finished.' 243 | -------------------------------------------------------------------------------- /OrganSegRSTN/coarse_testing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import shutil 5 | import time 6 | import caffe 7 | from utils import * 8 | 9 | 10 | data_path = sys.argv[1] 11 | current_fold = int(sys.argv[2]) 12 | organ_number = int(sys.argv[3]) 13 | low_range = int(sys.argv[4]) 14 | high_range = int(sys.argv[5]) 15 | slice_threshold = float(sys.argv[6]) 16 | slice_thickness = int(sys.argv[7]) 17 | organ_ID = int(sys.argv[8]) 18 | plane = sys.argv[9] 19 | GPU_ID = int(sys.argv[10]) 20 | learning_rate1 = float(sys.argv[11]) 21 | learning_rate_m1 = int(sys.argv[12]) 22 | learning_rate2 = float(sys.argv[13]) 23 | learning_rate_m2 = int(sys.argv[14]) 24 | margin = int(sys.argv[15]) 25 | prob = float(sys.argv[16]) 26 | sample_batch = int(sys.argv[17]) 27 | snapshot_path1 = os.path.join(snapshot_path, 'indiv:' + \ 28 | sys.argv[11] + 'x' + str(learning_rate_m1) + ',' + str(margin)) 29 | snapshot_path2 = os.path.join(snapshot_path, 'joint:' + \ 30 | sys.argv[11] + 'x' + str(learning_rate_m1) + ',' + \ 31 | sys.argv[13] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 32 | result_path = os.path.join(result_path, 'coarse:' + \ 33 | sys.argv[11] + 'x' + str(learning_rate_m1) + ',' + \ 34 | sys.argv[13] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 35 | max_iterations1 = int(sys.argv[18]) 36 | max_iterations2 = int(sys.argv[19]) 37 | starting_iterations = int(sys.argv[20]) 38 | step = int(sys.argv[21]) 39 | max_iterations = int(sys.argv[22]) 40 | iteration = range(starting_iterations, max_iterations + 1, step) 41 | timestamp1 = sys.argv[23] 42 | timestamp2 = sys.argv[24] 43 | [snapshot_name1, snapshot_name2] = snapshot_name_from_timestamp_2(snapshot_path1, snapshot_path2, \ 44 | current_fold, plane, 'I', 'J', slice_thickness, organ_ID, iteration, timestamp1, timestamp2) 45 | if snapshot_name1 == '' or snapshot_name2 == '': 46 | exit('Error: no valid snapshot directories are detected!') 47 | snapshot_directory1 = os.path.join(snapshot_path1, snapshot_name1) 48 | print 'Snapshot directory 1: ' + snapshot_directory1 + ' .' 49 | snapshot_directory2 = os.path.join(snapshot_path2, snapshot_name2) 50 | print 'Snapshot directory 2: ' + snapshot_directory2 + ' .' 
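# The loop below prefers, for every evaluated iteration, the snapshot saved during individual
# training (snapshot_directory1) and falls back to the jointly trained snapshot
# (snapshot_directory2) whenever the corresponding snapshot file is not found on disk.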
51 | snapshot = [] 52 | for t in range(len(iteration)): 53 | snapshot_file1 = snapshot_filename(snapshot_directory1, iteration[t]) 54 | snapshot_file2 = snapshot_filename(snapshot_directory2, iteration[t]) 55 | if os.path.isfile(snapshot_file1): 56 | snapshot.append(snapshot_file1) 57 | else: 58 | snapshot.append(snapshot_file2) 59 | print str(len(snapshot)) + ' snapshots are to be evaluated.' 60 | for t in range(len(snapshot)): 61 | print ' Snapshot #' + str(t + 1) + ': ' + snapshot[t] + ' .' 62 | result_name = snapshot_name2 63 | 64 | sys.path.insert(0, os.path.join(CAFFE_root, 'python')) 65 | caffe.set_device(GPU_ID) 66 | caffe.set_mode_gpu() 67 | 68 | volume_list = open(testing_set_filename(current_fold), 'r').read().splitlines() 69 | while volume_list[len(volume_list) - 1] == '': 70 | volume_list.pop() 71 | DSC = np.zeros((len(snapshot), len(volume_list))) 72 | result_directory = os.path.join(result_path, result_name, 'volumes') 73 | if not os.path.exists(result_directory): 74 | os.makedirs(result_directory) 75 | result_file = os.path.join(result_path, result_name, 'results.txt') 76 | output = open(result_file, 'w') 77 | output.close() 78 | for t in range(len(snapshot)): 79 | output = open(result_file, 'a+') 80 | output.write('Evaluating snapshot ' + str(iteration[t]) + ':\n') 81 | output.close() 82 | finished = True 83 | for i in range(len(volume_list)): 84 | volume_file = volume_filename_testing(result_directory, iteration[t], i) 85 | if not os.path.isfile(volume_file): 86 | finished = False 87 | break 88 | if not finished: 89 | deploy_filename = 'deploy_' + 'C' + str(slice_thickness) + '.prototxt' 90 | deploy_file = os.path.join(prototxt_path, deploy_filename) 91 | deploy_file_ = os.path.join('prototxts', deploy_filename) 92 | shutil.copyfile(deploy_file_, deploy_file) 93 | net = caffe.Net(deploy_file, snapshot[t], caffe.TEST) 94 | for i in range(len(volume_list)): 95 | start_time = time.time() 96 | print 'Testing ' + str(i + 1) + ' out of ' + str(len(volume_list)) + ' testcases, ' + \ 97 | str(t + 1) + ' out of ' + str(len(snapshot)) + ' snapshots.' 98 | volume_file = volume_filename_testing(result_directory, iteration[t], i) 99 | s = volume_list[i].split(' ') 100 | label = np.load(s[2]) 101 | label = is_organ(label, organ_ID).astype(np.uint8) 102 | if not os.path.isfile(volume_file): 103 | image = np.load(s[1]).astype(np.float32) 104 | np.minimum(np.maximum(image, low_range, image), high_range, image) 105 | image -= low_range 106 | image /= (high_range - low_range) 107 | print ' Data loading is finished: ' + \ 108 | str(time.time() - start_time) + ' second(s) elapsed.' 109 | pred = np.zeros(image.shape, dtype = np.float32) 110 | minR = 0 111 | if plane == 'X': 112 | maxR = image.shape[0] 113 | shape_ = (1, 3, image.shape[1], image.shape[2]) 114 | elif plane == 'Y': 115 | maxR = image.shape[1] 116 | shape_ = (1, 3, image.shape[0], image.shape[2]) 117 | elif plane == 'Z': 118 | maxR = image.shape[2] 119 | shape_ = (1, 3, image.shape[0], image.shape[1]) 120 | for j in range(minR, maxR): 121 | if slice_thickness == 1: 122 | sID = [j, j, j] 123 | elif slice_thickness == 3: 124 | sID = [max(minR, j - 1), j, min(maxR - 1, j + 1)] 125 | if plane == 'X': 126 | net.blobs['data'].reshape(*shape_) 127 | net.blobs['data'].data[0, ...] = image[sID, :, :] 128 | elif plane == 'Y': 129 | net.blobs['data'].reshape(*shape_) 130 | net.blobs['data'].data[0, ...] = image[:, sID, :].transpose(1, 0, 2) 131 | elif plane == 'Z': 132 | net.blobs['data'].reshape(*shape_) 133 | net.blobs['data'].data[0, ...] 
= image[:, :, sID].transpose(2, 0, 1) 134 | net.forward() 135 | out = net.blobs['prob'].data[0, :, :, :] 136 | if slice_thickness == 1: 137 | if plane == 'X': 138 | pred[j, :, :] = out 139 | elif plane == 'Y': 140 | pred[:, j, :] = out 141 | elif plane == 'Z': 142 | pred[:, :, j] = out 143 | elif slice_thickness == 3: 144 | if plane == 'X': 145 | if j == minR: 146 | pred[j: j + 2, :, :] += out[1: 3, :, :] 147 | elif j == maxR - 1: 148 | pred[j - 1: j + 1, :, :] += out[0: 2, :, :] 149 | else: 150 | pred[j - 1: j + 2, :, :] += out[...] 151 | elif plane == 'Y': 152 | if j == minR: 153 | pred[:, j: j + 2, :] += out[1: 3, :, :].transpose(1, 0, 2) 154 | elif j == maxR - 1: 155 | pred[:, j - 1: j + 1, :] += out[0: 2, :, :].transpose(1, 0, 2) 156 | else: 157 | pred[:, j - 1: j + 2, :] += out[...].transpose(1, 0, 2) 158 | elif plane == 'Z': 159 | if j == minR: 160 | pred[:, :, j: j + 2] += out[1: 3, :, :].transpose(1, 2, 0) 161 | elif j == maxR - 1: 162 | pred[:, :, j - 1: j + 1] += out[0: 2, :, :].transpose(1, 2, 0) 163 | else: 164 | pred[:, :, j - 1: j + 2] += out[...].transpose(1, 2, 0) 165 | if slice_thickness == 3: 166 | if plane == 'X': 167 | pred[minR, :, :] /= 2 168 | pred[minR + 1: maxR - 1, :, :] /= 3 169 | pred[maxR - 1, :, :] /= 2 170 | elif plane == 'Y': 171 | pred[:, minR, :] /= 2 172 | pred[:, minR + 1: maxR - 1, :] /= 3 173 | pred[:, maxR - 1, :] /= 2 174 | elif plane == 'Z': 175 | pred[:, :, minR] /= 2 176 | pred[:, :, minR + 1: maxR - 1] /= 3 177 | pred[:, :, maxR - 1] /= 2 178 | print ' Testing is finished: ' + str(time.time() - start_time) + ' second(s) elapsed.' 179 | pred = np.around(pred * 255).astype(np.uint8) 180 | np.savez_compressed(volume_file, volume = pred) 181 | print ' Data saving is finished: ' + \ 182 | str(time.time() - start_time) + ' second(s) elapsed.' 183 | pred_temp = (pred >= 128) 184 | else: 185 | pred = np.load(volume_file)['volume'].astype(np.uint8) 186 | print ' Testing result is loaded: ' + \ 187 | str(time.time() - start_time) + ' second(s) elapsed.' 188 | pred_temp = (pred >= 128) 189 | DSC[t, i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_temp) 190 | print ' DSC = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + \ 191 | ' + ' + str(label_sum) + ') = ' + str(DSC[t, i]) + ' .' 192 | output = open(result_file, 'a+') 193 | output.write(' Testcase ' + str(i + 1) + ': DSC = 2 * ' + str(inter_sum) + ' / (' + \ 194 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC[t, i]) + ' .\n') 195 | output.close() 196 | if pred_sum == 0 and label_sum == 0: 197 | DSC[t, i] = 0 198 | print ' DSC computation is finished: ' + \ 199 | str(time.time() - start_time) + ' second(s) elapsed.' 200 | print 'Snapshot ' + str(iteration[t]) + ': average DSC = ' + str(np.mean(DSC[t, :])) + ' .' 201 | output = open(result_file, 'a+') 202 | output.write('Snapshot ' + str(iteration[t]) + \ 203 | ': average DSC = ' + str(np.mean(DSC[t, :])) + ' .\n') 204 | output.close() 205 | print 'The testing process is finished.' 206 | for t in range(len(snapshot)): 207 | print ' Snapshot ' + str(iteration[t]) + ': average DSC = ' + str(np.mean(DSC[t, :])) + ' .' 208 | -------------------------------------------------------------------------------- /OrganSegRSTN/fast_functions.py: -------------------------------------------------------------------------------- 1 | # This file was automatically generated by SWIG (http://www.swig.org). 
2 | # Version 2.0.11 3 | # 4 | # Do not make changes to this file unless you know what you are doing--modify 5 | # the SWIG interface file instead. 6 | 7 | 8 | 9 | 10 | 11 | from sys import version_info 12 | if version_info >= (2,6,0): 13 | def swig_import_helper(): 14 | from os.path import dirname 15 | import imp 16 | fp = None 17 | try: 18 | fp, pathname, description = imp.find_module('_fast_functions', [dirname(__file__)]) 19 | except ImportError: 20 | import _fast_functions 21 | return _fast_functions 22 | if fp is not None: 23 | try: 24 | _mod = imp.load_module('_fast_functions', fp, pathname, description) 25 | finally: 26 | fp.close() 27 | return _mod 28 | _fast_functions = swig_import_helper() 29 | del swig_import_helper 30 | else: 31 | import _fast_functions 32 | del version_info 33 | try: 34 | _swig_property = property 35 | except NameError: 36 | pass # Python < 2.2 doesn't have 'property'. 37 | def _swig_setattr_nondynamic(self,class_type,name,value,static=1): 38 | if (name == "thisown"): return self.this.own(value) 39 | if (name == "this"): 40 | if type(value).__name__ == 'SwigPyObject': 41 | self.__dict__[name] = value 42 | return 43 | method = class_type.__swig_setmethods__.get(name,None) 44 | if method: return method(self,value) 45 | if (not static): 46 | self.__dict__[name] = value 47 | else: 48 | raise AttributeError("You cannot add attributes to %s" % self) 49 | 50 | def _swig_setattr(self,class_type,name,value): 51 | return _swig_setattr_nondynamic(self,class_type,name,value,0) 52 | 53 | def _swig_getattr(self,class_type,name): 54 | if (name == "thisown"): return self.this.own() 55 | method = class_type.__swig_getmethods__.get(name,None) 56 | if method: return method(self) 57 | raise AttributeError(name) 58 | 59 | def _swig_repr(self): 60 | try: strthis = "proxy of " + self.this.__repr__() 61 | except: strthis = "" 62 | return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) 63 | 64 | try: 65 | _object = object 66 | _newclass = 1 67 | except AttributeError: 68 | class _object : pass 69 | _newclass = 0 70 | 71 | 72 | 73 | def post_processing(*args): 74 | return _fast_functions.post_processing(*args) 75 | post_processing = _fast_functions.post_processing 76 | 77 | def DSC_computation(*args): 78 | return _fast_functions.DSC_computation(*args) 79 | DSC_computation = _fast_functions.DSC_computation 80 | # This file is compatible with both classic and new-style classes. 
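# --- Annotation (not generated by SWIG): post_processing() and DSC_computation()
# --- above are the C implementations used by the testing and fusion scripts.
# --- Below is a hedged, pure-NumPy sketch of the Dice computation, added only as
# --- a readable reference / sanity check for the C version; the name
# --- DSC_computation_numpy and the binary-mask assumption are ours, not part of
# --- the original module, and nothing in the repository calls it.
import numpy as np

def DSC_computation_numpy(label, pred):
    # Both inputs are assumed to be binary volumes of identical shape.
    # Returns (DSC, intersection, pred_sum, label_sum), mirroring the tuple
    # unpacked from the C implementation by the calling scripts.
    pred_sum = int((pred > 0).sum())
    label_sum = int((label > 0).sum())
    inter_sum = int(np.logical_and(label > 0, pred > 0).sum())
    if pred_sum + label_sum == 0:
        # The calling scripts reset empty-vs-empty cases to 0 themselves;
        # we follow the same convention here.
        return 0.0, inter_sum, pred_sum, label_sum
    return 2.0 * inter_sum / (pred_sum + label_sum), inter_sum, pred_sum, label_sum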
81 | 82 | 83 | -------------------------------------------------------------------------------- /OrganSegRSTN/indiv_training.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import shutil 5 | import time 6 | import urllib 7 | import caffe 8 | from utils import * 9 | import surgery 10 | 11 | 12 | data_path = sys.argv[1] 13 | current_fold = int(sys.argv[2]) 14 | organ_number = int(sys.argv[3]) 15 | low_range = int(sys.argv[4]) 16 | high_range = int(sys.argv[5]) 17 | slice_threshold = float(sys.argv[6]) 18 | slice_thickness = int(sys.argv[7]) 19 | organ_ID = int(sys.argv[8]) 20 | plane = sys.argv[9] 21 | GPU_ID = int(sys.argv[10]) 22 | learning_rate1 = float(sys.argv[11]) 23 | learning_rate_m1 = int(sys.argv[12]) 24 | learning_rate2 = float(sys.argv[13]) 25 | learning_rate_m2 = int(sys.argv[14]) 26 | margin = int(sys.argv[15]) 27 | prob = float(sys.argv[16]) 28 | sample_batch = int(sys.argv[17]) 29 | snapshot_path = os.path.join(snapshot_path, 'indiv:' + \ 30 | sys.argv[11] + 'x' + str(learning_rate_m1) + ',' + str(margin)) 31 | step = int(sys.argv[18]) 32 | max_iterations1 = int(sys.argv[19]) 33 | max_iterations2 = int(sys.argv[20]) 34 | fraction = float(sys.argv[21]) 35 | separate_iterations = max(int(round(max_iterations1 * fraction / step)), 1) * step 36 | timestamp = sys.argv[22] 37 | 38 | if __name__ == '__main__': 39 | snapshot_name = 'FD' + str(current_fold) + ':' + \ 40 | plane + 'I' + str(slice_thickness) + '_' + str(organ_ID) + '_' + timestamp 41 | snapshot_directory = os.path.join(snapshot_path, snapshot_name) 42 | if not os.path.exists(snapshot_directory): 43 | os.makedirs(snapshot_directory) 44 | log_file = os.path.join(log_path, snapshot_name + '.txt') 45 | log_file_ = log_filename(snapshot_directory) 46 | weights = os.path.join(pretrained_model_path, 'RSTN-scratch.caffemodel') 47 | if not os.path.isfile(weights): 48 | sys.exit('Error: the scratch model was not found, please download it from our GitHub.') 49 | 50 | 51 | if __name__ == '__main__': 52 | if not os.path.exists(prototxt_path): 53 | os.makedirs(prototxt_path) 54 | while True: 55 | if fraction > 0: 56 | prototxt_filename = 'training_S' + str(slice_thickness) + \ 57 | 'x' + str(learning_rate_m1) + '.prototxt' 58 | else: 59 | prototxt_filename = 'training_I' + str(slice_thickness) + \ 60 | 'x' + str(learning_rate_m1) + '.prototxt' 61 | prototxt_file = os.path.join(prototxt_path, prototxt_filename) 62 | prototxt_file_ = os.path.join('prototxts', prototxt_filename) 63 | shutil.copyfile(prototxt_file_, prototxt_file) 64 | if fraction > 0: 65 | solver_filename = 'solver_S' + str(slice_thickness) + \ 66 | '_FD' + str(current_fold) + '.prototxt' 67 | else: 68 | solver_filename = 'solver_I' + str(slice_thickness) + \ 69 | '_FD' + str(current_fold) + '.prototxt' 70 | solver_file = os.path.join(prototxt_path, solver_filename) 71 | output = open(solver_file, 'w') 72 | output.write('train_net: \"' + prototxt_file + '\"\n') 73 | output.write('\n' * 1) 74 | output.write('display: 20\n') 75 | output.write('average_loss: 20\n') 76 | output.write('\n' * 1) 77 | output.write('base_lr: ' + str(learning_rate1) + '\n') 78 | output.write('lr_policy: \"fixed\"\n') 79 | output.write('stepvalue: ' + str(max_iterations1) + '\n') 80 | output.write('\n' * 1) 81 | output.write('momentum: 0.99\n') 82 | output.write('\n' * 1) 83 | output.write('iter_size: 1\n') 84 | output.write('weight_decay: 0.0005\n') 85 | output.write('snapshot: ' + str(step) + '\n') 86 | 
output.write('snapshot_prefix: \"' + os.path.join(snapshot_directory, 'train') + '\"\n') 87 | output.write('\n' * 1) 88 | output.write('test_initialization: false\n') 89 | output.close() 90 | sys.path.insert(0, os.path.join(CAFFE_root, 'python')) 91 | caffe.set_device(GPU_ID) 92 | caffe.set_mode_gpu() 93 | solver = caffe.SGDSolver(solver_file) 94 | solver.net.copy_from(weights) 95 | interp_layers = [k for k in solver.net.params.keys() if 'up' in k] 96 | surgery.interp(solver.net, interp_layers) 97 | solver.step(separate_iterations) 98 | if valid_loss(log_file, separate_iterations): 99 | break 100 | prototxt_filename = 'training_I' + str(slice_thickness) + \ 101 | 'x' + str(learning_rate_m1) + '.prototxt' 102 | prototxt_file = os.path.join(prototxt_path, prototxt_filename) 103 | prototxt_file_ = os.path.join('prototxts', prototxt_filename) 104 | shutil.copyfile(prototxt_file_, prototxt_file) 105 | solver_filename = 'solver_I' + str(slice_thickness) + \ 106 | '_FD' + str(current_fold) + '.prototxt' 107 | solver_file = os.path.join(prototxt_path, solver_filename) 108 | output = open(solver_file, 'w') 109 | output.write('train_net: \"' + prototxt_file + '\"\n') 110 | output.write('\n' * 1) 111 | output.write('display: 20\n') 112 | output.write('average_loss: 20\n') 113 | output.write('\n' * 1) 114 | output.write('base_lr: ' + str(learning_rate1) + '\n') 115 | output.write('lr_policy: \"fixed\"\n') 116 | output.write('stepvalue: ' + str(max_iterations1) + '\n') 117 | output.write('\n' * 1) 118 | output.write('momentum: 0.99\n') 119 | output.write('\n' * 1) 120 | output.write('iter_size: 1\n') 121 | output.write('weight_decay: 0.0005\n') 122 | output.write('snapshot: ' + str(step) + '\n') 123 | output.write('snapshot_prefix: \"' + os.path.join(snapshot_directory, 'train') + '\"\n') 124 | output.write('\n' * 1) 125 | output.write('test_initialization: false\n') 126 | output.close() 127 | sys.path.insert(0, os.path.join(CAFFE_root, 'python')) 128 | caffe.set_device(GPU_ID) 129 | caffe.set_mode_gpu() 130 | solver = caffe.SGDSolver(solver_file) 131 | snapshot_ = os.path.join(snapshot_directory, \ 132 | 'train_iter_' + str(separate_iterations) + '.solverstate') 133 | solver.restore(snapshot_) 134 | interp_layers = [k for k in solver.net.params.keys() if 'up' in k] 135 | surgery.interp(solver.net, interp_layers) 136 | solver.step(max_iterations1 - separate_iterations) 137 | shutil.copyfile(log_file, log_file_) 138 | -------------------------------------------------------------------------------- /OrganSegRSTN/init.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import time 5 | from utils import * 6 | 7 | 8 | data_path = sys.argv[1] 9 | organ_number = int(sys.argv[2]) 10 | folds = int(sys.argv[3]) 11 | low_range = int(sys.argv[4]) 12 | high_range = int(sys.argv[5]) 13 | 14 | image_list = [] 15 | image_filename = [] 16 | keyword = '' 17 | for directory, _, file_ in os.walk(image_path): 18 | for filename in sorted(file_): 19 | if keyword in filename: 20 | image_list.append(os.path.join(directory, filename)) 21 | image_filename.append(os.path.splitext(filename)[0]) 22 | label_list = [] 23 | label_filename = [] 24 | for directory, _, file_ in os.walk(label_path): 25 | for filename in sorted(file_): 26 | if keyword in filename: 27 | label_list.append(os.path.join(directory, filename)) 28 | label_filename.append(os.path.splitext(filename)[0]) 29 | if len(image_list) != len(label_list): 30 | exit('Error: the number of 
labels and the number of images are not equal!') 31 | total_samples = len(image_list) 32 | for plane in ['X', 'Y', 'Z']: 33 | output = open(list_training[plane], 'w') 34 | output.close() 35 | print 'Initialization starts.' 36 | for i in range(total_samples): 37 | start_time = time.time() 38 | print 'Processing ' + str(i + 1) + ' out of ' + str(len(image_list)) + ' files.' 39 | image = np.load(image_list[i]) 40 | label = np.load(label_list[i]) 41 | print ' 3D volume is loaded: ' + str(time.time() - start_time) + ' second(s) elapsed.' 42 | for plane in ['X', 'Y', 'Z']: 43 | if plane == 'X': 44 | slice_number = label.shape[0] 45 | elif plane == 'Y': 46 | slice_number = label.shape[1] 47 | elif plane == 'Z': 48 | slice_number = label.shape[2] 49 | print ' Processing data on ' + plane + ' plane (' + str(slice_number) + ' slices): ' + \ 50 | str(time.time() - start_time) + ' second(s) elapsed.' 51 | image_directory_ = os.path.join(image_path_[plane], image_filename[i]) 52 | if not os.path.exists(image_directory_): 53 | os.makedirs(image_directory_) 54 | label_directory_ = os.path.join(label_path_[plane], label_filename[i]) 55 | if not os.path.exists(label_directory_): 56 | os.makedirs(label_directory_) 57 | print ' Slicing data: ' + str(time.time() - start_time) + ' second(s) elapsed.' 58 | sum_ = np.zeros((slice_number, organ_number + 1), dtype = np.int) 59 | minA = np.zeros((slice_number, organ_number + 1), dtype = np.int) 60 | maxA = np.zeros((slice_number, organ_number + 1), dtype = np.int) 61 | minB = np.zeros((slice_number, organ_number + 1), dtype = np.int) 62 | maxB = np.zeros((slice_number, organ_number + 1), dtype = np.int) 63 | average = np.zeros((slice_number), dtype = np.float) 64 | for j in range(0, slice_number): 65 | image_filename_ = os.path.join( \ 66 | image_path_[plane], image_filename[i], '{:0>4}'.format(j) + '.npy') 67 | label_filename_ = os.path.join( \ 68 | label_path_[plane], label_filename[i], '{:0>4}'.format(j) + '.npy') 69 | if plane == 'X': 70 | image_ = image[j, :, :] 71 | label_ = label[j, :, :] 72 | elif plane == 'Y': 73 | image_ = image[:, j, :] 74 | label_ = label[:, j, :] 75 | elif plane == 'Z': 76 | image_ = image[:, :, j] 77 | label_ = label[:, :, j] 78 | if not os.path.isfile(image_filename_) or not os.path.isfile(label_filename_): 79 | np.save(image_filename_, image_) 80 | np.save(label_filename_, label_) 81 | np.minimum(np.maximum(image_, low_range, image_), high_range, image_) 82 | average[j] = float(image_.sum()) / (image_.shape[0] * image_.shape[1]) 83 | for o in range(1, organ_number + 1): 84 | sum_[j, o] = (is_organ(label_, o)).sum() 85 | arr = np.nonzero(is_organ(label_, o)) 86 | minA[j, o] = 0 if not len(arr[0]) else min(arr[0]) 87 | maxA[j, o] = 0 if not len(arr[0]) else max(arr[0]) 88 | minB[j, o] = 0 if not len(arr[1]) else min(arr[1]) 89 | maxB[j, o] = 0 if not len(arr[1]) else max(arr[1]) 90 | print ' Writing training lists: ' + str(time.time() - start_time) + ' second(s) elapsed.' 
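# Annotation (added comment, not in the original script): each record written to
# list_training[plane] below describes one 2D slice in the format
#   <volume id> <slice id> <image .npy path> <label .npy path> <mean intensity>
# followed, for every organ o = 1 .. organ_number, by five integers
#   <foreground pixel count> <minA> <maxA> <minB> <maxB>,
# i.e. the organ's bounding box on that slice (all zeros when it is absent).
# The training data layer is expected to parse these fields in the same order.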
91 | output = open(list_training[plane], 'a+') 92 | for j in range(0, slice_number): 93 | image_filename_ = os.path.join( \ 94 | image_path_[plane], image_filename[i], '{:0>4}'.format(j) + '.npy') 95 | label_filename_ = os.path.join( \ 96 | label_path_[plane], label_filename[i], '{:0>4}'.format(j) + '.npy') 97 | output.write(str(i) + ' ' + str(j)) 98 | output.write(' ' + image_filename_ + ' ' + label_filename_) 99 | output.write(' ' + str(average[j])) 100 | for o in range(1, organ_number + 1): 101 | output.write(' ' + str(sum_[j, o]) + ' ' + str(minA[j, o]) + \ 102 | ' ' + str(maxA[j, o]) + ' ' + str(minB[j, o]) + ' ' + str(maxB[j, o])) 103 | output.write('\n') 104 | output.close() 105 | print ' ' + plane + ' plane is done: ' + \ 106 | str(time.time() - start_time) + ' second(s) elapsed.' 107 | print 'Processed ' + str(i + 1) + ' out of ' + str(len(image_list)) + ' files: ' + \ 108 | str(time.time() - start_time) + ' second(s) elapsed.' 109 | 110 | print 'Writing training image list.' 111 | for f in range(folds): 112 | list_training_ = training_set_filename(f) 113 | output = open(list_training_, 'w') 114 | for i in range(total_samples): 115 | if in_training_set(total_samples, i, folds, f): 116 | output.write(str(i) + ' ' + image_list[i] + ' ' + label_list[i] + '\n') 117 | output.close() 118 | print 'Writing testing image list.' 119 | for f in range(folds): 120 | list_testing_ = testing_set_filename(f) 121 | output = open(list_testing_, 'w') 122 | for i in range(total_samples): 123 | if not in_training_set(total_samples, i, folds, f): 124 | output.write(str(i) + ' ' + image_list[i] + ' ' + label_list[i] + '\n') 125 | output.close() 126 | print 'Initialization is done.' 127 | -------------------------------------------------------------------------------- /OrganSegRSTN/joint_training.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import shutil 5 | import time 6 | import urllib 7 | import caffe 8 | from utils import * 9 | import surgery 10 | 11 | 12 | data_path = sys.argv[1] 13 | current_fold = int(sys.argv[2]) 14 | organ_number = int(sys.argv[3]) 15 | low_range = int(sys.argv[4]) 16 | high_range = int(sys.argv[5]) 17 | slice_threshold = float(sys.argv[6]) 18 | slice_thickness = int(sys.argv[7]) 19 | organ_ID = int(sys.argv[8]) 20 | plane = sys.argv[9] 21 | GPU_ID = int(sys.argv[10]) 22 | learning_rate1 = float(sys.argv[11]) 23 | learning_rate_m1 = int(sys.argv[12]) 24 | learning_rate2 = float(sys.argv[13]) 25 | learning_rate_m2 = int(sys.argv[14]) 26 | margin = int(sys.argv[15]) 27 | prob = float(sys.argv[16]) 28 | sample_batch = int(sys.argv[17]) 29 | snapshot_path1 = os.path.join(snapshot_path, 'indiv:' + \ 30 | sys.argv[11] + 'x' + str(learning_rate_m1) + ',' + str(margin)) 31 | snapshot_path2 = os.path.join(snapshot_path, 'joint:' + \ 32 | sys.argv[11] + 'x' + str(learning_rate_m1) + ',' + \ 33 | sys.argv[13] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 34 | step = int(sys.argv[18]) 35 | max_iterations1 = int(sys.argv[19]) 36 | max_iterations2 = int(sys.argv[20]) 37 | timestamp1 = sys.argv[21] 38 | if len(sys.argv) == 23: 39 | timestamp2 = sys.argv[22] 40 | 41 | if __name__ == '__main__': 42 | if len(timestamp1) < 15: 43 | snapshot_name = snapshot_name_from_timestamp(snapshot_path1, \ 44 | current_fold, plane, 'I', slice_thickness, organ_ID, [max_iterations1], timestamp1) 45 | timestamp1 = snapshot_name[-15: ] 46 | snapshot_name2 = 'FD' + str(current_fold) + ':' + \ 47 | plane + 'I' + 
str(slice_thickness) + '_' + str(organ_ID) + '_' + timestamp1 + ',' + \ 48 | plane + 'J' + str(slice_thickness) + '_' + str(organ_ID) + '_' + timestamp2 49 | snapshot_directory2 = os.path.join(snapshot_path2, snapshot_name2) 50 | if not os.path.exists(snapshot_directory2): 51 | os.makedirs(snapshot_directory2) 52 | snapshot_name2_ = 'FD' + str(current_fold) + ':' + \ 53 | plane + 'J' + str(slice_thickness) + '_' + str(organ_ID) + '_' + timestamp2 54 | log_file2 = os.path.join(log_path, snapshot_name2_ + '.txt') 55 | log_file2_ = log_filename(snapshot_directory2) 56 | 57 | 58 | if __name__ == '__main__': 59 | if not os.path.exists(prototxt_path): 60 | os.makedirs(prototxt_path) 61 | prototxt_filename = 'training_J' + str(slice_thickness) + \ 62 | 'x' + str(learning_rate_m2) + '.prototxt' 63 | prototxt_file = os.path.join(prototxt_path, prototxt_filename) 64 | prototxt_file_ = os.path.join('prototxts', prototxt_filename) 65 | shutil.copyfile(prototxt_file_, prototxt_file) 66 | solver_filename = 'solver_J' + str(slice_thickness) + \ 67 | '_FD' + str(current_fold) + '.prototxt' 68 | solver_file = os.path.join(prototxt_path, solver_filename) 69 | output = open(solver_file, 'w') 70 | output.write('train_net: \"' + prototxt_file + '\"\n') 71 | output.write('\n' * 1) 72 | output.write('display: 20\n') 73 | output.write('average_loss: 20\n') 74 | output.write('\n' * 1) 75 | output.write('base_lr: ' + str(learning_rate2) + '\n') 76 | output.write('lr_policy: \"multistep\"\n') 77 | output.write('gamma: 0.5\n') 78 | output.write('stepvalue: ' + str(max_iterations1 + max_iterations2 / 4 * 1) + '\n') 79 | output.write('stepvalue: ' + str(max_iterations1 + max_iterations2 / 4 * 2) + '\n') 80 | output.write('stepvalue: ' + str(max_iterations1 + max_iterations2 / 4 * 3) + '\n') 81 | output.write('\n' * 1) 82 | output.write('momentum: 0.99\n') 83 | output.write('\n' * 1) 84 | output.write('iter_size: 1\n') 85 | output.write('weight_decay: 0.0005\n') 86 | output.write('snapshot: ' + str(step) + '\n') 87 | output.write('snapshot_prefix: \"' + os.path.join(snapshot_directory2, 'train') + '\"\n') 88 | output.write('\n' * 1) 89 | output.write('test_initialization: false\n') 90 | output.close() 91 | sys.path.insert(0, os.path.join(CAFFE_root, 'python')) 92 | caffe.set_device(GPU_ID) 93 | caffe.set_mode_gpu() 94 | solver = caffe.SGDSolver(solver_file) 95 | snapshot_name = snapshot_name_from_timestamp(snapshot_path1, \ 96 | current_fold, plane, 'I', slice_thickness, organ_ID, [max_iterations1], timestamp1) 97 | snapshot_ = os.path.join(snapshot_path1, \ 98 | snapshot_name, 'train_iter_' + str(max_iterations1) + '.solverstate') 99 | solver.restore(snapshot_) 100 | interp_layers = [k for k in solver.net.params.keys() if 'up' in k] 101 | surgery.interp(solver.net, interp_layers) 102 | solver.step(max_iterations2) 103 | shutil.copyfile(log_file2, log_file2_) 104 | -------------------------------------------------------------------------------- /OrganSegRSTN/oracle_fusion.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import time 5 | from utils import * 6 | 7 | 8 | data_path = sys.argv[1] 9 | current_fold = int(sys.argv[2]) 10 | organ_number = int(sys.argv[3]) 11 | low_range = int(sys.argv[4]) 12 | high_range = int(sys.argv[5]) 13 | slice_threshold = float(sys.argv[6]) 14 | slice_thickness = int(sys.argv[7]) 15 | organ_ID = int(sys.argv[8]) 16 | GPU_ID = int(sys.argv[9]) 17 | learning_rate1 = float(sys.argv[10]) 18 | 
learning_rate_m1 = int(sys.argv[11]) 19 | learning_rate2 = float(sys.argv[12]) 20 | learning_rate_m2 = int(sys.argv[13]) 21 | margin = int(sys.argv[14]) 22 | result_path = os.path.join(result_path, 'oracle:' + \ 23 | sys.argv[10] + 'x' + str(learning_rate_m1) + ',' + \ 24 | sys.argv[12] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 25 | starting_iterations = int(sys.argv[15]) 26 | step = int(sys.argv[16]) 27 | max_iterations = int(sys.argv[17]) 28 | iteration = range(starting_iterations, max_iterations + 1, step) 29 | threshold = float(sys.argv[18]) 30 | timestamp1 = {} 31 | timestamp1['X'] = sys.argv[19] 32 | timestamp1['Y'] = sys.argv[20] 33 | timestamp1['Z'] = sys.argv[21] 34 | timestamp2 = {} 35 | timestamp2['X'] = sys.argv[22] 36 | timestamp2['Y'] = sys.argv[23] 37 | timestamp2['Z'] = sys.argv[24] 38 | volume_list = open(testing_set_filename(current_fold), 'r').read().splitlines() 39 | while volume_list[len(volume_list) - 1] == '': 40 | volume_list.pop() 41 | result_name_ = {} 42 | result_directory_ = {} 43 | for plane in ['X', 'Y', 'Z']: 44 | result_name__ = result_name_from_timestamp_2(result_path, \ 45 | current_fold, plane, 'I', 'J', slice_thickness, organ_ID, \ 46 | iteration, volume_list, timestamp1[plane], timestamp2[plane]) 47 | if result_name__ == '': 48 | exit('Error: no valid result directories are detected!') 49 | result_directory__ = os.path.join(result_path, result_name__, 'volumes') 50 | print 'Result directory for plane ' + plane + ': ' + result_directory__ + ' .' 51 | if result_name__.startswith('FD'): 52 | index_ = result_name__.find(':') 53 | result_name__ = result_name__[index_ + 1: ] 54 | result_name_[plane] = result_name__ 55 | result_directory_[plane] = result_directory__ 56 | 57 | DSC_X = np.zeros((len(volume_list))) 58 | DSC_Y = np.zeros((len(volume_list))) 59 | DSC_Z = np.zeros((len(volume_list))) 60 | DSC_F1 = np.zeros((len(volume_list))) 61 | DSC_F2 = np.zeros((len(volume_list))) 62 | DSC_F3 = np.zeros((len(volume_list))) 63 | DSC_F1P = np.zeros((len(volume_list))) 64 | DSC_F2P = np.zeros((len(volume_list))) 65 | DSC_F3P = np.zeros((len(volume_list))) 66 | result_name = 'FD' + str(current_fold) + ':' + 'fusion:' + result_name_['X'] + ',' + \ 67 | result_name_['Y'] + ',' + result_name_['Z'] + ':' + str(starting_iterations) + '_' + \ 68 | str(step) + '_' + str(max_iterations) + ',' + str(threshold) 69 | result_directory = os.path.join(result_path, result_name, 'volumes') 70 | if not os.path.exists(result_directory): 71 | os.makedirs(result_directory) 72 | result_file = os.path.join(result_path, result_name, 'results.txt') 73 | output = open(result_file, 'w') 74 | output.close() 75 | output = open(result_file, 'a+') 76 | output.write('Fusing results of ' + str(len(iteration)) + ' snapshots:\n') 77 | output.close() 78 | for i in range(len(volume_list)): 79 | start_time = time.time() 80 | print 'Testing ' + str(i + 1) + ' out of ' + str(len(volume_list)) + ' testcases.' 
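# Annotation (added comment): for each test case the loop below
#   (1) averages the stored sigmoid maps of all selected snapshots for each plane
#       (X, Y, Z) and thresholds them to obtain single-plane masks,
#   (2) sums the three per-plane average probability maps and thresholds the sum
#       at 0.5 / 1.5 / 2.5, giving the fused masks F1 / F2 / F3 (roughly: any
#       plane, majority of planes, all planes), and
#   (3) feeds F1-F3 together with a non-empty seed S (F3, falling back to F2,
#       then F1) to post_processing(), producing F1P / F2P / F3P.
# The DSC of every variant is printed and appended to results.txt.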
81 | output = open(result_file, 'a+') 82 | output.write(' Testcase ' + str(i + 1) + ':\n') 83 | output.close() 84 | s = volume_list[i].split(' ') 85 | label = np.load(s[2]) 86 | label = is_organ(label, organ_ID).astype(np.uint8) 87 | for plane in ['X', 'Y', 'Z']: 88 | volume_file = volume_filename_fusion(result_directory, plane, i) 89 | pred = np.zeros(label.shape, dtype = np.float32) 90 | for t in range(len(iteration)): 91 | volume_file_ = volume_filename_testing(result_directory_[plane], iteration[t], i) 92 | pred += np.load(volume_file_)['volume'] 93 | pred_ = (pred >= threshold * 255 * len(iteration)) 94 | if not os.path.isfile(volume_file): 95 | np.savez_compressed(volume_file, volume = pred_) 96 | DSC_, inter_sum, pred_sum, label_sum = DSC_computation(label, pred_) 97 | print ' DSC_' + plane + ' = 2 * ' + str(inter_sum) + ' / (' + \ 98 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_) + ' .' 99 | output = open(result_file, 'a+') 100 | output.write(' DSC_' + plane + ' = 2 * ' + str(inter_sum) + ' / (' + \ 101 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_) + ' .\n') 102 | output.close() 103 | if pred_sum == 0 and label_sum == 0: 104 | DSC_ = 0 105 | pred /= (255 * len(iteration)) 106 | if plane == 'X': 107 | pred_X = pred 108 | DSC_X[i] = DSC_ 109 | elif plane == 'Y': 110 | pred_Y = pred 111 | DSC_Y[i] = DSC_ 112 | elif plane == 'Z': 113 | pred_Z = pred 114 | DSC_Z[i] = DSC_ 115 | volume_file_F1 = volume_filename_fusion(result_directory, 'F1', i) 116 | volume_file_F2 = volume_filename_fusion(result_directory, 'F2', i) 117 | volume_file_F3 = volume_filename_fusion(result_directory, 'F3', i) 118 | if not os.path.isfile(volume_file_F1) or not os.path.isfile(volume_file_F2) or \ 119 | not os.path.isfile(volume_file_F3): 120 | pred_total = pred_X + pred_Y + pred_Z 121 | if os.path.isfile(volume_file_F1): 122 | pred_F1 = np.load(volume_file_F1)['volume'].astype(np.uint8) 123 | else: 124 | pred_F1 = (pred_total >= 0.5).astype(np.uint8) 125 | np.savez_compressed(volume_file_F1, volume = pred_F1) 126 | DSC_F1[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F1) 127 | print ' DSC_F1 = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' \ 128 | + str(label_sum) + ') = ' + str(DSC_F1[i]) + ' .' 129 | output = open(result_file, 'a+') 130 | output.write(' DSC_F1 = 2 * ' + str(inter_sum) + ' / (' + \ 131 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F1[i]) + ' .\n') 132 | output.close() 133 | if pred_sum == 0 and label_sum == 0: 134 | DSC_F1[i] = 0 135 | if os.path.isfile(volume_file_F2): 136 | pred_F2 = np.load(volume_file_F2)['volume'].astype(np.uint8) 137 | else: 138 | pred_F2 = (pred_total >= 1.5).astype(np.uint8) 139 | np.savez_compressed(volume_file_F2, volume = pred_F2) 140 | DSC_F2[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F2) 141 | print ' DSC_F2 = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 142 | str(label_sum) + ') = ' + str(DSC_F2[i]) + ' .' 
143 | output = open(result_file, 'a+') 144 | output.write(' DSC_F2 = 2 * ' + str(inter_sum) + ' / (' + \ 145 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F2[i]) + ' .\n') 146 | output.close() 147 | if pred_sum == 0 and label_sum == 0: 148 | DSC_F2[i] = 0 149 | if os.path.isfile(volume_file_F3): 150 | pred_F3 = np.load(volume_file_F3)['volume'].astype(np.uint8) 151 | else: 152 | pred_F3 = (pred_total >= 2.5).astype(np.uint8) 153 | np.savez_compressed(volume_file_F3, volume = pred_F3) 154 | DSC_F3[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F3) 155 | print ' DSC_F3 = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 156 | str(label_sum) + ') = ' + str(DSC_F3[i]) + ' .' 157 | output = open(result_file, 'a+') 158 | output.write(' DSC_F3 = 2 * ' + str(inter_sum) + ' / (' + \ 159 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F3[i]) + ' .\n') 160 | output.close() 161 | if pred_sum == 0 and label_sum == 0: 162 | DSC_F3[i] = 0 163 | volume_file_F1P = volume_filename_fusion(result_directory, 'F1P', i) 164 | volume_file_F2P = volume_filename_fusion(result_directory, 'F2P', i) 165 | volume_file_F3P = volume_filename_fusion(result_directory, 'F3P', i) 166 | S = pred_F3 167 | if (S.sum() == 0): 168 | S = pred_F2 169 | if (S.sum() == 0): 170 | S = pred_F1 171 | if os.path.isfile(volume_file_F1P): 172 | pred_F1P = np.load(volume_file_F1P)['volume'].astype(np.uint8) 173 | else: 174 | pred_F1P = post_processing(pred_F1, S, 0.5, organ_ID) 175 | np.savez_compressed(volume_file_F1P, volume = pred_F1P) 176 | DSC_F1P[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F1P) 177 | print ' DSC_F1P = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 178 | str(label_sum) + ') = ' + str(DSC_F1P[i]) + ' .' 179 | output = open(result_file, 'a+') 180 | output.write(' DSC_F1P = 2 * ' + str(inter_sum) + ' / (' + \ 181 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F1P[i]) + ' .\n') 182 | output.close() 183 | if pred_sum == 0 and label_sum == 0: 184 | DSC_F1P[i] = 0 185 | if os.path.isfile(volume_file_F2P): 186 | pred_F2P = np.load(volume_file_F2P)['volume'].astype(np.uint8) 187 | else: 188 | pred_F2P = post_processing(pred_F2, S, 0.5, organ_ID) 189 | np.savez_compressed(volume_file_F2P, volume = pred_F2P) 190 | DSC_F2P[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F2P) 191 | print ' DSC_F2P = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 192 | str(label_sum) + ') = ' + str(DSC_F2P[i]) + ' .' 193 | output = open(result_file, 'a+') 194 | output.write(' DSC_F2P = 2 * ' + str(inter_sum) + ' / (' + \ 195 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F2P[i]) + ' .\n') 196 | output.close() 197 | if pred_sum == 0 and label_sum == 0: 198 | DSC_F2P[i] = 0 199 | if os.path.isfile(volume_file_F3P): 200 | pred_F3P = np.load(volume_file_F3P)['volume'].astype(np.uint8) 201 | else: 202 | pred_F3P = post_processing(pred_F3, S, 0.5, organ_ID) 203 | np.savez_compressed(volume_file_F3P, volume = pred_F3P) 204 | DSC_F3P[i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_F3P) 205 | print ' DSC_F3P = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + \ 206 | str(label_sum) + ') = ' + str(DSC_F3P[i]) + ' .' 
207 | output = open(result_file, 'a+') 208 | output.write(' DSC_F3P = 2 * ' + str(inter_sum) + ' / (' + \ 209 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC_F3P[i]) + ' .\n') 210 | output.close() 211 | if pred_sum == 0 and label_sum == 0: 212 | DSC_F3P[i] = 0 213 | pred_X = None 214 | pred_Y = None 215 | pred_Z = None 216 | pred_F1 = None 217 | pred_F2 = None 218 | pred_F3 = None 219 | pred_F1P = None 220 | pred_F2P = None 221 | pred_F3P = None 222 | output = open(result_file, 'a+') 223 | print 'Average DSC_X = ' + str(np.mean(DSC_X)) + ' .' 224 | output.write('Average DSC_X = ' + str(np.mean(DSC_X)) + ' .\n') 225 | print 'Average DSC_Y = ' + str(np.mean(DSC_Y)) + ' .' 226 | output.write('Average DSC_Y = ' + str(np.mean(DSC_Y)) + ' .\n') 227 | print 'Average DSC_Z = ' + str(np.mean(DSC_Z)) + ' .' 228 | output.write('Average DSC_Z = ' + str(np.mean(DSC_Z)) + ' .\n') 229 | print 'Average DSC_F1 = ' + str(np.mean(DSC_F1)) + ' .' 230 | output.write('Average DSC_F1 = ' + str(np.mean(DSC_F1)) + ' .\n') 231 | print 'Average DSC_F2 = ' + str(np.mean(DSC_F2)) + ' .' 232 | output.write('Average DSC_F2 = ' + str(np.mean(DSC_F2)) + ' .\n') 233 | print 'Average DSC_F3 = ' + str(np.mean(DSC_F3)) + ' .' 234 | output.write('Average DSC_F3 = ' + str(np.mean(DSC_F3)) + ' .\n') 235 | print 'Average DSC_F1P = ' + str(np.mean(DSC_F1P)) + ' .' 236 | output.write('Average DSC_F1P = ' + str(np.mean(DSC_F1P)) + ' .\n') 237 | print 'Average DSC_F2P = ' + str(np.mean(DSC_F2P)) + ' .' 238 | output.write('Average DSC_F2P = ' + str(np.mean(DSC_F2P)) + ' .\n') 239 | print 'Average DSC_F3P = ' + str(np.mean(DSC_F3P)) + ' .' 240 | output.write('Average DSC_F3P = ' + str(np.mean(DSC_F3P)) + ' .\n') 241 | output.close() 242 | print 'The fusion process is finished.' 243 | -------------------------------------------------------------------------------- /OrganSegRSTN/oracle_testing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import shutil 5 | import time 6 | import caffe 7 | from utils import * 8 | 9 | 10 | data_path = sys.argv[1] 11 | current_fold = int(sys.argv[2]) 12 | organ_number = int(sys.argv[3]) 13 | low_range = int(sys.argv[4]) 14 | high_range = int(sys.argv[5]) 15 | slice_threshold = float(sys.argv[6]) 16 | slice_thickness = int(sys.argv[7]) 17 | organ_ID = int(sys.argv[8]) 18 | plane = sys.argv[9] 19 | GPU_ID = int(sys.argv[10]) 20 | learning_rate1 = float(sys.argv[11]) 21 | learning_rate_m1 = int(sys.argv[12]) 22 | learning_rate2 = float(sys.argv[13]) 23 | learning_rate_m2 = int(sys.argv[14]) 24 | margin = int(sys.argv[15]) 25 | prob = float(sys.argv[16]) 26 | sample_batch = int(sys.argv[17]) 27 | snapshot_path1 = os.path.join(snapshot_path, 'indiv:' + \ 28 | sys.argv[11] + 'x' + str(learning_rate_m1) + ',' + str(margin)) 29 | snapshot_path2 = os.path.join(snapshot_path, 'joint:' + \ 30 | sys.argv[11] + 'x' + str(learning_rate_m1) + ',' + \ 31 | sys.argv[13] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 32 | result_path = os.path.join(result_path, 'oracle:' + \ 33 | sys.argv[11] + 'x' + str(learning_rate_m1) + ',' + \ 34 | sys.argv[13] + 'x' + str(learning_rate_m2) + ',' + str(margin)) 35 | max_iterations1 = int(sys.argv[18]) 36 | max_iterations2 = int(sys.argv[19]) 37 | starting_iterations = int(sys.argv[20]) 38 | step = int(sys.argv[21]) 39 | max_iterations = int(sys.argv[22]) 40 | iteration = range(starting_iterations, max_iterations + 1, step) 41 | timestamp1 = sys.argv[23] 42 | timestamp2 = 
sys.argv[24] 43 | [snapshot_name1, snapshot_name2] = snapshot_name_from_timestamp_2(snapshot_path1, snapshot_path2, \ 44 | current_fold, plane, 'I', 'J', slice_thickness, organ_ID, iteration, timestamp1, timestamp2) 45 | if snapshot_name1 == '' or snapshot_name2 == '': 46 | exit('Error: no valid snapshot directories are detected!') 47 | snapshot_directory1 = os.path.join(snapshot_path1, snapshot_name1) 48 | print 'Snapshot directory 1: ' + snapshot_directory1 + ' .' 49 | snapshot_directory2 = os.path.join(snapshot_path2, snapshot_name2) 50 | print 'Snapshot directory 2: ' + snapshot_directory2 + ' .' 51 | snapshot = [] 52 | for t in range(len(iteration)): 53 | snapshot_file1 = snapshot_filename(snapshot_directory1, iteration[t]) 54 | snapshot_file2 = snapshot_filename(snapshot_directory2, iteration[t]) 55 | if os.path.isfile(snapshot_file1): 56 | snapshot.append(snapshot_file1) 57 | else: 58 | snapshot.append(snapshot_file2) 59 | print str(len(snapshot)) + ' snapshots are to be evaluated.' 60 | for t in range(len(snapshot)): 61 | print ' Snapshot #' + str(t + 1) + ': ' + snapshot[t] + ' .' 62 | result_name = snapshot_name2 63 | 64 | sys.path.insert(0, os.path.join(CAFFE_root, 'python')) 65 | caffe.set_device(GPU_ID) 66 | caffe.set_mode_gpu() 67 | 68 | volume_list = open(testing_set_filename(current_fold), 'r').read().splitlines() 69 | while volume_list[len(volume_list) - 1] == '': 70 | volume_list.pop() 71 | DSC = np.zeros((len(snapshot), len(volume_list))) 72 | result_directory = os.path.join(result_path, result_name, 'volumes') 73 | if not os.path.exists(result_directory): 74 | os.makedirs(result_directory) 75 | result_file = os.path.join(result_path, result_name, 'results.txt') 76 | output = open(result_file, 'w') 77 | output.close() 78 | for t in range(len(snapshot)): 79 | output = open(result_file, 'a+') 80 | output.write('Evaluating snapshot ' + str(iteration[t]) + ':\n') 81 | output.close() 82 | finished = True 83 | for i in range(len(volume_list)): 84 | volume_file = volume_filename_testing(result_directory, iteration[t], i) 85 | if not os.path.isfile(volume_file): 86 | finished = False 87 | break 88 | if not finished: 89 | deploy_filename = 'deploy_' + 'O' + str(slice_thickness) + '.prototxt' 90 | deploy_file = os.path.join(prototxt_path, deploy_filename) 91 | deploy_file_ = os.path.join('prototxts', deploy_filename) 92 | shutil.copyfile(deploy_file_, deploy_file) 93 | net = caffe.Net(deploy_file, snapshot[t], caffe.TEST) 94 | for i in range(len(volume_list)): 95 | start_time = time.time() 96 | print 'Testing ' + str(i + 1) + ' out of ' + str(len(volume_list)) + ' testcases, ' + \ 97 | str(t + 1) + ' out of ' + str(len(snapshot)) + ' snapshots.' 98 | volume_file = volume_filename_testing(result_directory, iteration[t], i) 99 | s = volume_list[i].split(' ') 100 | label = np.load(s[2]) 101 | label = is_organ(label, organ_ID).astype(np.uint8) 102 | if not os.path.isfile(volume_file): 103 | image = np.load(s[1]).astype(np.float32) 104 | np.minimum(np.maximum(image, low_range, image), high_range, image) 105 | image -= low_range 106 | image /= (high_range - low_range) 107 | print ' Data loading is finished: ' + \ 108 | str(time.time() - start_time) + ' second(s) elapsed.' 
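# Annotation (added comment): the image has just been clipped in place to the
# intensity window [low_range, high_range] and rescaled to [0, 1]. Unlike plain
# coarse testing, this oracle test also feeds the ground-truth mask into the
# network ('label' plus the crop_* blobs below, with crop_prob and
# crop_sample_batch fixed to 0), and slices on which the organ is absent are
# skipped, so the crop seen by the fine-scaled branch is derived from the true
# organ; the resulting DSC is therefore an upper-bound ("oracle") estimate.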
109 | pred = np.zeros(image.shape, dtype = np.float32) 110 | label_sumX = np.sum(label, axis = (1, 2)) 111 | label_sumY = np.sum(label, axis = (0, 2)) 112 | label_sumZ = np.sum(label, axis = (0, 1)) 113 | if label_sumX.sum() == 0: 114 | continue 115 | minR = 0 116 | if plane == 'X': 117 | maxR = image.shape[0] 118 | shape_ = (1, 3, image.shape[1], image.shape[2]) 119 | elif plane == 'Y': 120 | maxR = image.shape[1] 121 | shape_ = (1, 3, image.shape[0], image.shape[2]) 122 | elif plane == 'Z': 123 | maxR = image.shape[2] 124 | shape_ = (1, 3, image.shape[0], image.shape[1]) 125 | for j in range(minR, maxR): 126 | if slice_thickness == 1: 127 | sID = [j, j, j] 128 | elif slice_thickness == 3: 129 | sID = [max(minR, j - 1), j, min(maxR - 1, j + 1)] 130 | if plane == 'X': 131 | if label_sumX[sID].sum() == 0: 132 | continue 133 | net.blobs['data'].reshape(*shape_) 134 | net.blobs['label'].reshape(*shape_) 135 | net.blobs['data'].data[0, ...] = image[sID, :, :] 136 | net.blobs['label'].data[0, ...] = label[sID, :, :] 137 | elif plane == 'Y': 138 | if label_sumY[sID].sum() == 0: 139 | continue 140 | net.blobs['data'].reshape(*shape_) 141 | net.blobs['label'].reshape(*shape_) 142 | net.blobs['data'].data[0, ...] = image[:, sID, :].transpose(1, 0, 2) 143 | net.blobs['label'].data[0, ...] = label[:, sID, :].transpose(1, 0, 2) 144 | elif plane == 'Z': 145 | if label_sumZ[sID].sum() == 0: 146 | continue 147 | net.blobs['data'].reshape(*shape_) 148 | net.blobs['label'].reshape(*shape_) 149 | net.blobs['data'].data[0, ...] = image[:, :, sID].transpose(2, 0, 1) 150 | net.blobs['label'].data[0, ...] = label[:, :, sID].transpose(2, 0, 1) 151 | net.blobs['crop_margin'].reshape((1)) 152 | net.blobs['crop_margin'].data[...] = margin 153 | net.blobs['crop_prob'].reshape((1)) 154 | net.blobs['crop_prob'].data[...] = 0 155 | net.blobs['crop_sample_batch'].reshape((1)) 156 | net.blobs['crop_sample_batch'].data[...] = 0 157 | net.forward() 158 | out = net.blobs['prob-R'].data[0, :, :, :] 159 | if slice_thickness == 1: 160 | if plane == 'X': 161 | pred[j, :, :] = out 162 | elif plane == 'Y': 163 | pred[:, j, :] = out 164 | elif plane == 'Z': 165 | pred[:, :, j] = out 166 | elif slice_thickness == 3: 167 | if plane == 'X': 168 | if j == minR: 169 | pred[j: j + 2, :, :] += out[1: 3, :, :] 170 | elif j == maxR - 1: 171 | pred[j - 1: j + 1, :, :] += out[0: 2, :, :] 172 | else: 173 | pred[j - 1: j + 2, :, :] += out[...] 174 | elif plane == 'Y': 175 | if j == minR: 176 | pred[:, j: j + 2, :] += out[1: 3, :, :].transpose(1, 0, 2) 177 | elif j == maxR - 1: 178 | pred[:, j - 1: j + 1, :] += out[0: 2, :, :].transpose(1, 0, 2) 179 | else: 180 | pred[:, j - 1: j + 2, :] += out[...].transpose(1, 0, 2) 181 | elif plane == 'Z': 182 | if j == minR: 183 | pred[:, :, j: j + 2] += out[1: 3, :, :].transpose(1, 2, 0) 184 | elif j == maxR - 1: 185 | pred[:, :, j - 1: j + 1] += out[0: 2, :, :].transpose(1, 2, 0) 186 | else: 187 | pred[:, :, j - 1: j + 2] += out[...].transpose(1, 2, 0) 188 | if slice_thickness == 3: 189 | if plane == 'X': 190 | pred[minR, :, :] /= 2 191 | pred[minR + 1: maxR - 1, :, :] /= 3 192 | pred[maxR - 1, :, :] /= 2 193 | elif plane == 'Y': 194 | pred[:, minR, :] /= 2 195 | pred[:, minR + 1: maxR - 1, :] /= 3 196 | pred[:, maxR - 1, :] /= 2 197 | elif plane == 'Z': 198 | pred[:, :, minR] /= 2 199 | pred[:, :, minR + 1: maxR - 1] /= 3 200 | pred[:, :, maxR - 1] /= 2 201 | print ' Testing is finished: ' + str(time.time() - start_time) + ' second(s) elapsed.' 
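# Annotation (added comment): with slice_thickness == 3 every interior slice has
# accumulated three overlapping predictions (as the centre of one 3-slice stack
# and as a neighbour in two others), while the two border slices accumulate only
# two, hence the /3 and /2 normalisation above. The averaged probabilities are
# quantised below to uint8 (scaled by 255) before saving, and a voxel is later
# counted as foreground when its stored value is >= 128, i.e. probability >= 0.5.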
202 | pred = np.around(pred * 255).astype(np.uint8) 203 | np.savez_compressed(volume_file, volume = pred) 204 | print ' Data saving is finished: ' + \ 205 | str(time.time() - start_time) + ' second(s) elapsed.' 206 | pred_temp = (pred >= 128) 207 | else: 208 | pred = np.load(volume_file)['volume'].astype(np.uint8) 209 | print ' Testing result is loaded: ' + \ 210 | str(time.time() - start_time) + ' second(s) elapsed.' 211 | pred_temp = (pred >= 128) 212 | DSC[t, i], inter_sum, pred_sum, label_sum = DSC_computation(label, pred_temp) 213 | print ' DSC = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + \ 214 | ' + ' + str(label_sum) + ') = ' + str(DSC[t, i]) + ' .' 215 | output = open(result_file, 'a+') 216 | output.write(' Testcase ' + str(i + 1) + ': DSC = 2 * ' + str(inter_sum) + ' / (' + \ 217 | str(pred_sum) + ' + ' + str(label_sum) + ') = ' + str(DSC[t, i]) + ' .\n') 218 | output.close() 219 | if pred_sum == 0 and label_sum == 0: 220 | DSC[t, i] = 0 221 | print ' DSC computation is finished: ' + \ 222 | str(time.time() - start_time) + ' second(s) elapsed.' 223 | print 'Snapshot ' + str(iteration[t]) + ': average DSC = ' + str(np.mean(DSC[t, :])) + ' .' 224 | output = open(result_file, 'a+') 225 | output.write('Snapshot ' + str(iteration[t]) + \ 226 | ': average DSC = ' + str(np.mean(DSC[t, :])) + ' .\n') 227 | output.close() 228 | print 'The testing process is finished.' 229 | for t in range(len(snapshot)): 230 | print ' Snapshot ' + str(iteration[t]) + ': average DSC = ' + str(np.mean(DSC[t, :])) + ' .' 231 | -------------------------------------------------------------------------------- /OrganSegRSTN/prototxts/deploy_C3.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "input" 3 | type: "Input" 4 | top: "data" 5 | input_param { 6 | shape { dim: 1 dim: 3 dim: 512 dim: 512 } 7 | } 8 | } 9 | layer { 10 | name: "conv1_1" 11 | type: "Convolution" 12 | bottom: "data" 13 | top: "conv1_1" 14 | param { 15 | lr_mult: 0 16 | decay_mult: 0 17 | } 18 | param { 19 | lr_mult: 0 20 | decay_mult: 0 21 | } 22 | convolution_param { 23 | num_output: 64 24 | pad: 100 25 | kernel_size: 3 26 | stride: 1 27 | } 28 | } 29 | layer { 30 | name: "relu1_1" 31 | type: "ReLU" 32 | bottom: "conv1_1" 33 | top: "conv1_1" 34 | } 35 | layer { 36 | name: "conv1_2" 37 | type: "Convolution" 38 | bottom: "conv1_1" 39 | top: "conv1_2" 40 | param { 41 | lr_mult: 0 42 | decay_mult: 0 43 | } 44 | param { 45 | lr_mult: 0 46 | decay_mult: 0 47 | } 48 | convolution_param { 49 | num_output: 64 50 | pad: 1 51 | kernel_size: 3 52 | stride: 1 53 | } 54 | } 55 | layer { 56 | name: "relu1_2" 57 | type: "ReLU" 58 | bottom: "conv1_2" 59 | top: "conv1_2" 60 | } 61 | layer { 62 | name: "pool1" 63 | type: "Pooling" 64 | bottom: "conv1_2" 65 | top: "pool1" 66 | pooling_param { 67 | pool: MAX 68 | kernel_size: 2 69 | stride: 2 70 | } 71 | } 72 | layer { 73 | name: "conv2_1" 74 | type: "Convolution" 75 | bottom: "pool1" 76 | top: "conv2_1" 77 | param { 78 | lr_mult: 0 79 | decay_mult: 0 80 | } 81 | param { 82 | lr_mult: 0 83 | decay_mult: 0 84 | } 85 | convolution_param { 86 | num_output: 128 87 | pad: 1 88 | kernel_size: 3 89 | stride: 1 90 | } 91 | } 92 | layer { 93 | name: "relu2_1" 94 | type: "ReLU" 95 | bottom: "conv2_1" 96 | top: "conv2_1" 97 | } 98 | layer { 99 | name: "conv2_2" 100 | type: "Convolution" 101 | bottom: "conv2_1" 102 | top: "conv2_2" 103 | param { 104 | lr_mult: 0 105 | decay_mult: 0 106 | } 107 | param { 108 | lr_mult: 0 109 | decay_mult: 0 110 | } 111 | 
convolution_param { 112 | num_output: 128 113 | pad: 1 114 | kernel_size: 3 115 | stride: 1 116 | } 117 | } 118 | layer { 119 | name: "relu2_2" 120 | type: "ReLU" 121 | bottom: "conv2_2" 122 | top: "conv2_2" 123 | } 124 | layer { 125 | name: "pool2" 126 | type: "Pooling" 127 | bottom: "conv2_2" 128 | top: "pool2" 129 | pooling_param { 130 | pool: MAX 131 | kernel_size: 2 132 | stride: 2 133 | } 134 | } 135 | layer { 136 | name: "conv3_1" 137 | type: "Convolution" 138 | bottom: "pool2" 139 | top: "conv3_1" 140 | param { 141 | lr_mult: 0 142 | decay_mult: 0 143 | } 144 | param { 145 | lr_mult: 0 146 | decay_mult: 0 147 | } 148 | convolution_param { 149 | num_output: 256 150 | pad: 1 151 | kernel_size: 3 152 | stride: 1 153 | } 154 | } 155 | layer { 156 | name: "relu3_1" 157 | type: "ReLU" 158 | bottom: "conv3_1" 159 | top: "conv3_1" 160 | } 161 | layer { 162 | name: "conv3_2" 163 | type: "Convolution" 164 | bottom: "conv3_1" 165 | top: "conv3_2" 166 | param { 167 | lr_mult: 0 168 | decay_mult: 0 169 | } 170 | param { 171 | lr_mult: 0 172 | decay_mult: 0 173 | } 174 | convolution_param { 175 | num_output: 256 176 | pad: 1 177 | kernel_size: 3 178 | stride: 1 179 | } 180 | } 181 | layer { 182 | name: "relu3_2" 183 | type: "ReLU" 184 | bottom: "conv3_2" 185 | top: "conv3_2" 186 | } 187 | layer { 188 | name: "conv3_3" 189 | type: "Convolution" 190 | bottom: "conv3_2" 191 | top: "conv3_3" 192 | param { 193 | lr_mult: 0 194 | decay_mult: 0 195 | } 196 | param { 197 | lr_mult: 0 198 | decay_mult: 0 199 | } 200 | convolution_param { 201 | num_output: 256 202 | pad: 1 203 | kernel_size: 3 204 | stride: 1 205 | } 206 | } 207 | layer { 208 | name: "relu3_3" 209 | type: "ReLU" 210 | bottom: "conv3_3" 211 | top: "conv3_3" 212 | } 213 | layer { 214 | name: "pool3" 215 | type: "Pooling" 216 | bottom: "conv3_3" 217 | top: "pool3" 218 | pooling_param { 219 | pool: MAX 220 | kernel_size: 2 221 | stride: 2 222 | } 223 | } 224 | layer { 225 | name: "conv4_1" 226 | type: "Convolution" 227 | bottom: "pool3" 228 | top: "conv4_1" 229 | param { 230 | lr_mult: 0 231 | decay_mult: 0 232 | } 233 | param { 234 | lr_mult: 0 235 | decay_mult: 0 236 | } 237 | convolution_param { 238 | num_output: 512 239 | pad: 1 240 | kernel_size: 3 241 | stride: 1 242 | } 243 | } 244 | layer { 245 | name: "relu4_1" 246 | type: "ReLU" 247 | bottom: "conv4_1" 248 | top: "conv4_1" 249 | } 250 | layer { 251 | name: "conv4_2" 252 | type: "Convolution" 253 | bottom: "conv4_1" 254 | top: "conv4_2" 255 | param { 256 | lr_mult: 0 257 | decay_mult: 0 258 | } 259 | param { 260 | lr_mult: 0 261 | decay_mult: 0 262 | } 263 | convolution_param { 264 | num_output: 512 265 | pad: 1 266 | kernel_size: 3 267 | stride: 1 268 | } 269 | } 270 | layer { 271 | name: "relu4_2" 272 | type: "ReLU" 273 | bottom: "conv4_2" 274 | top: "conv4_2" 275 | } 276 | layer { 277 | name: "conv4_3" 278 | type: "Convolution" 279 | bottom: "conv4_2" 280 | top: "conv4_3" 281 | param { 282 | lr_mult: 0 283 | decay_mult: 0 284 | } 285 | param { 286 | lr_mult: 0 287 | decay_mult: 0 288 | } 289 | convolution_param { 290 | num_output: 512 291 | pad: 1 292 | kernel_size: 3 293 | stride: 1 294 | } 295 | } 296 | layer { 297 | name: "relu4_3" 298 | type: "ReLU" 299 | bottom: "conv4_3" 300 | top: "conv4_3" 301 | } 302 | layer { 303 | name: "pool4" 304 | type: "Pooling" 305 | bottom: "conv4_3" 306 | top: "pool4" 307 | pooling_param { 308 | pool: MAX 309 | kernel_size: 2 310 | stride: 2 311 | } 312 | } 313 | layer { 314 | name: "conv5_1" 315 | type: "Convolution" 316 | bottom: "pool4" 317 | 
top: "conv5_1" 318 | param { 319 | lr_mult: 0 320 | decay_mult: 0 321 | } 322 | param { 323 | lr_mult: 0 324 | decay_mult: 0 325 | } 326 | convolution_param { 327 | num_output: 512 328 | pad: 1 329 | kernel_size: 3 330 | stride: 1 331 | } 332 | } 333 | layer { 334 | name: "relu5_1" 335 | type: "ReLU" 336 | bottom: "conv5_1" 337 | top: "conv5_1" 338 | } 339 | layer { 340 | name: "conv5_2" 341 | type: "Convolution" 342 | bottom: "conv5_1" 343 | top: "conv5_2" 344 | param { 345 | lr_mult: 0 346 | decay_mult: 0 347 | } 348 | param { 349 | lr_mult: 0 350 | decay_mult: 0 351 | } 352 | convolution_param { 353 | num_output: 512 354 | pad: 1 355 | kernel_size: 3 356 | stride: 1 357 | } 358 | } 359 | layer { 360 | name: "relu5_2" 361 | type: "ReLU" 362 | bottom: "conv5_2" 363 | top: "conv5_2" 364 | } 365 | layer { 366 | name: "conv5_3" 367 | type: "Convolution" 368 | bottom: "conv5_2" 369 | top: "conv5_3" 370 | param { 371 | lr_mult: 0 372 | decay_mult: 0 373 | } 374 | param { 375 | lr_mult: 0 376 | decay_mult: 0 377 | } 378 | convolution_param { 379 | num_output: 512 380 | pad: 1 381 | kernel_size: 3 382 | stride: 1 383 | } 384 | } 385 | layer { 386 | name: "relu5_3" 387 | type: "ReLU" 388 | bottom: "conv5_3" 389 | top: "conv5_3" 390 | } 391 | layer { 392 | name: "pool5" 393 | type: "Pooling" 394 | bottom: "conv5_3" 395 | top: "pool5" 396 | pooling_param { 397 | pool: MAX 398 | kernel_size: 2 399 | stride: 2 400 | } 401 | } 402 | layer { 403 | name: "fc6" 404 | type: "Convolution" 405 | bottom: "pool5" 406 | top: "fc6" 407 | param { 408 | lr_mult: 0 409 | decay_mult: 0 410 | } 411 | param { 412 | lr_mult: 0 413 | decay_mult: 0 414 | } 415 | convolution_param { 416 | num_output: 4096 417 | pad: 0 418 | kernel_size: 7 419 | stride: 1 420 | } 421 | } 422 | layer { 423 | name: "relu6" 424 | type: "ReLU" 425 | bottom: "fc6" 426 | top: "fc6" 427 | } 428 | layer { 429 | name: "drop6" 430 | type: "Dropout" 431 | bottom: "fc6" 432 | top: "fc6" 433 | dropout_param { 434 | dropout_ratio: 0.5 435 | } 436 | } 437 | layer { 438 | name: "fc7" 439 | type: "Convolution" 440 | bottom: "fc6" 441 | top: "fc7" 442 | param { 443 | lr_mult: 0 444 | decay_mult: 0 445 | } 446 | param { 447 | lr_mult: 0 448 | decay_mult: 0 449 | } 450 | convolution_param { 451 | num_output: 4096 452 | pad: 0 453 | kernel_size: 1 454 | stride: 1 455 | } 456 | } 457 | layer { 458 | name: "relu7" 459 | type: "ReLU" 460 | bottom: "fc7" 461 | top: "fc7" 462 | } 463 | layer { 464 | name: "drop7" 465 | type: "Dropout" 466 | bottom: "fc7" 467 | top: "fc7" 468 | dropout_param { 469 | dropout_ratio: 0.5 470 | } 471 | } 472 | layer { 473 | name: "score_fr-new" 474 | type: "Convolution" 475 | bottom: "fc7" 476 | top: "score_fr-new" 477 | param { 478 | lr_mult: 0 479 | decay_mult: 0 480 | } 481 | param { 482 | lr_mult: 0 483 | decay_mult: 0 484 | } 485 | convolution_param { 486 | num_output: 3 487 | kernel_size: 1 488 | } 489 | } 490 | layer { 491 | name: "upscore2-new" 492 | type: "Deconvolution" 493 | bottom: "score_fr-new" 494 | top: "upscore2-new" 495 | param { 496 | lr_mult: 0 497 | } 498 | convolution_param { 499 | num_output: 3 500 | bias_term: false 501 | kernel_size: 4 502 | stride: 2 503 | } 504 | } 505 | layer { 506 | name: "score_pool4-new" 507 | type: "Convolution" 508 | bottom: "pool4" 509 | top: "score_pool4-new" 510 | param { 511 | lr_mult: 0 512 | decay_mult: 0 513 | } 514 | param { 515 | lr_mult: 0 516 | decay_mult: 0 517 | } 518 | convolution_param { 519 | num_output: 3 520 | pad: 0 521 | kernel_size: 1 522 | } 523 | } 524 | layer { 
525 | name: "score_pool4c" 526 | type: "Crop" 527 | bottom: "score_pool4-new" 528 | bottom: "upscore2-new" 529 | top: "score_pool4c" 530 | crop_param { 531 | axis: 2 532 | offset: 5 533 | } 534 | } 535 | layer { 536 | name: "fuse_pool4" 537 | type: "Eltwise" 538 | bottom: "upscore2-new" 539 | bottom: "score_pool4c" 540 | top: "fuse_pool4" 541 | eltwise_param { 542 | operation: SUM 543 | } 544 | } 545 | layer { 546 | name: "upscore_pool4-new" 547 | type: "Deconvolution" 548 | bottom: "fuse_pool4" 549 | top: "upscore_pool4-new" 550 | param { 551 | lr_mult: 0 552 | } 553 | convolution_param { 554 | num_output: 3 555 | bias_term: false 556 | kernel_size: 4 557 | stride: 2 558 | } 559 | } 560 | layer { 561 | name: "score_pool3-new" 562 | type: "Convolution" 563 | bottom: "pool3" 564 | top: "score_pool3-new" 565 | param { 566 | lr_mult: 0 567 | decay_mult: 0 568 | } 569 | param { 570 | lr_mult: 0 571 | decay_mult: 0 572 | } 573 | convolution_param { 574 | num_output: 3 575 | pad: 0 576 | kernel_size: 1 577 | } 578 | } 579 | layer { 580 | name: "score_pool3c" 581 | type: "Crop" 582 | bottom: "score_pool3-new" 583 | bottom: "upscore_pool4-new" 584 | top: "score_pool3c" 585 | crop_param { 586 | axis: 2 587 | offset: 9 588 | } 589 | } 590 | layer { 591 | name: "fuse_pool3" 592 | type: "Eltwise" 593 | bottom: "upscore_pool4-new" 594 | bottom: "score_pool3c" 595 | top: "fuse_pool3" 596 | eltwise_param { 597 | operation: SUM 598 | } 599 | } 600 | layer { 601 | name: "upscore8-new" 602 | type: "Deconvolution" 603 | bottom: "fuse_pool3" 604 | top: "upscore8-new" 605 | param { 606 | lr_mult: 0 607 | } 608 | convolution_param { 609 | num_output: 3 610 | bias_term: false 611 | kernel_size: 16 612 | stride: 8 613 | } 614 | } 615 | layer { 616 | name: "upscore8_cropped" 617 | type: "Crop" 618 | bottom: "upscore8-new" 619 | bottom: "data" 620 | top: "upscore8_cropped" 621 | crop_param { 622 | axis: 2 623 | offset: 31 624 | } 625 | } 626 | layer { 627 | name: "prob" 628 | type: "Sigmoid" 629 | bottom: "upscore8_cropped" 630 | top: "prob" 631 | } 632 | -------------------------------------------------------------------------------- /OrganSegRSTN/prototxts/deploy_F3.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "input" 3 | type: "Input" 4 | top: "data" 5 | top: "prob" 6 | top: "label" 7 | top: "crop_margin" 8 | top: "crop_prob" 9 | top: "crop_sample_batch" 10 | input_param { 11 | shape { dim: 1 dim: 3 dim: 512 dim: 512 } 12 | shape { dim: 1 dim: 3 dim: 512 dim: 512 } 13 | shape { dim: 1 dim: 3 dim: 512 dim: 512 } 14 | shape { dim: 1 } 15 | shape { dim: 1 } 16 | shape { dim: 1 } 17 | } 18 | } 19 | layer { 20 | name: "saliency1" 21 | type: "Convolution" 22 | bottom: "prob" 23 | top: "saliency1" 24 | param { 25 | lr_mult: 0 26 | decay_mult: 0 27 | } 28 | param { 29 | lr_mult: 0 30 | decay_mult: 0 31 | } 32 | convolution_param { 33 | num_output: 3 34 | kernel_size: 3 35 | stride: 1 36 | pad: 1 37 | } 38 | } 39 | layer { 40 | name: "relu_saliency1" 41 | type: "ReLU" 42 | bottom: "saliency1" 43 | top: "saliency1" 44 | } 45 | layer { 46 | name: "saliency" 47 | type: "Convolution" 48 | bottom: "saliency1" 49 | top: "saliency" 50 | param { 51 | lr_mult: 0 52 | decay_mult: 0 53 | } 54 | param { 55 | lr_mult: 0 56 | decay_mult: 0 57 | } 58 | convolution_param { 59 | num_output: 3 60 | kernel_size: 5 61 | stride: 1 62 | pad: 2 63 | } 64 | } 65 | layer { 66 | name: "saliency_data" 67 | type: "Eltwise" 68 | bottom: "data" 69 | bottom: "saliency" 70 | top: 
"saliency_data" 71 | eltwise_param { 72 | operation: PROD 73 | } 74 | } 75 | layer{ 76 | name: "crop" 77 | type: "Python" 78 | bottom: "label" 79 | bottom: "saliency_data" 80 | bottom: "crop_margin" 81 | bottom: "crop_prob" 82 | bottom: "crop_sample_batch" 83 | top: "data-R" 84 | top: "crop_info" 85 | python_param { 86 | module: "Crop" 87 | layer: "CropLayer" 88 | param_str: '{"TEST": 1}' 89 | } 90 | } 91 | layer { 92 | name: "conv1_1-R" 93 | type: "Convolution" 94 | bottom: "data-R" 95 | top: "conv1_1-R" 96 | param { 97 | lr_mult: 0 98 | decay_mult: 0 99 | } 100 | param { 101 | lr_mult: 0 102 | decay_mult: 0 103 | } 104 | convolution_param { 105 | num_output: 64 106 | pad: 100 107 | kernel_size: 3 108 | stride: 1 109 | } 110 | } 111 | layer { 112 | name: "relu1_1-R" 113 | type: "ReLU" 114 | bottom: "conv1_1-R" 115 | top: "conv1_1-R" 116 | } 117 | layer { 118 | name: "conv1_2-R" 119 | type: "Convolution" 120 | bottom: "conv1_1-R" 121 | top: "conv1_2-R" 122 | param { 123 | lr_mult: 0 124 | decay_mult: 0 125 | } 126 | param { 127 | lr_mult: 0 128 | decay_mult: 0 129 | } 130 | convolution_param { 131 | num_output: 64 132 | pad: 1 133 | kernel_size: 3 134 | stride: 1 135 | } 136 | } 137 | layer { 138 | name: "relu1_2-R" 139 | type: "ReLU" 140 | bottom: "conv1_2-R" 141 | top: "conv1_2-R" 142 | } 143 | layer { 144 | name: "pool1-R" 145 | type: "Pooling" 146 | bottom: "conv1_2-R" 147 | top: "pool1-R" 148 | pooling_param { 149 | pool: MAX 150 | kernel_size: 2 151 | stride: 2 152 | } 153 | } 154 | layer { 155 | name: "conv2_1-R" 156 | type: "Convolution" 157 | bottom: "pool1-R" 158 | top: "conv2_1-R" 159 | param { 160 | lr_mult: 0 161 | decay_mult: 0 162 | } 163 | param { 164 | lr_mult: 0 165 | decay_mult: 0 166 | } 167 | convolution_param { 168 | num_output: 128 169 | pad: 1 170 | kernel_size: 3 171 | stride: 1 172 | } 173 | } 174 | layer { 175 | name: "relu2_1-R" 176 | type: "ReLU" 177 | bottom: "conv2_1-R" 178 | top: "conv2_1-R" 179 | } 180 | layer { 181 | name: "conv2_2-R" 182 | type: "Convolution" 183 | bottom: "conv2_1-R" 184 | top: "conv2_2-R" 185 | param { 186 | lr_mult: 0 187 | decay_mult: 0 188 | } 189 | param { 190 | lr_mult: 0 191 | decay_mult: 0 192 | } 193 | convolution_param { 194 | num_output: 128 195 | pad: 1 196 | kernel_size: 3 197 | stride: 1 198 | } 199 | } 200 | layer { 201 | name: "relu2_2-R" 202 | type: "ReLU" 203 | bottom: "conv2_2-R" 204 | top: "conv2_2-R" 205 | } 206 | layer { 207 | name: "pool2-R" 208 | type: "Pooling" 209 | bottom: "conv2_2-R" 210 | top: "pool2-R" 211 | pooling_param { 212 | pool: MAX 213 | kernel_size: 2 214 | stride: 2 215 | } 216 | } 217 | layer { 218 | name: "conv3_1-R" 219 | type: "Convolution" 220 | bottom: "pool2-R" 221 | top: "conv3_1-R" 222 | param { 223 | lr_mult: 0 224 | decay_mult: 0 225 | } 226 | param { 227 | lr_mult: 0 228 | decay_mult: 0 229 | } 230 | convolution_param { 231 | num_output: 256 232 | pad: 1 233 | kernel_size: 3 234 | stride: 1 235 | } 236 | } 237 | layer { 238 | name: "relu3_1-R" 239 | type: "ReLU" 240 | bottom: "conv3_1-R" 241 | top: "conv3_1-R" 242 | } 243 | layer { 244 | name: "conv3_2-R" 245 | type: "Convolution" 246 | bottom: "conv3_1-R" 247 | top: "conv3_2-R" 248 | param { 249 | lr_mult: 0 250 | decay_mult: 0 251 | } 252 | param { 253 | lr_mult: 0 254 | decay_mult: 0 255 | } 256 | convolution_param { 257 | num_output: 256 258 | pad: 1 259 | kernel_size: 3 260 | stride: 1 261 | } 262 | } 263 | layer { 264 | name: "relu3_2-R" 265 | type: "ReLU" 266 | bottom: "conv3_2-R" 267 | top: "conv3_2-R" 268 | } 269 | layer { 270 
| name: "conv3_3-R" 271 | type: "Convolution" 272 | bottom: "conv3_2-R" 273 | top: "conv3_3-R" 274 | param { 275 | lr_mult: 0 276 | decay_mult: 0 277 | } 278 | param { 279 | lr_mult: 0 280 | decay_mult: 0 281 | } 282 | convolution_param { 283 | num_output: 256 284 | pad: 1 285 | kernel_size: 3 286 | stride: 1 287 | } 288 | } 289 | layer { 290 | name: "relu3_3-R" 291 | type: "ReLU" 292 | bottom: "conv3_3-R" 293 | top: "conv3_3-R" 294 | } 295 | layer { 296 | name: "pool3-R" 297 | type: "Pooling" 298 | bottom: "conv3_3-R" 299 | top: "pool3-R" 300 | pooling_param { 301 | pool: MAX 302 | kernel_size: 2 303 | stride: 2 304 | } 305 | } 306 | layer { 307 | name: "conv4_1-R" 308 | type: "Convolution" 309 | bottom: "pool3-R" 310 | top: "conv4_1-R" 311 | param { 312 | lr_mult: 0 313 | decay_mult: 0 314 | } 315 | param { 316 | lr_mult: 0 317 | decay_mult: 0 318 | } 319 | convolution_param { 320 | num_output: 512 321 | pad: 1 322 | kernel_size: 3 323 | stride: 1 324 | } 325 | } 326 | layer { 327 | name: "relu4_1-R" 328 | type: "ReLU" 329 | bottom: "conv4_1-R" 330 | top: "conv4_1-R" 331 | } 332 | layer { 333 | name: "conv4_2-R" 334 | type: "Convolution" 335 | bottom: "conv4_1-R" 336 | top: "conv4_2-R" 337 | param { 338 | lr_mult: 0 339 | decay_mult: 0 340 | } 341 | param { 342 | lr_mult: 0 343 | decay_mult: 0 344 | } 345 | convolution_param { 346 | num_output: 512 347 | pad: 1 348 | kernel_size: 3 349 | stride: 1 350 | } 351 | } 352 | layer { 353 | name: "relu4_2-R" 354 | type: "ReLU" 355 | bottom: "conv4_2-R" 356 | top: "conv4_2-R" 357 | } 358 | layer { 359 | name: "conv4_3-R" 360 | type: "Convolution" 361 | bottom: "conv4_2-R" 362 | top: "conv4_3-R" 363 | param { 364 | lr_mult: 0 365 | decay_mult: 0 366 | } 367 | param { 368 | lr_mult: 0 369 | decay_mult: 0 370 | } 371 | convolution_param { 372 | num_output: 512 373 | pad: 1 374 | kernel_size: 3 375 | stride: 1 376 | } 377 | } 378 | layer { 379 | name: "relu4_3-R" 380 | type: "ReLU" 381 | bottom: "conv4_3-R" 382 | top: "conv4_3-R" 383 | } 384 | layer { 385 | name: "pool4-R" 386 | type: "Pooling" 387 | bottom: "conv4_3-R" 388 | top: "pool4-R" 389 | pooling_param { 390 | pool: MAX 391 | kernel_size: 2 392 | stride: 2 393 | } 394 | } 395 | layer { 396 | name: "conv5_1-R" 397 | type: "Convolution" 398 | bottom: "pool4-R" 399 | top: "conv5_1-R" 400 | param { 401 | lr_mult: 0 402 | decay_mult: 0 403 | } 404 | param { 405 | lr_mult: 0 406 | decay_mult: 0 407 | } 408 | convolution_param { 409 | num_output: 512 410 | pad: 1 411 | kernel_size: 3 412 | stride: 1 413 | } 414 | } 415 | layer { 416 | name: "relu5_1-R" 417 | type: "ReLU" 418 | bottom: "conv5_1-R" 419 | top: "conv5_1-R" 420 | } 421 | layer { 422 | name: "conv5_2-R" 423 | type: "Convolution" 424 | bottom: "conv5_1-R" 425 | top: "conv5_2-R" 426 | param { 427 | lr_mult: 0 428 | decay_mult: 0 429 | } 430 | param { 431 | lr_mult: 0 432 | decay_mult: 0 433 | } 434 | convolution_param { 435 | num_output: 512 436 | pad: 1 437 | kernel_size: 3 438 | stride: 1 439 | } 440 | } 441 | layer { 442 | name: "relu5_2-R" 443 | type: "ReLU" 444 | bottom: "conv5_2-R" 445 | top: "conv5_2-R" 446 | } 447 | layer { 448 | name: "conv5_3-R" 449 | type: "Convolution" 450 | bottom: "conv5_2-R" 451 | top: "conv5_3-R" 452 | param { 453 | lr_mult: 0 454 | decay_mult: 0 455 | } 456 | param { 457 | lr_mult: 0 458 | decay_mult: 0 459 | } 460 | convolution_param { 461 | num_output: 512 462 | pad: 1 463 | kernel_size: 3 464 | stride: 1 465 | } 466 | } 467 | layer { 468 | name: "relu5_3-R" 469 | type: "ReLU" 470 | bottom: "conv5_3-R" 
471 | top: "conv5_3-R" 472 | } 473 | layer { 474 | name: "pool5-R" 475 | type: "Pooling" 476 | bottom: "conv5_3-R" 477 | top: "pool5-R" 478 | pooling_param { 479 | pool: MAX 480 | kernel_size: 2 481 | stride: 2 482 | } 483 | } 484 | layer { 485 | name: "fc6-R" 486 | type: "Convolution" 487 | bottom: "pool5-R" 488 | top: "fc6-R" 489 | param { 490 | lr_mult: 0 491 | decay_mult: 0 492 | } 493 | param { 494 | lr_mult: 0 495 | decay_mult: 0 496 | } 497 | convolution_param { 498 | num_output: 4096 499 | pad: 0 500 | kernel_size: 7 501 | stride: 1 502 | } 503 | } 504 | layer { 505 | name: "relu6-R" 506 | type: "ReLU" 507 | bottom: "fc6-R" 508 | top: "fc6-R" 509 | } 510 | layer { 511 | name: "drop6-R" 512 | type: "Dropout" 513 | bottom: "fc6-R" 514 | top: "fc6-R" 515 | dropout_param { 516 | dropout_ratio: 0.5 517 | } 518 | } 519 | layer { 520 | name: "fc7-R" 521 | type: "Convolution" 522 | bottom: "fc6-R" 523 | top: "fc7-R" 524 | param { 525 | lr_mult: 0 526 | decay_mult: 0 527 | } 528 | param { 529 | lr_mult: 0 530 | decay_mult: 0 531 | } 532 | convolution_param { 533 | num_output: 4096 534 | pad: 0 535 | kernel_size: 1 536 | stride: 1 537 | } 538 | } 539 | layer { 540 | name: "relu7-R" 541 | type: "ReLU" 542 | bottom: "fc7-R" 543 | top: "fc7-R" 544 | } 545 | layer { 546 | name: "drop7-R" 547 | type: "Dropout" 548 | bottom: "fc7-R" 549 | top: "fc7-R" 550 | dropout_param { 551 | dropout_ratio: 0.5 552 | } 553 | } 554 | layer { 555 | name: "score_fr-new-R" 556 | type: "Convolution" 557 | bottom: "fc7-R" 558 | top: "score_fr-new-R" 559 | param { 560 | lr_mult: 0 561 | decay_mult: 0 562 | } 563 | param { 564 | lr_mult: 0 565 | decay_mult: 0 566 | } 567 | convolution_param { 568 | num_output: 3 569 | kernel_size: 1 570 | } 571 | } 572 | layer { 573 | name: "upscore2-new-R" 574 | type: "Deconvolution" 575 | bottom: "score_fr-new-R" 576 | top: "upscore2-new-R" 577 | param { 578 | lr_mult: 0 579 | } 580 | convolution_param { 581 | num_output: 3 582 | bias_term: false 583 | kernel_size: 4 584 | stride: 2 585 | } 586 | } 587 | layer { 588 | name: "score_pool4-new-R" 589 | type: "Convolution" 590 | bottom: "pool4-R" 591 | top: "score_pool4-new-R" 592 | param { 593 | lr_mult: 0 594 | decay_mult: 0 595 | } 596 | param { 597 | lr_mult: 0 598 | decay_mult: 0 599 | } 600 | convolution_param { 601 | num_output: 3 602 | pad: 0 603 | kernel_size: 1 604 | } 605 | } 606 | layer { 607 | name: "score_pool4c-R" 608 | type: "Crop" 609 | bottom: "score_pool4-new-R" 610 | bottom: "upscore2-new-R" 611 | top: "score_pool4c-R" 612 | crop_param { 613 | axis: 2 614 | offset: 5 615 | } 616 | } 617 | layer { 618 | name: "fuse_pool4-R" 619 | type: "Eltwise" 620 | bottom: "upscore2-new-R" 621 | bottom: "score_pool4c-R" 622 | top: "fuse_pool4-R" 623 | eltwise_param { 624 | operation: SUM 625 | } 626 | } 627 | layer { 628 | name: "upscore_pool4-new-R" 629 | type: "Deconvolution" 630 | bottom: "fuse_pool4-R" 631 | top: "upscore_pool4-new-R" 632 | param { 633 | lr_mult: 0 634 | } 635 | convolution_param { 636 | num_output: 3 637 | bias_term: false 638 | kernel_size: 4 639 | stride: 2 640 | } 641 | } 642 | layer { 643 | name: "score_pool3-new-R" 644 | type: "Convolution" 645 | bottom: "pool3-R" 646 | top: "score_pool3-new-R" 647 | param { 648 | lr_mult: 0 649 | decay_mult: 0 650 | } 651 | param { 652 | lr_mult: 0 653 | decay_mult: 0 654 | } 655 | convolution_param { 656 | num_output: 3 657 | pad: 0 658 | kernel_size: 1 659 | } 660 | } 661 | layer { 662 | name: "score_pool3c-R" 663 | type: "Crop" 664 | bottom: "score_pool3-new-R" 665 | 
bottom: "upscore_pool4-new-R" 666 | top: "score_pool3c-R" 667 | crop_param { 668 | axis: 2 669 | offset: 9 670 | } 671 | } 672 | layer { 673 | name: "fuse_pool3-R" 674 | type: "Eltwise" 675 | bottom: "upscore_pool4-new-R" 676 | bottom: "score_pool3c-R" 677 | top: "fuse_pool3-R" 678 | eltwise_param { 679 | operation: SUM 680 | } 681 | } 682 | layer { 683 | name: "upscore8-new-R" 684 | type: "Deconvolution" 685 | bottom: "fuse_pool3-R" 686 | top: "upscore8-new-R" 687 | param { 688 | lr_mult: 0 689 | } 690 | convolution_param { 691 | num_output: 3 692 | bias_term: false 693 | kernel_size: 16 694 | stride: 8 695 | } 696 | } 697 | layer { 698 | name: "upscore8_cropped-R" 699 | type: "Crop" 700 | bottom: "upscore8-new-R" 701 | bottom: "data-R" 702 | top: "upscore8_cropped-R" 703 | crop_param { 704 | axis: 2 705 | offset: 31 706 | } 707 | } 708 | layer { 709 | name: "upscore8_uncropped-R" 710 | type: "Python" 711 | bottom: "crop_info" 712 | bottom: "upscore8_cropped-R" 713 | bottom: "data" 714 | top: "upscore8_uncropped-R" 715 | python_param { 716 | module: "Uncrop" 717 | layer: "UncropLayer" 718 | } 719 | } 720 | layer { 721 | name: "prob-R" 722 | type: "Sigmoid" 723 | bottom: "upscore8_uncropped-R" 724 | top: "prob-R" 725 | } 726 | -------------------------------------------------------------------------------- /OrganSegRSTN/prototxts/deploy_O3.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "input" 3 | type: "Input" 4 | top: "data" 5 | top: "label" 6 | top: "crop_margin" 7 | top: "crop_prob" 8 | top: "crop_sample_batch" 9 | input_param { 10 | shape { dim: 1 dim: 3 dim: 512 dim: 512 } 11 | shape { dim: 1 dim: 3 dim: 512 dim: 512 } 12 | shape { dim: 1 } 13 | shape { dim: 1 } 14 | shape { dim: 1 } 15 | } 16 | } 17 | layer { 18 | name: "conv1_1" 19 | type: "Convolution" 20 | bottom: "data" 21 | top: "conv1_1" 22 | param { 23 | lr_mult: 0 24 | decay_mult: 0 25 | } 26 | param { 27 | lr_mult: 0 28 | decay_mult: 0 29 | } 30 | convolution_param { 31 | num_output: 64 32 | pad: 100 33 | kernel_size: 3 34 | stride: 1 35 | } 36 | } 37 | layer { 38 | name: "relu1_1" 39 | type: "ReLU" 40 | bottom: "conv1_1" 41 | top: "conv1_1" 42 | } 43 | layer { 44 | name: "conv1_2" 45 | type: "Convolution" 46 | bottom: "conv1_1" 47 | top: "conv1_2" 48 | param { 49 | lr_mult: 0 50 | decay_mult: 0 51 | } 52 | param { 53 | lr_mult: 0 54 | decay_mult: 0 55 | } 56 | convolution_param { 57 | num_output: 64 58 | pad: 1 59 | kernel_size: 3 60 | stride: 1 61 | } 62 | } 63 | layer { 64 | name: "relu1_2" 65 | type: "ReLU" 66 | bottom: "conv1_2" 67 | top: "conv1_2" 68 | } 69 | layer { 70 | name: "pool1" 71 | type: "Pooling" 72 | bottom: "conv1_2" 73 | top: "pool1" 74 | pooling_param { 75 | pool: MAX 76 | kernel_size: 2 77 | stride: 2 78 | } 79 | } 80 | layer { 81 | name: "conv2_1" 82 | type: "Convolution" 83 | bottom: "pool1" 84 | top: "conv2_1" 85 | param { 86 | lr_mult: 0 87 | decay_mult: 0 88 | } 89 | param { 90 | lr_mult: 0 91 | decay_mult: 0 92 | } 93 | convolution_param { 94 | num_output: 128 95 | pad: 1 96 | kernel_size: 3 97 | stride: 1 98 | } 99 | } 100 | layer { 101 | name: "relu2_1" 102 | type: "ReLU" 103 | bottom: "conv2_1" 104 | top: "conv2_1" 105 | } 106 | layer { 107 | name: "conv2_2" 108 | type: "Convolution" 109 | bottom: "conv2_1" 110 | top: "conv2_2" 111 | param { 112 | lr_mult: 0 113 | decay_mult: 0 114 | } 115 | param { 116 | lr_mult: 0 117 | decay_mult: 0 118 | } 119 | convolution_param { 120 | num_output: 128 121 | pad: 1 122 | kernel_size: 3 123 | 
stride: 1 124 | } 125 | } 126 | layer { 127 | name: "relu2_2" 128 | type: "ReLU" 129 | bottom: "conv2_2" 130 | top: "conv2_2" 131 | } 132 | layer { 133 | name: "pool2" 134 | type: "Pooling" 135 | bottom: "conv2_2" 136 | top: "pool2" 137 | pooling_param { 138 | pool: MAX 139 | kernel_size: 2 140 | stride: 2 141 | } 142 | } 143 | layer { 144 | name: "conv3_1" 145 | type: "Convolution" 146 | bottom: "pool2" 147 | top: "conv3_1" 148 | param { 149 | lr_mult: 0 150 | decay_mult: 0 151 | } 152 | param { 153 | lr_mult: 0 154 | decay_mult: 0 155 | } 156 | convolution_param { 157 | num_output: 256 158 | pad: 1 159 | kernel_size: 3 160 | stride: 1 161 | } 162 | } 163 | layer { 164 | name: "relu3_1" 165 | type: "ReLU" 166 | bottom: "conv3_1" 167 | top: "conv3_1" 168 | } 169 | layer { 170 | name: "conv3_2" 171 | type: "Convolution" 172 | bottom: "conv3_1" 173 | top: "conv3_2" 174 | param { 175 | lr_mult: 0 176 | decay_mult: 0 177 | } 178 | param { 179 | lr_mult: 0 180 | decay_mult: 0 181 | } 182 | convolution_param { 183 | num_output: 256 184 | pad: 1 185 | kernel_size: 3 186 | stride: 1 187 | } 188 | } 189 | layer { 190 | name: "relu3_2" 191 | type: "ReLU" 192 | bottom: "conv3_2" 193 | top: "conv3_2" 194 | } 195 | layer { 196 | name: "conv3_3" 197 | type: "Convolution" 198 | bottom: "conv3_2" 199 | top: "conv3_3" 200 | param { 201 | lr_mult: 0 202 | decay_mult: 0 203 | } 204 | param { 205 | lr_mult: 0 206 | decay_mult: 0 207 | } 208 | convolution_param { 209 | num_output: 256 210 | pad: 1 211 | kernel_size: 3 212 | stride: 1 213 | } 214 | } 215 | layer { 216 | name: "relu3_3" 217 | type: "ReLU" 218 | bottom: "conv3_3" 219 | top: "conv3_3" 220 | } 221 | layer { 222 | name: "pool3" 223 | type: "Pooling" 224 | bottom: "conv3_3" 225 | top: "pool3" 226 | pooling_param { 227 | pool: MAX 228 | kernel_size: 2 229 | stride: 2 230 | } 231 | } 232 | layer { 233 | name: "conv4_1" 234 | type: "Convolution" 235 | bottom: "pool3" 236 | top: "conv4_1" 237 | param { 238 | lr_mult: 0 239 | decay_mult: 0 240 | } 241 | param { 242 | lr_mult: 0 243 | decay_mult: 0 244 | } 245 | convolution_param { 246 | num_output: 512 247 | pad: 1 248 | kernel_size: 3 249 | stride: 1 250 | } 251 | } 252 | layer { 253 | name: "relu4_1" 254 | type: "ReLU" 255 | bottom: "conv4_1" 256 | top: "conv4_1" 257 | } 258 | layer { 259 | name: "conv4_2" 260 | type: "Convolution" 261 | bottom: "conv4_1" 262 | top: "conv4_2" 263 | param { 264 | lr_mult: 0 265 | decay_mult: 0 266 | } 267 | param { 268 | lr_mult: 0 269 | decay_mult: 0 270 | } 271 | convolution_param { 272 | num_output: 512 273 | pad: 1 274 | kernel_size: 3 275 | stride: 1 276 | } 277 | } 278 | layer { 279 | name: "relu4_2" 280 | type: "ReLU" 281 | bottom: "conv4_2" 282 | top: "conv4_2" 283 | } 284 | layer { 285 | name: "conv4_3" 286 | type: "Convolution" 287 | bottom: "conv4_2" 288 | top: "conv4_3" 289 | param { 290 | lr_mult: 0 291 | decay_mult: 0 292 | } 293 | param { 294 | lr_mult: 0 295 | decay_mult: 0 296 | } 297 | convolution_param { 298 | num_output: 512 299 | pad: 1 300 | kernel_size: 3 301 | stride: 1 302 | } 303 | } 304 | layer { 305 | name: "relu4_3" 306 | type: "ReLU" 307 | bottom: "conv4_3" 308 | top: "conv4_3" 309 | } 310 | layer { 311 | name: "pool4" 312 | type: "Pooling" 313 | bottom: "conv4_3" 314 | top: "pool4" 315 | pooling_param { 316 | pool: MAX 317 | kernel_size: 2 318 | stride: 2 319 | } 320 | } 321 | layer { 322 | name: "conv5_1" 323 | type: "Convolution" 324 | bottom: "pool4" 325 | top: "conv5_1" 326 | param { 327 | lr_mult: 0 328 | decay_mult: 0 329 | } 330 | 
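# The layers below form the standard FCN-8s head: fc6 and fc7 are the VGG fully-connected
# layers recast as 7x7 and 1x1 convolutions, score_fr-new reduces them to 3 channels, and
# three Deconvolution layers (kernel 4 / stride 2 twice, then kernel 16 / stride 8)
# upsample the score maps while skip connections from pool4 and pool3 are merged in with
# Eltwise SUM. The Crop layers with offsets 5, 9 and 31 realign those maps after the
# oversized pad: 100 on conv1_1, so that upscore8_cropped again matches the 512x512 input.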
param { 331 | lr_mult: 0 332 | decay_mult: 0 333 | } 334 | convolution_param { 335 | num_output: 512 336 | pad: 1 337 | kernel_size: 3 338 | stride: 1 339 | } 340 | } 341 | layer { 342 | name: "relu5_1" 343 | type: "ReLU" 344 | bottom: "conv5_1" 345 | top: "conv5_1" 346 | } 347 | layer { 348 | name: "conv5_2" 349 | type: "Convolution" 350 | bottom: "conv5_1" 351 | top: "conv5_2" 352 | param { 353 | lr_mult: 0 354 | decay_mult: 0 355 | } 356 | param { 357 | lr_mult: 0 358 | decay_mult: 0 359 | } 360 | convolution_param { 361 | num_output: 512 362 | pad: 1 363 | kernel_size: 3 364 | stride: 1 365 | } 366 | } 367 | layer { 368 | name: "relu5_2" 369 | type: "ReLU" 370 | bottom: "conv5_2" 371 | top: "conv5_2" 372 | } 373 | layer { 374 | name: "conv5_3" 375 | type: "Convolution" 376 | bottom: "conv5_2" 377 | top: "conv5_3" 378 | param { 379 | lr_mult: 0 380 | decay_mult: 0 381 | } 382 | param { 383 | lr_mult: 0 384 | decay_mult: 0 385 | } 386 | convolution_param { 387 | num_output: 512 388 | pad: 1 389 | kernel_size: 3 390 | stride: 1 391 | } 392 | } 393 | layer { 394 | name: "relu5_3" 395 | type: "ReLU" 396 | bottom: "conv5_3" 397 | top: "conv5_3" 398 | } 399 | layer { 400 | name: "pool5" 401 | type: "Pooling" 402 | bottom: "conv5_3" 403 | top: "pool5" 404 | pooling_param { 405 | pool: MAX 406 | kernel_size: 2 407 | stride: 2 408 | } 409 | } 410 | layer { 411 | name: "fc6" 412 | type: "Convolution" 413 | bottom: "pool5" 414 | top: "fc6" 415 | param { 416 | lr_mult: 0 417 | decay_mult: 0 418 | } 419 | param { 420 | lr_mult: 0 421 | decay_mult: 0 422 | } 423 | convolution_param { 424 | num_output: 4096 425 | pad: 0 426 | kernel_size: 7 427 | stride: 1 428 | } 429 | } 430 | layer { 431 | name: "relu6" 432 | type: "ReLU" 433 | bottom: "fc6" 434 | top: "fc6" 435 | } 436 | layer { 437 | name: "drop6" 438 | type: "Dropout" 439 | bottom: "fc6" 440 | top: "fc6" 441 | dropout_param { 442 | dropout_ratio: 0.5 443 | } 444 | } 445 | layer { 446 | name: "fc7" 447 | type: "Convolution" 448 | bottom: "fc6" 449 | top: "fc7" 450 | param { 451 | lr_mult: 0 452 | decay_mult: 0 453 | } 454 | param { 455 | lr_mult: 0 456 | decay_mult: 0 457 | } 458 | convolution_param { 459 | num_output: 4096 460 | pad: 0 461 | kernel_size: 1 462 | stride: 1 463 | } 464 | } 465 | layer { 466 | name: "relu7" 467 | type: "ReLU" 468 | bottom: "fc7" 469 | top: "fc7" 470 | } 471 | layer { 472 | name: "drop7" 473 | type: "Dropout" 474 | bottom: "fc7" 475 | top: "fc7" 476 | dropout_param { 477 | dropout_ratio: 0.5 478 | } 479 | } 480 | layer { 481 | name: "score_fr-new" 482 | type: "Convolution" 483 | bottom: "fc7" 484 | top: "score_fr-new" 485 | param { 486 | lr_mult: 0 487 | decay_mult: 0 488 | } 489 | param { 490 | lr_mult: 0 491 | decay_mult: 0 492 | } 493 | convolution_param { 494 | num_output: 3 495 | kernel_size: 1 496 | } 497 | } 498 | layer { 499 | name: "upscore2-new" 500 | type: "Deconvolution" 501 | bottom: "score_fr-new" 502 | top: "upscore2-new" 503 | param { 504 | lr_mult: 0 505 | } 506 | convolution_param { 507 | num_output: 3 508 | bias_term: false 509 | kernel_size: 4 510 | stride: 2 511 | } 512 | } 513 | layer { 514 | name: "score_pool4-new" 515 | type: "Convolution" 516 | bottom: "pool4" 517 | top: "score_pool4-new" 518 | param { 519 | lr_mult: 0 520 | decay_mult: 0 521 | } 522 | param { 523 | lr_mult: 0 524 | decay_mult: 0 525 | } 526 | convolution_param { 527 | num_output: 3 528 | pad: 0 529 | kernel_size: 1 530 | } 531 | } 532 | layer { 533 | name: "score_pool4c" 534 | type: "Crop" 535 | bottom: "score_pool4-new" 536 
| bottom: "upscore2-new" 537 | top: "score_pool4c" 538 | crop_param { 539 | axis: 2 540 | offset: 5 541 | } 542 | } 543 | layer { 544 | name: "fuse_pool4" 545 | type: "Eltwise" 546 | bottom: "upscore2-new" 547 | bottom: "score_pool4c" 548 | top: "fuse_pool4" 549 | eltwise_param { 550 | operation: SUM 551 | } 552 | } 553 | layer { 554 | name: "upscore_pool4-new" 555 | type: "Deconvolution" 556 | bottom: "fuse_pool4" 557 | top: "upscore_pool4-new" 558 | param { 559 | lr_mult: 0 560 | } 561 | convolution_param { 562 | num_output: 3 563 | bias_term: false 564 | kernel_size: 4 565 | stride: 2 566 | } 567 | } 568 | layer { 569 | name: "score_pool3-new" 570 | type: "Convolution" 571 | bottom: "pool3" 572 | top: "score_pool3-new" 573 | param { 574 | lr_mult: 0 575 | decay_mult: 0 576 | } 577 | param { 578 | lr_mult: 0 579 | decay_mult: 0 580 | } 581 | convolution_param { 582 | num_output: 3 583 | pad: 0 584 | kernel_size: 1 585 | } 586 | } 587 | layer { 588 | name: "score_pool3c" 589 | type: "Crop" 590 | bottom: "score_pool3-new" 591 | bottom: "upscore_pool4-new" 592 | top: "score_pool3c" 593 | crop_param { 594 | axis: 2 595 | offset: 9 596 | } 597 | } 598 | layer { 599 | name: "fuse_pool3" 600 | type: "Eltwise" 601 | bottom: "upscore_pool4-new" 602 | bottom: "score_pool3c" 603 | top: "fuse_pool3" 604 | eltwise_param { 605 | operation: SUM 606 | } 607 | } 608 | layer { 609 | name: "upscore8-new" 610 | type: "Deconvolution" 611 | bottom: "fuse_pool3" 612 | top: "upscore8-new" 613 | param { 614 | lr_mult: 0 615 | } 616 | convolution_param { 617 | num_output: 3 618 | bias_term: false 619 | kernel_size: 16 620 | stride: 8 621 | } 622 | } 623 | layer { 624 | name: "upscore8_cropped" 625 | type: "Crop" 626 | bottom: "upscore8-new" 627 | bottom: "data" 628 | top: "upscore8_cropped" 629 | crop_param { 630 | axis: 2 631 | offset: 31 632 | } 633 | } 634 | layer { 635 | name: "prob" 636 | type: "Sigmoid" 637 | bottom: "upscore8_cropped" 638 | top: "prob" 639 | } 640 | layer { 641 | name: "saliency1" 642 | type: "Convolution" 643 | bottom: "prob" 644 | top: "saliency1" 645 | param { 646 | lr_mult: 0 647 | decay_mult: 0 648 | } 649 | param { 650 | lr_mult: 0 651 | decay_mult: 0 652 | } 653 | convolution_param { 654 | num_output: 3 655 | kernel_size: 3 656 | stride: 1 657 | pad: 1 658 | } 659 | } 660 | layer { 661 | name: "relu_saliency1" 662 | type: "ReLU" 663 | bottom: "saliency1" 664 | top: "saliency1" 665 | } 666 | layer { 667 | name: "saliency" 668 | type: "Convolution" 669 | bottom: "saliency1" 670 | top: "saliency" 671 | param { 672 | lr_mult: 0 673 | decay_mult: 0 674 | } 675 | param { 676 | lr_mult: 0 677 | decay_mult: 0 678 | } 679 | convolution_param { 680 | num_output: 3 681 | kernel_size: 5 682 | stride: 1 683 | pad: 2 684 | } 685 | } 686 | layer { 687 | name: "saliency_data" 688 | type: "Eltwise" 689 | bottom: "data" 690 | bottom: "saliency" 691 | top: "saliency_data" 692 | eltwise_param { 693 | operation: PROD 694 | } 695 | } 696 | layer{ 697 | name: "crop" 698 | type: "Python" 699 | bottom: "label" 700 | bottom: "saliency_data" 701 | bottom: "crop_margin" 702 | bottom: "crop_prob" 703 | bottom: "crop_sample_batch" 704 | top: "data-R" 705 | top: "crop_info" 706 | python_param { 707 | module: "Crop" 708 | layer: "CropLayer" 709 | param_str: '{"TEST": 1}' 710 | } 711 | } 712 | layer { 713 | name: "conv1_1-R" 714 | type: "Convolution" 715 | bottom: "data-R" 716 | top: "conv1_1-R" 717 | param { 718 | lr_mult: 0 719 | decay_mult: 0 720 | } 721 | param { 722 | lr_mult: 0 723 | decay_mult: 0 724 | } 725 
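# The coarse-stage output "prob" above is turned into a saliency map by two small
# convolutions (saliency1: 3x3, saliency: 5x5), multiplied element-wise with the input
# image to give "saliency_data", and passed together with "label", "crop_margin",
# "crop_prob" and "crop_sample_batch" to the Python layer "CropLayer" from module "Crop"
# (configured here with param_str '{"TEST": 1}'). CropLayer emits the cropped fine-stage
# input "data-R" plus "crop_info", which the Python "Uncrop" layer near the end of this
# file uses to paste the fine-stage prediction back into the original 512x512 frame.
# Every lr_mult/decay_mult is 0 because this deploy definition is used for inference only.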
| convolution_param { 726 | num_output: 64 727 | pad: 100 728 | kernel_size: 3 729 | stride: 1 730 | } 731 | } 732 | layer { 733 | name: "relu1_1-R" 734 | type: "ReLU" 735 | bottom: "conv1_1-R" 736 | top: "conv1_1-R" 737 | } 738 | layer { 739 | name: "conv1_2-R" 740 | type: "Convolution" 741 | bottom: "conv1_1-R" 742 | top: "conv1_2-R" 743 | param { 744 | lr_mult: 0 745 | decay_mult: 0 746 | } 747 | param { 748 | lr_mult: 0 749 | decay_mult: 0 750 | } 751 | convolution_param { 752 | num_output: 64 753 | pad: 1 754 | kernel_size: 3 755 | stride: 1 756 | } 757 | } 758 | layer { 759 | name: "relu1_2-R" 760 | type: "ReLU" 761 | bottom: "conv1_2-R" 762 | top: "conv1_2-R" 763 | } 764 | layer { 765 | name: "pool1-R" 766 | type: "Pooling" 767 | bottom: "conv1_2-R" 768 | top: "pool1-R" 769 | pooling_param { 770 | pool: MAX 771 | kernel_size: 2 772 | stride: 2 773 | } 774 | } 775 | layer { 776 | name: "conv2_1-R" 777 | type: "Convolution" 778 | bottom: "pool1-R" 779 | top: "conv2_1-R" 780 | param { 781 | lr_mult: 0 782 | decay_mult: 0 783 | } 784 | param { 785 | lr_mult: 0 786 | decay_mult: 0 787 | } 788 | convolution_param { 789 | num_output: 128 790 | pad: 1 791 | kernel_size: 3 792 | stride: 1 793 | } 794 | } 795 | layer { 796 | name: "relu2_1-R" 797 | type: "ReLU" 798 | bottom: "conv2_1-R" 799 | top: "conv2_1-R" 800 | } 801 | layer { 802 | name: "conv2_2-R" 803 | type: "Convolution" 804 | bottom: "conv2_1-R" 805 | top: "conv2_2-R" 806 | param { 807 | lr_mult: 0 808 | decay_mult: 0 809 | } 810 | param { 811 | lr_mult: 0 812 | decay_mult: 0 813 | } 814 | convolution_param { 815 | num_output: 128 816 | pad: 1 817 | kernel_size: 3 818 | stride: 1 819 | } 820 | } 821 | layer { 822 | name: "relu2_2-R" 823 | type: "ReLU" 824 | bottom: "conv2_2-R" 825 | top: "conv2_2-R" 826 | } 827 | layer { 828 | name: "pool2-R" 829 | type: "Pooling" 830 | bottom: "conv2_2-R" 831 | top: "pool2-R" 832 | pooling_param { 833 | pool: MAX 834 | kernel_size: 2 835 | stride: 2 836 | } 837 | } 838 | layer { 839 | name: "conv3_1-R" 840 | type: "Convolution" 841 | bottom: "pool2-R" 842 | top: "conv3_1-R" 843 | param { 844 | lr_mult: 0 845 | decay_mult: 0 846 | } 847 | param { 848 | lr_mult: 0 849 | decay_mult: 0 850 | } 851 | convolution_param { 852 | num_output: 256 853 | pad: 1 854 | kernel_size: 3 855 | stride: 1 856 | } 857 | } 858 | layer { 859 | name: "relu3_1-R" 860 | type: "ReLU" 861 | bottom: "conv3_1-R" 862 | top: "conv3_1-R" 863 | } 864 | layer { 865 | name: "conv3_2-R" 866 | type: "Convolution" 867 | bottom: "conv3_1-R" 868 | top: "conv3_2-R" 869 | param { 870 | lr_mult: 0 871 | decay_mult: 0 872 | } 873 | param { 874 | lr_mult: 0 875 | decay_mult: 0 876 | } 877 | convolution_param { 878 | num_output: 256 879 | pad: 1 880 | kernel_size: 3 881 | stride: 1 882 | } 883 | } 884 | layer { 885 | name: "relu3_2-R" 886 | type: "ReLU" 887 | bottom: "conv3_2-R" 888 | top: "conv3_2-R" 889 | } 890 | layer { 891 | name: "conv3_3-R" 892 | type: "Convolution" 893 | bottom: "conv3_2-R" 894 | top: "conv3_3-R" 895 | param { 896 | lr_mult: 0 897 | decay_mult: 0 898 | } 899 | param { 900 | lr_mult: 0 901 | decay_mult: 0 902 | } 903 | convolution_param { 904 | num_output: 256 905 | pad: 1 906 | kernel_size: 3 907 | stride: 1 908 | } 909 | } 910 | layer { 911 | name: "relu3_3-R" 912 | type: "ReLU" 913 | bottom: "conv3_3-R" 914 | top: "conv3_3-R" 915 | } 916 | layer { 917 | name: "pool3-R" 918 | type: "Pooling" 919 | bottom: "conv3_3-R" 920 | top: "pool3-R" 921 | pooling_param { 922 | pool: MAX 923 | kernel_size: 2 924 | stride: 2 925 | } 
926 | } 927 | layer { 928 | name: "conv4_1-R" 929 | type: "Convolution" 930 | bottom: "pool3-R" 931 | top: "conv4_1-R" 932 | param { 933 | lr_mult: 0 934 | decay_mult: 0 935 | } 936 | param { 937 | lr_mult: 0 938 | decay_mult: 0 939 | } 940 | convolution_param { 941 | num_output: 512 942 | pad: 1 943 | kernel_size: 3 944 | stride: 1 945 | } 946 | } 947 | layer { 948 | name: "relu4_1-R" 949 | type: "ReLU" 950 | bottom: "conv4_1-R" 951 | top: "conv4_1-R" 952 | } 953 | layer { 954 | name: "conv4_2-R" 955 | type: "Convolution" 956 | bottom: "conv4_1-R" 957 | top: "conv4_2-R" 958 | param { 959 | lr_mult: 0 960 | decay_mult: 0 961 | } 962 | param { 963 | lr_mult: 0 964 | decay_mult: 0 965 | } 966 | convolution_param { 967 | num_output: 512 968 | pad: 1 969 | kernel_size: 3 970 | stride: 1 971 | } 972 | } 973 | layer { 974 | name: "relu4_2-R" 975 | type: "ReLU" 976 | bottom: "conv4_2-R" 977 | top: "conv4_2-R" 978 | } 979 | layer { 980 | name: "conv4_3-R" 981 | type: "Convolution" 982 | bottom: "conv4_2-R" 983 | top: "conv4_3-R" 984 | param { 985 | lr_mult: 0 986 | decay_mult: 0 987 | } 988 | param { 989 | lr_mult: 0 990 | decay_mult: 0 991 | } 992 | convolution_param { 993 | num_output: 512 994 | pad: 1 995 | kernel_size: 3 996 | stride: 1 997 | } 998 | } 999 | layer { 1000 | name: "relu4_3-R" 1001 | type: "ReLU" 1002 | bottom: "conv4_3-R" 1003 | top: "conv4_3-R" 1004 | } 1005 | layer { 1006 | name: "pool4-R" 1007 | type: "Pooling" 1008 | bottom: "conv4_3-R" 1009 | top: "pool4-R" 1010 | pooling_param { 1011 | pool: MAX 1012 | kernel_size: 2 1013 | stride: 2 1014 | } 1015 | } 1016 | layer { 1017 | name: "conv5_1-R" 1018 | type: "Convolution" 1019 | bottom: "pool4-R" 1020 | top: "conv5_1-R" 1021 | param { 1022 | lr_mult: 0 1023 | decay_mult: 0 1024 | } 1025 | param { 1026 | lr_mult: 0 1027 | decay_mult: 0 1028 | } 1029 | convolution_param { 1030 | num_output: 512 1031 | pad: 1 1032 | kernel_size: 3 1033 | stride: 1 1034 | } 1035 | } 1036 | layer { 1037 | name: "relu5_1-R" 1038 | type: "ReLU" 1039 | bottom: "conv5_1-R" 1040 | top: "conv5_1-R" 1041 | } 1042 | layer { 1043 | name: "conv5_2-R" 1044 | type: "Convolution" 1045 | bottom: "conv5_1-R" 1046 | top: "conv5_2-R" 1047 | param { 1048 | lr_mult: 0 1049 | decay_mult: 0 1050 | } 1051 | param { 1052 | lr_mult: 0 1053 | decay_mult: 0 1054 | } 1055 | convolution_param { 1056 | num_output: 512 1057 | pad: 1 1058 | kernel_size: 3 1059 | stride: 1 1060 | } 1061 | } 1062 | layer { 1063 | name: "relu5_2-R" 1064 | type: "ReLU" 1065 | bottom: "conv5_2-R" 1066 | top: "conv5_2-R" 1067 | } 1068 | layer { 1069 | name: "conv5_3-R" 1070 | type: "Convolution" 1071 | bottom: "conv5_2-R" 1072 | top: "conv5_3-R" 1073 | param { 1074 | lr_mult: 0 1075 | decay_mult: 0 1076 | } 1077 | param { 1078 | lr_mult: 0 1079 | decay_mult: 0 1080 | } 1081 | convolution_param { 1082 | num_output: 512 1083 | pad: 1 1084 | kernel_size: 3 1085 | stride: 1 1086 | } 1087 | } 1088 | layer { 1089 | name: "relu5_3-R" 1090 | type: "ReLU" 1091 | bottom: "conv5_3-R" 1092 | top: "conv5_3-R" 1093 | } 1094 | layer { 1095 | name: "pool5-R" 1096 | type: "Pooling" 1097 | bottom: "conv5_3-R" 1098 | top: "pool5-R" 1099 | pooling_param { 1100 | pool: MAX 1101 | kernel_size: 2 1102 | stride: 2 1103 | } 1104 | } 1105 | layer { 1106 | name: "fc6-R" 1107 | type: "Convolution" 1108 | bottom: "pool5-R" 1109 | top: "fc6-R" 1110 | param { 1111 | lr_mult: 0 1112 | decay_mult: 0 1113 | } 1114 | param { 1115 | lr_mult: 0 1116 | decay_mult: 0 1117 | } 1118 | convolution_param { 1119 | num_output: 4096 1120 | pad: 0 
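# From conv1_1-R down to prob-R this second branch repeats the coarse-stage network layer
# for layer, but it runs on the cropped, saliency-weighted input "data-R" instead of the
# full image; fc6-R and fc7-R are again the fully-connected layers expressed as 7x7 and
# 1x1 convolutions, which keeps the branch fully convolutional for the cropped input size.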
1121 | kernel_size: 7 1122 | stride: 1 1123 | } 1124 | } 1125 | layer { 1126 | name: "relu6-R" 1127 | type: "ReLU" 1128 | bottom: "fc6-R" 1129 | top: "fc6-R" 1130 | } 1131 | layer { 1132 | name: "drop6-R" 1133 | type: "Dropout" 1134 | bottom: "fc6-R" 1135 | top: "fc6-R" 1136 | dropout_param { 1137 | dropout_ratio: 0.5 1138 | } 1139 | } 1140 | layer { 1141 | name: "fc7-R" 1142 | type: "Convolution" 1143 | bottom: "fc6-R" 1144 | top: "fc7-R" 1145 | param { 1146 | lr_mult: 0 1147 | decay_mult: 0 1148 | } 1149 | param { 1150 | lr_mult: 0 1151 | decay_mult: 0 1152 | } 1153 | convolution_param { 1154 | num_output: 4096 1155 | pad: 0 1156 | kernel_size: 1 1157 | stride: 1 1158 | } 1159 | } 1160 | layer { 1161 | name: "relu7-R" 1162 | type: "ReLU" 1163 | bottom: "fc7-R" 1164 | top: "fc7-R" 1165 | } 1166 | layer { 1167 | name: "drop7-R" 1168 | type: "Dropout" 1169 | bottom: "fc7-R" 1170 | top: "fc7-R" 1171 | dropout_param { 1172 | dropout_ratio: 0.5 1173 | } 1174 | } 1175 | layer { 1176 | name: "score_fr-new-R" 1177 | type: "Convolution" 1178 | bottom: "fc7-R" 1179 | top: "score_fr-new-R" 1180 | param { 1181 | lr_mult: 0 1182 | decay_mult: 0 1183 | } 1184 | param { 1185 | lr_mult: 0 1186 | decay_mult: 0 1187 | } 1188 | convolution_param { 1189 | num_output: 3 1190 | kernel_size: 1 1191 | } 1192 | } 1193 | layer { 1194 | name: "upscore2-new-R" 1195 | type: "Deconvolution" 1196 | bottom: "score_fr-new-R" 1197 | top: "upscore2-new-R" 1198 | param { 1199 | lr_mult: 0 1200 | } 1201 | convolution_param { 1202 | num_output: 3 1203 | bias_term: false 1204 | kernel_size: 4 1205 | stride: 2 1206 | } 1207 | } 1208 | layer { 1209 | name: "score_pool4-new-R" 1210 | type: "Convolution" 1211 | bottom: "pool4-R" 1212 | top: "score_pool4-new-R" 1213 | param { 1214 | lr_mult: 0 1215 | decay_mult: 0 1216 | } 1217 | param { 1218 | lr_mult: 0 1219 | decay_mult: 0 1220 | } 1221 | convolution_param { 1222 | num_output: 3 1223 | pad: 0 1224 | kernel_size: 1 1225 | } 1226 | } 1227 | layer { 1228 | name: "score_pool4c-R" 1229 | type: "Crop" 1230 | bottom: "score_pool4-new-R" 1231 | bottom: "upscore2-new-R" 1232 | top: "score_pool4c-R" 1233 | crop_param { 1234 | axis: 2 1235 | offset: 5 1236 | } 1237 | } 1238 | layer { 1239 | name: "fuse_pool4-R" 1240 | type: "Eltwise" 1241 | bottom: "upscore2-new-R" 1242 | bottom: "score_pool4c-R" 1243 | top: "fuse_pool4-R" 1244 | eltwise_param { 1245 | operation: SUM 1246 | } 1247 | } 1248 | layer { 1249 | name: "upscore_pool4-new-R" 1250 | type: "Deconvolution" 1251 | bottom: "fuse_pool4-R" 1252 | top: "upscore_pool4-new-R" 1253 | param { 1254 | lr_mult: 0 1255 | } 1256 | convolution_param { 1257 | num_output: 3 1258 | bias_term: false 1259 | kernel_size: 4 1260 | stride: 2 1261 | } 1262 | } 1263 | layer { 1264 | name: "score_pool3-new-R" 1265 | type: "Convolution" 1266 | bottom: "pool3-R" 1267 | top: "score_pool3-new-R" 1268 | param { 1269 | lr_mult: 0 1270 | decay_mult: 0 1271 | } 1272 | param { 1273 | lr_mult: 0 1274 | decay_mult: 0 1275 | } 1276 | convolution_param { 1277 | num_output: 3 1278 | pad: 0 1279 | kernel_size: 1 1280 | } 1281 | } 1282 | layer { 1283 | name: "score_pool3c-R" 1284 | type: "Crop" 1285 | bottom: "score_pool3-new-R" 1286 | bottom: "upscore_pool4-new-R" 1287 | top: "score_pool3c-R" 1288 | crop_param { 1289 | axis: 2 1290 | offset: 9 1291 | } 1292 | } 1293 | layer { 1294 | name: "fuse_pool3-R" 1295 | type: "Eltwise" 1296 | bottom: "upscore_pool4-new-R" 1297 | bottom: "score_pool3c-R" 1298 | top: "fuse_pool3-R" 1299 | eltwise_param { 1300 | operation: SUM 1301 | } 
1302 | } 1303 | layer { 1304 | name: "upscore8-new-R" 1305 | type: "Deconvolution" 1306 | bottom: "fuse_pool3-R" 1307 | top: "upscore8-new-R" 1308 | param { 1309 | lr_mult: 0 1310 | } 1311 | convolution_param { 1312 | num_output: 3 1313 | bias_term: false 1314 | kernel_size: 16 1315 | stride: 8 1316 | } 1317 | } 1318 | layer { 1319 | name: "upscore8_cropped-R" 1320 | type: "Crop" 1321 | bottom: "upscore8-new-R" 1322 | bottom: "data-R" 1323 | top: "upscore8_cropped-R" 1324 | crop_param { 1325 | axis: 2 1326 | offset: 31 1327 | } 1328 | } 1329 | layer { 1330 | name: "upscore8_uncropped-R" 1331 | type: "Python" 1332 | bottom: "crop_info" 1333 | bottom: "upscore8_cropped-R" 1334 | bottom: "data" 1335 | top: "upscore8_uncropped-R" 1336 | python_param { 1337 | module: "Uncrop" 1338 | layer: "UncropLayer" 1339 | } 1340 | } 1341 | layer { 1342 | name: "prob-R" 1343 | type: "Sigmoid" 1344 | bottom: "upscore8_uncropped-R" 1345 | top: "prob-R" 1346 | } 1347 | -------------------------------------------------------------------------------- /OrganSegRSTN/run.sh: -------------------------------------------------------------------------------- 1 | #################################################################################################### 2 | # RSTN: Recurrent Saliency Transformation Network for organ segmentation framework # 3 | # # 4 | # If you use our codes, please cite our paper accordingly: # 5 | # Qihang Yu, Lingxi Xie, Yan Wang, Yuyin Zhou, Elliot K. Fishman, Alan L. Yuille, # 6 | # "Recurrent Saliency Transformation Network: # 7 | # Incorporating Multi-Stage Visual Cues for Small Organ Segmentation", # 8 | # in IEEE Conference on Computer Vision and Pattern Recognition, 2018. # 9 | # # 10 | # NOTE: this program can be used for multi-organ segmentation. # 11 | # Please also refer to its previous version, OrganSegC2F. 
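# Pipeline overview: each ENABLE_* switch below turns one stage on (1) or off (0). The
# stages are intended to run in order: initialization, individual training, joint
# training, coarse-scaled testing, coarse-scaled fusion, oracle testing, oracle fusion,
# and coarse-to-fine testing (the only stage switched on in this copy). Setting
# CURRENT_PLANE=A runs a stage on all three planes X, Y and Z.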
# 12 | #################################################################################################### 13 | 14 | #################################################################################################### 15 | # variables for convenience 16 | CURRENT_ORGAN_ID=1 17 | CURRENT_PLANE=A 18 | CURRENT_FOLD=0 19 | CURRENT_GPU=$CURRENT_FOLD 20 | 21 | #################################################################################################### 22 | # turn on these switches to execute each module 23 | ENABLE_INITIALIZATION=0 24 | ENABLE_INDIV_TRAINING=0 25 | ENABLE_JOINT_TRAINING=0 26 | ENABLE_COARSE_TESTING=0 27 | ENABLE_COARSE_FUSION=0 28 | ENABLE_ORACLE_TESTING=0 29 | ENABLE_ORACLE_FUSION=0 30 | ENABLE_COARSE2FINE_TESTING=1 31 | # indiv_training settings: X|Y|Z 32 | INDIV_TRAINING_ORGAN_ID=$CURRENT_ORGAN_ID 33 | INDIV_TRAINING_PLANE=$CURRENT_PLANE 34 | INDIV_TRAINING_GPU=$CURRENT_GPU 35 | # joint_training settings: X|Y|Z 36 | JOINT_TRAINING_ORGAN_ID=$CURRENT_ORGAN_ID 37 | JOINT_TRAINING_PLANE=$CURRENT_PLANE 38 | JOINT_TRAINING_GPU=$CURRENT_GPU 39 | # coarse_testing settings: X|Y|Z, before this, coarse-scaled models shall be ready 40 | COARSE_TESTING_ORGAN_ID=$CURRENT_ORGAN_ID 41 | COARSE_TESTING_PLANE=$CURRENT_PLANE 42 | COARSE_TESTING_GPU=$CURRENT_GPU 43 | # coarse_fusion settings: before this, coarse-scaled results on 3 views shall be ready 44 | COARSE_FUSION_ORGAN_ID=$CURRENT_ORGAN_ID 45 | # oracle_testing settings: X|Y|Z, before this, fine-scaled models shall be ready 46 | ORACLE_TESTING_ORGAN_ID=$CURRENT_ORGAN_ID 47 | ORACLE_TESTING_PLANE=$CURRENT_PLANE 48 | ORACLE_TESTING_GPU=$CURRENT_GPU 49 | # oracle_fusion settings: before this, fine-scaled results on 3 views shall be ready 50 | ORACLE_FUSION_ORGAN_ID=$CURRENT_ORGAN_ID 51 | # fine_testing settings: before this, both coarse-scaled and fine-scaled models shall be ready 52 | COARSE2FINE_TESTING_ORGAN_ID=$CURRENT_ORGAN_ID 53 | COARSE2FINE_TESTING_GPU=$CURRENT_GPU 54 | 55 | 56 | #################################################################################################### 57 | # defining the root path which stores image and label data 58 | DATA_PATH='/media/Med_2T2/data2/' 59 | LIB_PATH='/media/Med_2T2/data2/' 60 | mkdir ${DATA_PATH}logs/ 61 | 62 | #################################################################################################### 63 | # export PYTHONPATH (related to your path to CAFFE) 64 | export LD_LIBRARY_PATH=/usr/local/cuda-8.0/lib64:$LD_LIBRARY_PATH 65 | export PYTHONPATH=${LIB_PATH}libs/caffe-master/python:$PYTHONPATH 66 | 67 | #################################################################################################### 68 | # data initialization: only needs to be run once 69 | # variables 70 | ORGAN_NUMBER=1 71 | FOLDS=4 72 | LOW_RANGE=-100 73 | HIGH_RANGE=240 74 | # init.py : data_path, organ_number, folds, low_range, high_range 75 | if [ "$ENABLE_INITIALIZATION" = "1" ] 76 | then 77 | python init.py \ 78 | $DATA_PATH $ORGAN_NUMBER $FOLDS $LOW_RANGE $HIGH_RANGE 79 | fi 80 | 81 | #################################################################################################### 82 | # the individual and joint training processes 83 | # variables 84 | SLICE_THRESHOLD=0.98 85 | SLICE_THICKNESS=3 86 | LEARNING_RATE1=1e-5 87 | LEARNING_RATE2=1e-5 88 | LEARNING_RATE_M1=10 89 | LEARNING_RATE_M2=10 90 | TRAINING_MARGIN=20 91 | TRAINING_PROB=0.5 92 | TRAINING_SAMPLE_BATCH=1 93 | TRAINING_STEP=10000 94 | TRAINING_MAX_ITERATIONS1=40000 95 | TRAINING_MAX_ITERATIONS2=40000 96 | 
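LOW_RANGE and HIGH_RANGE bound the CT intensity window that init.py and the training and testing scripts receive on their command lines. init.py itself is not shown in this section, so the snippet below is only a generic sketch of such a clip-and-rescale step; the function name and the rescaling to [0, 255] are illustrative assumptions rather than the repository's actual preprocessing.

import numpy as np

def apply_intensity_window(volume, low=-100, high=240):
    # Clip CT intensities to the [low, high] window (run.sh passes -100 and 240),
    # then rescale to [0, 255]; hypothetical helper for illustration only.
    v = np.clip(volume.astype(np.float32), low, high)
    return (v - low) / (high - low) * 255.0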
TRAINING_FRACTION=0.25 97 | TRAINING_TOTAL_ITERATIONS=$(($TRAINING_MAX_ITERATIONS1+$TRAINING_MAX_ITERATIONS2)) 98 | # individual training 99 | if [ "$ENABLE_INDIV_TRAINING" = "1" ] 100 | then 101 | INDIV_TIMESTAMP=$(date +'%Y%m%d_%H%M%S') 102 | else 103 | INDIV_TIMESTAMP=_ 104 | fi 105 | # indiv_training.py : data_path, current_fold, organ_number, low_range, high_range, 106 | # slice_threshold, slice_thickness, organ_ID, plane, GPU_ID, 107 | # learning_rate1, learning_rate2 (not used), margin, prob, sample_batch, 108 | # step, max_iterations1, max_iterations2 (not used), fraction, timestamp 109 | if [ "$ENABLE_INDIV_TRAINING" = "1" ] 110 | then 111 | if [ "$INDIV_TRAINING_PLANE" = "X" ] || [ "$INDIV_TRAINING_PLANE" = "A" ] 112 | then 113 | INDIV_MODELNAME=XI${SLICE_THICKNESS}_${INDIV_TRAINING_ORGAN_ID} 114 | INDIV_LOG=${DATA_PATH}logs/FD${CURRENT_FOLD}:${INDIV_MODELNAME}_${INDIV_TIMESTAMP}.txt 115 | python indiv_training.py \ 116 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 117 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 118 | $INDIV_TRAINING_ORGAN_ID X $INDIV_TRAINING_GPU \ 119 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 120 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 121 | $TRAINING_STEP $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 122 | $TRAINING_FRACTION $INDIV_TIMESTAMP 1 2>&1 | tee $INDIV_LOG 123 | fi 124 | if [ "$INDIV_TRAINING_PLANE" = "Y" ] || [ "$INDIV_TRAINING_PLANE" = "A" ] 125 | then 126 | INDIV_MODELNAME=YI${SLICE_THICKNESS}_${INDIV_TRAINING_ORGAN_ID} 127 | INDIV_LOG=${DATA_PATH}logs/FD${CURRENT_FOLD}:${INDIV_MODELNAME}_${INDIV_TIMESTAMP}.txt 128 | python indiv_training.py \ 129 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 130 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 131 | $INDIV_TRAINING_ORGAN_ID Y $INDIV_TRAINING_GPU \ 132 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 133 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 134 | $TRAINING_STEP $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 135 | $TRAINING_FRACTION $INDIV_TIMESTAMP 1 2>&1 | tee $INDIV_LOG 136 | fi 137 | if [ "$INDIV_TRAINING_PLANE" = "Z" ] || [ "$INDIV_TRAINING_PLANE" = "A" ] 138 | then 139 | INDIV_MODELNAME=ZI${SLICE_THICKNESS}_${INDIV_TRAINING_ORGAN_ID} 140 | INDIV_LOG=${DATA_PATH}logs/FD${CURRENT_FOLD}:${INDIV_MODELNAME}_${INDIV_TIMESTAMP}.txt 141 | python indiv_training.py \ 142 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 143 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 144 | $INDIV_TRAINING_ORGAN_ID Z $INDIV_TRAINING_GPU \ 145 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 146 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 147 | $TRAINING_STEP $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 148 | $TRAINING_FRACTION $INDIV_TIMESTAMP 1 2>&1 | tee $INDIV_LOG 149 | fi 150 | fi 151 | # joint training 152 | JOINT_TIMESTAMP=$(date +'%Y%m%d_%H%M%S') 153 | # joint_training.py : data_path, current_fold, organ_number, low_range, high_range, 154 | # slice_threshold, slice_thickness, organ_ID, plane, GPU_ID, 155 | # learning_rate1, learning_rate2, margin, prob, sample_batch, 156 | # step, max_iterations1, max_iterations2, timestamp 157 | if [ "$ENABLE_JOINT_TRAINING" = "1" ] 158 | then 159 | if [ "$JOINT_TRAINING_PLANE" = "X" ] || [ "$JOINT_TRAINING_PLANE" = "A" ] 160 | then 161 | JOINT_MODELNAME=XJ${SLICE_THICKNESS}_${JOINT_TRAINING_ORGAN_ID} 162 | JOINT_LOG=${DATA_PATH}logs/FD${CURRENT_FOLD}:${JOINT_MODELNAME}_${JOINT_TIMESTAMP}.txt 
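# The model name encodes the view and the stage: XI/YI/ZI for the individual stage above,
# XJ/YJ/ZJ for this joint stage, followed by the slice thickness and the organ ID
# (e.g. XJ3_1 is plane X, thickness 3, organ 1). Each run logs to
# ${DATA_PATH}logs/FD<fold>:<model>_<timestamp>.txt, and "2>&1 | tee" keeps a copy of both
# stdout and stderr in that file while still printing to the console.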
163 | python joint_training.py \ 164 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 165 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 166 | $JOINT_TRAINING_ORGAN_ID X $JOINT_TRAINING_GPU \ 167 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 168 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 169 | $TRAINING_STEP $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 170 | $INDIV_TIMESTAMP $JOINT_TIMESTAMP 2>&1 | tee $JOINT_LOG 171 | fi 172 | if [ "$JOINT_TRAINING_PLANE" = "Y" ] || [ "$JOINT_TRAINING_PLANE" = "A" ] 173 | then 174 | JOINT_MODELNAME=YJ${SLICE_THICKNESS}_${JOINT_TRAINING_ORGAN_ID} 175 | JOINT_LOG=${DATA_PATH}logs/FD${CURRENT_FOLD}:${JOINT_MODELNAME}_${JOINT_TIMESTAMP}.txt 176 | python joint_training.py \ 177 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 178 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 179 | $JOINT_TRAINING_ORGAN_ID Y $JOINT_TRAINING_GPU \ 180 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 181 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 182 | $TRAINING_STEP $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 183 | $INDIV_TIMESTAMP $JOINT_TIMESTAMP 2>&1 | tee $JOINT_LOG 184 | fi 185 | if [ "$JOINT_TRAINING_PLANE" = "Z" ] || [ "$JOINT_TRAINING_PLANE" = "A" ] 186 | then 187 | JOINT_MODELNAME=ZJ${SLICE_THICKNESS}_${JOINT_TRAINING_ORGAN_ID} 188 | JOINT_LOG=${DATA_PATH}logs/FD${CURRENT_FOLD}:${JOINT_MODELNAME}_${JOINT_TIMESTAMP}.txt 189 | python joint_training.py \ 190 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 191 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 192 | $JOINT_TRAINING_ORGAN_ID Z $JOINT_TRAINING_GPU \ 193 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 194 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 195 | $TRAINING_STEP $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 196 | $INDIV_TIMESTAMP $JOINT_TIMESTAMP 2>&1 | tee $JOINT_LOG 197 | fi 198 | fi 199 | 200 | #################################################################################################### 201 | # the coarse-scaled testing processes 202 | # variables 203 | COARSE_TESTING_STARTING_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 204 | COARSE_TESTING_STEP=$TRAINING_STEP 205 | COARSE_TESTING_MAX_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 206 | COARSE_TIMESTAMP1=_ 207 | COARSE_TIMESTAMP2=_ 208 | # coarse_testing.py : data_path, current_fold, organ_number, low_range, high_range, 209 | # slice_threshold, slice_thickness, organ_ID, plane, GPU_ID, 210 | # learning_rate1, learning_rate2, margin, prob, sample_batch, 211 | # step, max_iterations1, max_iterations2, 212 | # starting_iterations, step, max_iterations, 213 | # timestamp1 (optional), timestamp2 (optional) 214 | if [ "$ENABLE_COARSE_TESTING" = "1" ] 215 | then 216 | if [ "$COARSE_TESTING_PLANE" = "X" ] || [ "$COARSE_TESTING_PLANE" = "A" ] 217 | then 218 | python coarse_testing.py \ 219 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 220 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 221 | $COARSE_TESTING_ORGAN_ID X $COARSE_TESTING_GPU \ 222 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 223 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 224 | $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 225 | $COARSE_TESTING_STARTING_ITERATIONS $COARSE_TESTING_STEP \ 226 | $COARSE_TESTING_MAX_ITERATIONS \ 227 | $COARSE_TIMESTAMP1 $COARSE_TIMESTAMP2 228 | fi 229 | if [ "$COARSE_TESTING_PLANE" = "Y" ] || [ "$COARSE_TESTING_PLANE" = "A" ] 230 | then 231 | python 
coarse_testing.py \ 232 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 233 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 234 | $COARSE_TESTING_ORGAN_ID Y $COARSE_TESTING_GPU \ 235 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 236 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 237 | $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 238 | $COARSE_TESTING_STARTING_ITERATIONS $COARSE_TESTING_STEP \ 239 | $COARSE_TESTING_MAX_ITERATIONS \ 240 | $COARSE_TIMESTAMP1 $COARSE_TIMESTAMP2 241 | fi 242 | if [ "$COARSE_TESTING_PLANE" = "Z" ] || [ "$COARSE_TESTING_PLANE" = "A" ] 243 | then 244 | python coarse_testing.py \ 245 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 246 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 247 | $COARSE_TESTING_ORGAN_ID Z $COARSE_TESTING_GPU \ 248 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 249 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 250 | $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 251 | $COARSE_TESTING_STARTING_ITERATIONS $COARSE_TESTING_STEP \ 252 | $COARSE_TESTING_MAX_ITERATIONS \ 253 | $COARSE_TIMESTAMP1 $COARSE_TIMESTAMP2 254 | fi 255 | fi 256 | 257 | #################################################################################################### 258 | # the coarse-scaled fusion process 259 | # variables 260 | COARSE_FUSION_STARTING_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 261 | COARSE_FUSION_STEP=$TRAINING_STEP 262 | COARSE_FUSION_MAX_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 263 | COARSE_FUSION_THRESHOLD=0.5 264 | COARSE_TIMESTAMP1_X=_ 265 | COARSE_TIMESTAMP1_Y=_ 266 | COARSE_TIMESTAMP1_Z=_ 267 | COARSE_TIMESTAMP2_X=_ 268 | COARSE_TIMESTAMP2_Y=_ 269 | COARSE_TIMESTAMP2_Z=_ 270 | # coarse_fusion.py : data_path, current_fold, organ_number, low_range, high_range, 271 | # slice_threshold, slice_thickness, organ_ID, plane, GPU_ID, 272 | # learning_rate1, learning_rate_m1, learning_rate2, learning_rate_m2, margin, 273 | # starting_iterations, step, max_iterations, threshold, 274 | # timestamp1_X (optional), timestamp1_Y (optional), timestamp1_Z (optional), 275 | # timestamp2_X (optional), timestamp2_Y (optional), timestamp2_Z (optional) 276 | if [ "$ENABLE_COARSE_FUSION" = "1" ] 277 | then 278 | python coarse_fusion.py \ 279 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 280 | $SLICE_THRESHOLD $SLICE_THICKNESS $COARSE_TESTING_ORGAN_ID $COARSE_TESTING_GPU \ 281 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 $TRAINING_MARGIN \ 282 | $COARSE_FUSION_STARTING_ITERATIONS $COARSE_FUSION_STEP \ 283 | $COARSE_FUSION_MAX_ITERATIONS $COARSE_FUSION_THRESHOLD \ 284 | $COARSE_TIMESTAMP1_X $COARSE_TIMESTAMP1_Y $COARSE_TIMESTAMP1_Z \ 285 | $COARSE_TIMESTAMP2_X $COARSE_TIMESTAMP2_Y $COARSE_TIMESTAMP2_Z 286 | fi 287 | 288 | #################################################################################################### 289 | # the oracle testing processes 290 | # variables 291 | ORACLE_TESTING_STARTING_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 292 | ORACLE_TESTING_STEP=$TRAINING_STEP 293 | ORACLE_TESTING_MAX_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 294 | ORACLE_TIMESTAMP1=_ 295 | ORACLE_TIMESTAMP2=_ 296 | # oracle_testing.py : data_path, current_fold, organ_number, low_range, high_range, 297 | # slice_threshold, slice_thickness, organ_ID, plane, GPU_ID, 298 | # learning_rate1, learning_rate_m1, learning_rate2, learning_rate_m2, 299 | # margin, prob, sample_batch, 300 | # step, max_iterations1, max_iterations2, 301 | # 
starting_iterations, step, max_iterations, 302 | # timestamp1 (optional), timestamp2 (optional) 303 | if [ "$ENABLE_ORACLE_TESTING" = "1" ] 304 | then 305 | if [ "$ORACLE_TESTING_PLANE" = "X" ] || [ "$ORACLE_TESTING_PLANE" = "A" ] 306 | then 307 | python oracle_testing.py \ 308 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 309 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 310 | $ORACLE_TESTING_ORGAN_ID X $ORACLE_TESTING_GPU \ 311 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 312 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 313 | $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 314 | $ORACLE_TESTING_STARTING_ITERATIONS $ORACLE_TESTING_STEP \ 315 | $ORACLE_TESTING_MAX_ITERATIONS \ 316 | $ORACLE_TIMESTAMP1 $ORACLE_TIMESTAMP2 317 | fi 318 | if [ "$ORACLE_TESTING_PLANE" = "Y" ] || [ "$ORACLE_TESTING_PLANE" = "A" ] 319 | then 320 | python oracle_testing.py \ 321 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 322 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 323 | $ORACLE_TESTING_ORGAN_ID Y $ORACLE_TESTING_GPU \ 324 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 325 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 326 | $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 327 | $ORACLE_TESTING_STARTING_ITERATIONS $ORACLE_TESTING_STEP \ 328 | $ORACLE_TESTING_MAX_ITERATIONS \ 329 | $ORACLE_TIMESTAMP1 $ORACLE_TIMESTAMP2 330 | fi 331 | if [ "$ORACLE_TESTING_PLANE" = "Z" ] || [ "$ORACLE_TESTING_PLANE" = "A" ] 332 | then 333 | python oracle_testing.py \ 334 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 335 | $SLICE_THRESHOLD $SLICE_THICKNESS \ 336 | $ORACLE_TESTING_ORGAN_ID Z $ORACLE_TESTING_GPU \ 337 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 \ 338 | $TRAINING_MARGIN $TRAINING_PROB $TRAINING_SAMPLE_BATCH \ 339 | $TRAINING_MAX_ITERATIONS1 $TRAINING_MAX_ITERATIONS2 \ 340 | $ORACLE_TESTING_STARTING_ITERATIONS $ORACLE_TESTING_STEP \ 341 | $ORACLE_TESTING_MAX_ITERATIONS \ 342 | $ORACLE_TIMESTAMP1 $ORACLE_TIMESTAMP2 343 | fi 344 | fi 345 | 346 | #################################################################################################### 347 | # the oracle-scaled fusion process 348 | # variables 349 | ORACLE_FUSION_STARTING_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 350 | ORACLE_FUSION_STEP=$TRAINING_STEP 351 | ORACLE_FUSION_MAX_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 352 | ORACLE_FUSION_THRESHOLD=0.5 353 | ORACLE_TIMESTAMP1_X=_ 354 | ORACLE_TIMESTAMP1_Y=_ 355 | ORACLE_TIMESTAMP1_Z=_ 356 | ORACLE_TIMESTAMP2_X=_ 357 | ORACLE_TIMESTAMP2_Y=_ 358 | ORACLE_TIMESTAMP2_Z=_ 359 | # oracle_fusion.py : data_path, current_fold, organ_number, low_range, high_range, 360 | # slice_threshold, slice_thickness, organ_ID, plane, GPU_ID, 361 | # learning_rate1, learning_rate_m1, learning_rate2, learning_rate_m2, margin, 362 | # starting_iterations, step, max_iterations, threshold, 363 | # timestamp1_X (optional), timestamp1_Y (optional), timestamp1_Z (optional), 364 | # timestamp2_X (optional), timestamp2_Y (optional), timestamp2_Z (optional) 365 | if [ "$ENABLE_ORACLE_FUSION" = "1" ] 366 | then 367 | python oracle_fusion.py \ 368 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 369 | $SLICE_THRESHOLD $SLICE_THICKNESS $ORACLE_TESTING_ORGAN_ID $ORACLE_TESTING_GPU \ 370 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 $TRAINING_MARGIN \ 371 | $ORACLE_FUSION_STARTING_ITERATIONS $ORACLE_FUSION_STEP \ 372 | $ORACLE_FUSION_MAX_ITERATIONS 
$ORACLE_FUSION_THRESHOLD \ 373 | $ORACLE_TIMESTAMP1_X $ORACLE_TIMESTAMP1_Y $ORACLE_TIMESTAMP1_Z \ 374 | $ORACLE_TIMESTAMP2_X $ORACLE_TIMESTAMP2_Y $ORACLE_TIMESTAMP2_Z 375 | fi 376 | 377 | #################################################################################################### 378 | # the coarse-to-fine testing process 379 | # variables 380 | COARSE_FUSION_STARTING_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 381 | COARSE_FUSION_STEP=$TRAINING_STEP 382 | COARSE_FUSION_MAX_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 383 | COARSE_FUSION_THRESHOLD=0.5 384 | FINE_TESTING_STARTING_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 385 | FINE_TESTING_STEP=$TRAINING_STEP 386 | FINE_TESTING_MAX_ITERATIONS=$TRAINING_TOTAL_ITERATIONS 387 | FINE_FUSION_THRESHOLD=0.5 388 | COARSE2FINE_TIMESTAMP1_X=_ 389 | COARSE2FINE_TIMESTAMP1_Y=_ 390 | COARSE2FINE_TIMESTAMP1_Z=_ 391 | COARSE2FINE_TIMESTAMP2_X=_ 392 | COARSE2FINE_TIMESTAMP2_Y=_ 393 | COARSE2FINE_TIMESTAMP2_Z=_ 394 | MAX_ROUNDS=10 395 | # coarse2fine_testing.py : data_path, current_fold, organ_number, low_range, high_range, 396 | # slice_threshold, slice_thickness, organ_ID, GPU_ID, 397 | # learning_rate1, learning_rate_m1, learning_rate2, learning_rate_m2, margin, 398 | # coarse_fusion_starting_iterations, coarse_fusion_step, coarse_fusion_max_iterations, 399 | # coarse_fusion_threshold, coarse_fusion_code, 400 | # fine_starting_iterations, fine_step, fine_max_iterations, 401 | # fine_fusion_threshold, max_rounds 402 | # timestamp1_X (optional), timestamp1_Y (optional), timestamp1_Z (optional), 403 | # timestamp2_X (optional), timestamp2_Y (optional), timestamp2_Z (optional) 404 | if [ "$ENABLE_COARSE2FINE_TESTING" = "1" ] 405 | then 406 | python coarse2fine_testing.py \ 407 | $DATA_PATH $CURRENT_FOLD $ORGAN_NUMBER $LOW_RANGE $HIGH_RANGE \ 408 | $SLICE_THRESHOLD $SLICE_THICKNESS $COARSE2FINE_TESTING_ORGAN_ID $COARSE2FINE_TESTING_GPU \ 409 | $LEARNING_RATE1 $LEARNING_RATE_M1 $LEARNING_RATE2 $LEARNING_RATE_M2 $TRAINING_MARGIN \ 410 | $COARSE_FUSION_STARTING_ITERATIONS $COARSE_FUSION_STEP $COARSE_FUSION_MAX_ITERATIONS \ 411 | $COARSE_FUSION_THRESHOLD \ 412 | $FINE_TESTING_STARTING_ITERATIONS $FINE_TESTING_STEP $FINE_TESTING_MAX_ITERATIONS \ 413 | $FINE_FUSION_THRESHOLD $MAX_ROUNDS \ 414 | $COARSE2FINE_TIMESTAMP1_X $COARSE2FINE_TIMESTAMP1_Y $COARSE2FINE_TIMESTAMP1_Z \ 415 | $COARSE2FINE_TIMESTAMP2_X $COARSE2FINE_TIMESTAMP2_Y $COARSE2FINE_TIMESTAMP2_Z 416 | fi 417 | 418 | #################################################################################################### 419 | -------------------------------------------------------------------------------- /OrganSegRSTN/surgery.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import numpy as np 3 | import sys 4 | import caffe 5 | from joint_training import * 6 | 7 | 8 | sys.path.insert(0, CAFFE_root + 'python') 9 | 10 | 11 | def transplant(new_net, net, suffix = ''): 12 | for p in net.params: 13 | p_new = p + suffix 14 | if p_new not in new_net.params: 15 | print 'dropping', p 16 | continue 17 | for i in range(len(net.params[p])): 18 | if i > (len(new_net.params[p_new]) - 1): 19 | print 'dropping', p, i 20 | break 21 | if net.params[p][i].data.shape != new_net.params[p_new][i].data.shape: 22 | print 'coercing', p, i, 'from', net.params[p][i].data.shape, \ 23 | 'to', new_net.params[p_new][i].data.shape 24 | else: 25 | print 'copying', p, ' -> ', p_new, i 26 | new_net.params[p_new][i].data.flat = net.params[p][i].data.flat 27 | 28 | 29 | def 
expand_score(new_net, new_layer, net, layer): 30 | old_cl = net.params[layer][0].num 31 | new_net.params[new_layer][0].data[: old_cl][...] = net.params[layer][0].data 32 | new_net.params[new_layer][1].data[0, 0, 0, : old_cl][...] = net.params[layer][1].data 33 | 34 | 35 | def upsample_filt(size): 36 | factor = (size + 1) // 2 37 | if size % 2 == 1: 38 | center = factor - 1 39 | else: 40 | center = factor - 0.5 41 | og = np.ogrid[: size, : size] 42 | return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) 43 | 44 | 45 | def interp(net, layers): 46 | for l in layers: 47 | m, k, h, w = net.params[l][0].data.shape 48 | if m != k and k != 1: 49 | print 'input + output channels need to be the same or |output| == 1' 50 | raise 51 | if h != w: 52 | print 'filters need to be square' 53 | raise 54 | filt = upsample_filt(h) 55 | net.params[l][0].data[range(m), range(k), :, :] = filt 56 | -------------------------------------------------------------------------------- /OrganSegRSTN/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import math 5 | import fast_functions as ff 6 | 7 | 8 | #################################################################################################### 9 | # returning the binary label map by the organ ID (especially useful under overlapping cases) 10 | # label: the label matrix 11 | # organ_ID: the organ ID 12 | def is_organ(label, organ_ID): 13 | return label == organ_ID 14 | 15 | 16 | #################################################################################################### 17 | # determining if a sample belongs to the training set by the fold number 18 | # total_samples: the total number of samples 19 | # i: sample ID, an integer in [0, total_samples - 1] 20 | # folds: the total number of folds 21 | # current_fold: the current fold ID, an integer in [0, folds - 1] 22 | def in_training_set(total_samples, i, folds, current_fold): 23 | fold_remainder = folds - total_samples % folds 24 | fold_size = (total_samples - total_samples % folds) / folds 25 | start_index = fold_size * current_fold + max(0, current_fold - fold_remainder) 26 | end_index = fold_size * (current_fold + 1) + max(0, current_fold + 1 - fold_remainder) 27 | return not (i >= start_index and i < end_index) 28 | 29 | 30 | #################################################################################################### 31 | # returning the filename of the training set according to the current fold ID 32 | def training_set_filename(current_fold): 33 | return os.path.join(list_path, 'training_' + 'FD' + str(current_fold) + '.txt') 34 | 35 | 36 | #################################################################################################### 37 | # returning the filename of the testing set according to the current fold ID 38 | def testing_set_filename(current_fold): 39 | return os.path.join(list_path, 'testing_' + 'FD' + str(current_fold) + '.txt') 40 | 41 | 42 | #################################################################################################### 43 | # returning the filename of the log file 44 | def log_filename(snapshot_directory): 45 | count = 0 46 | while True: 47 | count += 1 48 | if count == 1: 49 | log_file_ = os.path.join(snapshot_directory, 'log.txt') 50 | else: 51 | log_file_ = os.path.join(snapshot_directory, 'log' + str(count) + '.txt') 52 | if not os.path.isfile(log_file_): 53 | return log_file_ 54 | 55 | 56 | 
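# Worked example for in_training_set() above, using a made-up total of 10 samples and the
# FOLDS=4 from run.sh: fold_size is 2 and the last two folds absorb the remainder, so the
# testing side of folds 0..3 is [0, 1], [2, 3], [4, 5, 6] and [7, 8, 9] respectively:
#     [[i for i in range(10) if not in_training_set(10, i, 4, f)] for f in range(4)]
#     == [[0, 1], [2, 3], [4, 5, 6], [7, 8, 9]]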
#################################################################################################### 57 | # determining if the loss values are reasonable (otherwise re-training is required) 58 | def valid_loss(log_file, iterations): 59 | FRACTION = 0.02 60 | loss_avg = 0.0 61 | loss_min = 1.0 62 | count = 0 63 | text = open(log_file, 'r').read().splitlines() 64 | for l in range(int(len(text) - iterations / 5 * FRACTION - 10), len(text)): 65 | index1 = text[l].find('Iteration') 66 | index2 = text[l].find('(') 67 | index3 = text[l].find('loss = ') 68 | if index1 > 0 and index2 > index1 and index3 > index2: 69 | iteration = int(text[l][index1 + 10: index2 - 1]) 70 | loss = float(text[l][index3 + 7: ]) 71 | if iteration >= iterations * (1 - FRACTION): 72 | loss_avg += loss 73 | loss_min = min(loss_min, loss) 74 | count += 1 75 | if count > 0: 76 | loss_avg /= count 77 | else: 78 | loss_avg = loss 79 | loss_min = loss 80 | return loss_avg < 0.4 and loss_min < 0.35 81 | 82 | 83 | #################################################################################################### 84 | # returning the snapshot filename according to the directory and the iteration count 85 | def snapshot_filename(snapshot_directory, t): 86 | return os.path.join(snapshot_directory, 'train_iter_' + str(t) + '.caffemodel') 87 | 88 | 89 | #################################################################################################### 90 | # returning the s-th latest timestamp that contains all the required snapshots 91 | def snapshot_name_from_timestamp_s(snapshot_path, current_fold, \ 92 | plane, stage_code, slice_thickness, organ_ID, iteration, timestamp, s): 93 | snapshot_prefix = 'FD' + str(current_fold) + ':' + plane + \ 94 | stage_code + str(slice_thickness) + '_' + str(organ_ID) + '_' 95 | if len(timestamp) == 15: 96 | snapshot_prefix = snapshot_prefix + timestamp 97 | if not os.path.isdir(snapshot_path): 98 | return '' 99 | directory = os.listdir(snapshot_path) 100 | directory.sort() 101 | found = False 102 | count = 0 103 | for name in reversed(directory): 104 | if snapshot_prefix in name: 105 | snapshot_directory = os.path.join(snapshot_path, name) 106 | valid = True 107 | for t in range(len(iteration)): 108 | snapshot_file = snapshot_filename(snapshot_directory, iteration[t]) 109 | if not os.path.isfile(snapshot_file): 110 | valid = False 111 | break 112 | if valid: 113 | count += 1 114 | if count == s: 115 | snapshot_name = name 116 | found = True 117 | break 118 | if found: 119 | return snapshot_name 120 | else: 121 | return '' 122 | 123 | 124 | #################################################################################################### 125 | # returning the latest timestamp that contains all the required snapshots 126 | def snapshot_name_from_timestamp(snapshot_path, \ 127 | current_fold, plane, stage_code, slice_thickness, organ_ID, iteration, timestamp): 128 | return snapshot_name_from_timestamp_s(snapshot_path, \ 129 | current_fold, plane, stage_code, slice_thickness, organ_ID, iteration, timestamp, 1) 130 | 131 | 132 | #################################################################################################### 133 | # returning the s-th latest timestamp that contains all the required snapshots (2-stage version) 134 | def snapshot_name_from_timestamp_2_s(snapshot_path1, snapshot_path2, current_fold, plane, \ 135 | stage_code1, stage_code2, slice_thickness, organ_ID, iteration, timestamp1, timestamp2, s): 136 | snapshot_prefix = 'FD' + str(current_fold) + ':' 137 | snapshot_str1 = 
plane + stage_code1 + str(slice_thickness) + '_' + str(organ_ID) + '_' 138 | if len(timestamp1) == 15: 139 | snapshot_str1 = snapshot_str1 + timestamp1 140 | snapshot_str2 = plane + stage_code2 + str(slice_thickness) + '_' + str(organ_ID) + '_' 141 | if len(timestamp2) == 15: 142 | snapshot_str2 = snapshot_str2 + timestamp2 143 | if not os.path.isdir(snapshot_path2): 144 | return ['', ''] 145 | directory2 = os.listdir(snapshot_path2) 146 | directory2.sort() 147 | found = False 148 | count = 0 149 | for name2 in reversed(directory2): 150 | if snapshot_prefix in name2 and snapshot_str1 in name2 and snapshot_str2 in name2: 151 | name1 = name2.split(',')[0] 152 | snapshot_directory1 = os.path.join(snapshot_path1, name1) 153 | snapshot_directory2 = os.path.join(snapshot_path2, name2) 154 | valid = True 155 | for t in range(len(iteration)): 156 | snapshot_file1 = snapshot_filename(snapshot_directory1, iteration[t]) 157 | snapshot_file2 = snapshot_filename(snapshot_directory2, iteration[t]) 158 | if (os.path.isfile(snapshot_file1) and os.path.isfile(snapshot_file2)) or \ 159 | (not os.path.isfile(snapshot_file1) and not os.path.isfile(snapshot_file2)): 160 | valid = False 161 | break 162 | if valid: 163 | count += 1 164 | if count == s: 165 | snapshot_name = [name1, name2] 166 | found = True 167 | break 168 | if found: 169 | return snapshot_name 170 | else: 171 | return ['', ''] 172 | 173 | 174 | #################################################################################################### 175 | # returning the latest timestamp that contains all the required snapshots (2-stage version) 176 | def snapshot_name_from_timestamp_2(snapshot_path1, snapshot_path2, current_fold, plane, \ 177 | stage_code1, stage_code2, slice_thickness, organ_ID, iteration, timestamp1, timestamp2): 178 | return snapshot_name_from_timestamp_2_s(snapshot_path1, snapshot_path2, current_fold, plane, \ 179 | stage_code1, stage_code2, slice_thickness, organ_ID, iteration, timestamp1, timestamp2, 1) 180 | 181 | 182 | #################################################################################################### 183 | # returning the volume filename as in the testing stage 184 | def volume_filename_testing(result_directory, t, i): 185 | return os.path.join(result_directory, str(t) + '_' + str(i + 1) + '.npz') 186 | 187 | 188 | #################################################################################################### 189 | # returning the volume filename as in the fusion stage 190 | def volume_filename_fusion(result_directory, code, i): 191 | return os.path.join(result_directory, code + '_' + str(i + 1) + '.npz') 192 | 193 | 194 | #################################################################################################### 195 | # returning the volume filename as in the coarse-to-fine testing stage 196 | def volume_filename_coarse2fine(result_directory, r, i): 197 | return os.path.join(result_directory, 'R' + str(r) + '_' + str(i + 1) + '.npz') 198 | 199 | 200 | #################################################################################################### 201 | # returning the s-th latest timestamp that contains all the required results 202 | def result_name_from_timestamp_s(result_path, current_fold, \ 203 | plane, stage_code, slice_thickness, organ_ID, iteration, volume_list, timestamp, s): 204 | result_prefix = 'FD' + str(current_fold) + ':' + plane + \ 205 | stage_code + str(slice_thickness) + '_' + str(organ_ID) + '_' 206 | if len(timestamp) == 15: 207 | result_prefix = result_prefix + 
timestamp 208 | if not os.path.isdir(result_path): 209 | return '' 210 | directory = os.listdir(result_path) 211 | directory.sort() 212 | found = False 213 | count = 0 214 | for name in reversed(directory): 215 | if result_prefix in name and not name.endswith('_'): 216 | result_directory = os.path.join(result_path, name, 'volumes') 217 | valid = True 218 | for t in range(len(iteration)): 219 | for i in range(len(volume_list)): 220 | volume_file = volume_filename_testing(result_directory, iteration[t], i) 221 | if not os.path.isfile(volume_file): 222 | valid = False 223 | break 224 | if not valid: 225 | break 226 | if valid: 227 | count += 1 228 | if count == s: 229 | result_name = name 230 | found = True 231 | break 232 | if found: 233 | return result_name 234 | else: 235 | return '' 236 | 237 | 238 | #################################################################################################### 239 | # returning the latest timestamp that contains all the required results 240 | def result_name_from_timestamp(result_path, current_fold, \ 241 | plane, stage_code, slice_thickness, organ_ID, iteration, volume_list, timestamp): 242 | return result_name_from_timestamp_s(result_path, current_fold, \ 243 | plane, stage_code, slice_thickness, organ_ID, iteration, volume_list, timestamp, 1) 244 | 245 | 246 | #################################################################################################### 247 | # returning the s-th latest timestamp that contains all the required results (2-stage version) 248 | def result_name_from_timestamp_2_s(result_path, \ 249 | current_fold, plane, stage_code1, stage_code2, slice_thickness, organ_ID, \ 250 | iteration, volume_list, timestamp1, timestamp2, s): 251 | result_prefix = 'FD' + str(current_fold) + ':' 252 | result_str1 = plane + stage_code1 + str(slice_thickness) + '_' + str(organ_ID) + '_' 253 | if len(timestamp1) == 15: 254 | result_str1 = result_str1 + timestamp1 255 | result_str2 = plane + stage_code2 + str(slice_thickness) + '_' + str(organ_ID) + '_' 256 | if len(timestamp2) == 15: 257 | result_str2 = result_str2 + timestamp2 258 | if not os.path.isdir(result_path): 259 | return '' 260 | directory = os.listdir(result_path) 261 | directory.sort() 262 | found = False 263 | count = 0 264 | for name in reversed(directory): 265 | if result_prefix in name and result_str1 in name and result_str2 in name: 266 | result_directory = os.path.join(result_path, name, 'volumes') 267 | valid = True 268 | for t in range(len(iteration)): 269 | for i in range(len(volume_list)): 270 | volume_file = volume_filename_testing(result_directory, iteration[t], i) 271 | if not os.path.isfile(volume_file): 272 | valid = False 273 | break 274 | if not valid: 275 | break 276 | if valid: 277 | count += 1 278 | if count == s: 279 | result_name = name 280 | found = True 281 | break 282 | if found: 283 | return result_name 284 | else: 285 | return '' 286 | 287 | 288 | #################################################################################################### 289 | # returning the latest timestamp that contains all the required results (2-stage version) 290 | def result_name_from_timestamp_2(result_path, \ 291 | current_fold, plane, stage_code1, stage_code2, slice_thickness, organ_ID, \ 292 | iteration, volume_list, timestamp1, timestamp2): 293 | return result_name_from_timestamp_2_s(result_path, \ 294 | current_fold, plane, stage_code1, stage_code2, slice_thickness, organ_ID, \ 295 | iteration, volume_list, timestamp1, timestamp2, 1) 296 | 297 | 298 | 
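# NOTE (illustrative comment, not in the original file): the snapshot/result directories searched by
# the functions above follow the naming pattern 'FD' + fold + ':' + plane + stage_code +
# slice_thickness + '_' + organ_ID + '_' + a 15-character timestamp (presumably YYYYMMDD_HHMMSS),
# e.g. a hypothetical FD0:XI3_1_20180101_120000 for fold 0, plane X, stage code I, thickness 3, organ 1.
# The 2-stage variants expect a single directory name containing both stage strings, with the first
# stage's name appearing before a comma (see the split(',') in snapshot_name_from_timestamp_2_s above).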
#################################################################################################### 299 | # computing the DSC together with other values based on the label and prediction volumes 300 | def DSC_computation(label, pred): 301 | P = np.zeros(3, dtype = np.uint32) 302 | ff.DSC_computation(label, pred, P) 303 | return 2 * float(P[2]) / (P[0] + P[1]), P[2], P[1], P[0] 304 | 305 | 306 | #################################################################################################### 307 | # post-processing: preserving the largest connected component(s) and discarding other voxels 308 | # The flood-fill algorithm is used to detect the connected components. 309 | # This function calls the C implementation in fast_functions for speedup. 310 | # F: a binary volume, the volume to be post-processed 311 | # S: a binary volume, the seed voxels (currently defined as those predicted as FG by all 3 views) 312 | # NOTE: a connected component will not be considered if it does not contain any seed voxels 313 | # threshold: a floating point number in [0, 1] determining if a connected component is accepted 314 | # NOTE: accepted if it is not smaller than the largest component's volume times this number 315 | # NOTE: 1 means to only keep the largest one(s), 0 means to keep all 316 | # organ_ID: passed in case that each organ needs to be dealt with differently 317 | def post_processing(F, S, threshold, organ_ID): 318 | ff.post_processing(F, S, threshold, False) 319 | return F 320 | 321 | 322 | #################################################################################################### 323 | # defining the common variables used throughout the entire flowchart 324 | data_path = sys.argv[1] 325 | image_path = os.path.join(data_path, 'images') 326 | image_path_ = {} 327 | for plane in ['X', 'Y', 'Z']: 328 | image_path_[plane] = os.path.join(data_path, 'images_' + plane) 329 | if not os.path.exists(image_path_[plane]): 330 | os.makedirs(image_path_[plane]) 331 | label_path = os.path.join(data_path, 'labels') 332 | label_path_ = {} 333 | for plane in ['X', 'Y', 'Z']: 334 | label_path_[plane] = os.path.join(data_path, 'labels_' + plane) 335 | if not os.path.exists(label_path_[plane]): 336 | os.makedirs(label_path_[plane]) 337 | list_path = os.path.join(data_path, 'lists') 338 | if not os.path.exists(list_path): 339 | os.makedirs(list_path) 340 | list_training = {} 341 | for plane in ['X', 'Y', 'Z']: 342 | list_training[plane] = os.path.join(list_path, 'training_' + plane + '.txt') 343 | CAFFE_root = os.path.join(data_path, 'libs', 'caffe-master') 344 | prototxt_path = os.path.join(data_path, 'prototxts') 345 | if not os.path.exists(prototxt_path): 346 | os.makedirs(prototxt_path) 347 | model_path = os.path.join(data_path, 'models') 348 | if not os.path.exists(model_path): 349 | os.makedirs(model_path) 350 | pretrained_model_path = os.path.join(data_path, 'models', 'pretrained') 351 | if not os.path.exists(pretrained_model_path): 352 | os.makedirs(pretrained_model_path) 353 | snapshot_path = os.path.join(data_path, 'models', 'snapshots') 354 | if not os.path.exists(snapshot_path): 355 | os.makedirs(snapshot_path) 356 | log_path = os.path.join(data_path, 'logs') 357 | if not os.path.exists(log_path): 358 | os.makedirs(log_path) 359 | result_path = os.path.join(data_path, 'results') 360 | if not os.path.exists(result_path): 361 | os.makedirs(result_path) 362 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # OrganSegRSTN: an end-to-end coarse-to-fine organ segmentation framework 2 | version 2.0 - Jul 31 2018 - by Qihang Yu, Yuyin Zhou and Lingxi Xie 3 | 4 | ### NOTEs: 5 | 6 | #### 1. v2.0 is a MAJOR update to v1.0, in which we: 7 | 8 | (1) slightly changed the network architecture (the score layers are removed and changed to a saliency layer), 9 | so that network training is more robust (especially on some tiny targets such as pancreatic cysts); 10 | 11 | (2) carefully optimized codes so that the testing stage becomes much more efficient, 12 | especially when you use multiple processes to run different folds or datasets; 13 | 14 | (3) re-implemented two functions "post-processing" and "DSC_computation" in C, which makes them much faster. 15 | 16 | Note that our pre-trained models are also updated. 17 | 18 | 19 | #### 2. If you are more familiar with PyTorch, take a look at [this repository](https://github.com/twni2016/OrganSegRSTN_PyTorch)! 20 | 21 | 22 | #### 3. **Qihang Yu and Yuyin Zhou are the main contributors to this repository.** 23 | 24 | Yuyin Zhou implemented [the original coarse-to-fine framework](https://github.com/198808xc/OrganSegC2F), 25 | Qihang Yu improved it to allow end-to-end training, and Lingxi Xie later wrapped up these codes for release. 26 | 27 | #### 4. If you use our codes, please cite our paper accordingly: 28 | 29 | **Qihang Yu**, Lingxi Xie, Yan Wang, Yuyin Zhou, Elliot K. Fishman, Alan L. Yuille, 30 | "Recurrent Saliency Transformation Network: Incorporating Multi-Stage Visual Cues for Small Organ Segmentation", 31 | in IEEE Conference on CVPR, Salt Lake City, Utah, USA, 2018. 32 | 33 | https://arxiv.org/abs/1709.04518 34 | 35 | ###### and possibly, our previous work (the basis of this work): 36 | 37 | **Yuyin Zhou**, Lingxi Xie, Wei Shen, Yan Wang, Elliot K. Fishman, Alan L. Yuille, 38 | "A Fixed-Point Model for Pancreas Segmentation in Abdominal CT Scans", 39 | in International Conference on MICCAI, Quebec City, Quebec, Canada, 2017. 40 | 41 | https://arxiv.org/abs/1612.08230 42 | 43 | All the materials released in this library can **ONLY** be used for **RESEARCH** purposes. 44 | 45 | The authors and their institution (JHU/JHMI) preserve the copyright and all legal rights of these codes. 46 | 47 | **Before you start, please note that there is a LAZY MODE, 48 | which allows you to run the entire framework with ONE click. 49 | Check the contents before Section 4.3 for details.** 50 | 51 | 52 | ## 1. Introduction 53 | 54 | OrganSegRSTN is a code package for our paper: 55 | 56 | **Qihang Yu**, Lingxi Xie, Yan Wang, Yuyin Zhou, Elliot K. Fishman, Alan L. Yuille, 57 | "Recurrent Saliency Transformation Network: Incorporating Multi-Stage Visual Cues for Small Organ Segmentation", 58 | in IEEE Conference on CVPR, Salt Lake City, Utah, USA, 2018. 59 | 60 | OrganSegRSTN is a segmentation framework designed for 3D volumes. 61 | It was originally designed for segmenting abdominal organs in CT scans, 62 | but we believe that it can also be used for other purposes, 63 | such as brain tissue segmentation in fMRI-scanned images. 64 | 65 | OrganSegRSTN is based on state-of-the-art deep learning techniques. 66 | This code package is to be used with CAFFE, a deep learning library. 67 | We make use of the python interface of CAFFE, named pyCAFFE. 68 | 69 | It is highly recommended to use one or more modern GPUs for computation. 70 | Using CPUs will take at least 50x more time in computation. 
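As a quick sanity check (a minimal sketch, not part of the released codes; it assumes the directory layout described in Section 4.1.3, where the CAFFE build is placed under $DATA_PATH/libs/caffe-master/), you can verify that pyCAFFE is importable before running any of the scripts:

```python
import os
import sys

# Hypothetical paths for illustration only; adjust to your own $DATA_PATH / $CAFFE_PATH.
DATA_PATH = '/path/to/your/DATA_PATH'
CAFFE_PATH = os.path.join(DATA_PATH, 'libs', 'caffe-master')

# pyCAFFE lives in the 'python' sub-directory of the CAFFE root.
sys.path.insert(0, os.path.join(CAFFE_PATH, 'python'))

import caffe
print(caffe.__file__)  # if this prints a path inside the CAFFE root, pyCAFFE is ready
```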
71 | 72 | **We provide an easy implementation in which the training stage has only 1 fine-scaled iteration. 73 | If you hope to add more, please modify the prototxt file accordingly. 74 | As we said in the paper, our strategy of using 1 stage in training and multiple iterations in testing works very well.** 75 | 76 | 77 | ## 2. File List 78 | 79 | | Folder/File | Description | 80 | |:--------------------------- |:---------------------------------------------------- | 81 | | `README.md` | the README file | 82 | | | | 83 | | **DATA2NPY/** | codes to transfer the NIH dataset into NPY format | 84 | | `dicom2npy.py` | transferring image data (DICOM) into NPY format | 85 | | `nii2npy.py` | transferring label data (NII) into NPY format | 86 | | | | 87 | | **DiceLossLayer/** | CPU implementation of the Dice loss layer | 88 | | `dice_loss_layer.hpp` | the header file | 89 | | `dice_loss_layer.cpp` | the CPU implementation | 90 | | | | 91 | | **OrganSegRSTN/** | primary codes of OrganSegRSTN | 92 | | `coarse2fine_testing.py` | the coarse-to-fine testing process | 93 | | `coarse_fusion.py` | the coarse-scaled fusion process | 94 | | `coarse_testing.py` | the coarse-scaled testing process | 95 | | `Crop.py` | the crop layer (cropping a region from the image) | 96 | | `Data.py` | the data layer | 97 | | `indiv_training.py` | training the coarse and fine stages individually | 98 | | `init.py` | the initialization functions | 99 | | `joint_training.py` | training the coarse and fine stages jointly | 100 | | `Uncrop.py` | the uncrop layer (putting the regional output back) | 101 | | `oracle_fusion.py` | the fusion process with oracle information | 102 | | `oracle_testing.py` | the testing process with oracle information | 103 | | `run.sh` | the main program to be called in bash shell | 104 | | `surgery.py` | the surgery function | 105 | | `utils.py` | the common functions | 106 | | | | 107 | | **OrganSegRSTN/prototxts/** | prototxt files of OrganSegRSTN | 108 | | `deploy_C3.prototxt` | the prototxt file for coarse-scaled testing | 109 | | `deploy_F3.prototxt` | the prototxt file for fine-scaled testing | 110 | | `deploy_O3.prototxt` | the prototxt file for oracle testing | 111 | | `training_I3x1.prototxt` | the prototxt file for individual training (1xLR) | 112 | | `training_I3x10.prototxt` | the prototxt file for individual training (10xLR) | 113 | | `training_J3x1.prototxt` | the prototxt file for joint training (1xLR) | 114 | | `training_J3x10.prototxt` | the prototxt file for joint training (10xLR) | 115 | | `training_S3x1.prototxt` | the prototxt file for separate training (1xLR) | 116 | | `training_S3x10.prototxt` | the prototxt file for separate training (10xLR) | 117 | | | | 118 | | **logs/** | training log files on the NIH dataset | 119 | 120 | 121 | The multiplier (1 or 10) applies to all the trainable layers in the fine stage of the framework. 122 | 123 | 124 | ## 3. Installation 125 | 126 | 127 | #### 3.1 Prerequisites 128 | 129 | ###### 3.1.1 Please make sure that your computer is equipped with modern GPUs that support CUDA. 130 | Without them, you will need 50x more time in both training and testing stages. 131 | 132 | ###### 3.1.2 Please also make sure that python (we are using 2.7) is installed. 133 | 134 | 135 | #### 3.2 CAFFE and pyCAFFE 136 | 137 | ###### 3.2.1 Download a CAFFE library from http://caffe.berkeleyvision.org/ . 138 | Suppose your CAFFE root directory is $CAFFE_PATH. 139 | 140 | ###### 3.2.2 Place the files of the Dice loss layer at the correct positions. 
141 | dice_loss_layer.hpp -> $CAFFE_PATH/include/caffe/layers/ 142 | dice_loss_layer.cpp -> $CAFFE_PATH/src/caffe/layers/ 143 | 144 | ###### 3.2.3 Make CAFFE and pyCAFFE. 145 | 146 | 147 | ## 4. Usage 148 | 149 | Please follow these steps to reproduce our results on the NIH pancreas segmentation dataset. 150 | 151 | **NOTE**: Here we only provide basic steps to run our codes on the NIH dataset. 152 | For more detailed analysis and empirical guidelines for parameter setting 153 | (this is very important especially when you are using our codes on other datasets), 154 | please refer to our technical report (check our webpage for updates). 155 | 156 | 157 | #### 4.1 Data preparation 158 | 159 | ###### 4.1.1 Download NIH data from https://wiki.cancerimagingarchive.net/display/Public/Pancreas-CT . 160 | You should be able to download image and label data individually. 161 | Suppose your data directory is $RAW_PATH: 162 | The image data are organized as $RAW_PATH/DOI/PANCREAS_00XX/A_LONG_CODE/A_LONG_CODE/ . 163 | The label data are organized as $RAW_PATH/TCIA_pancreas_labels-TIMESTAMP/label00XX.nii.gz . 164 | 165 | ###### 4.1.2 Use our codes to transfer these data into NPY format. 166 | Put dicom2npy.py under $RAW_PATH, and run: python dicom2npy.py . 167 | The transferred data should be put under $RAW_PATH/images/ 168 | Put nii2npy.py under $RAW_PATH, and run: python nii2npy.py . 169 | The transferred data should be put under $RAW_PATH/labels/ 170 | 171 | ###### 4.1.3 Suppose your directory to store experimental data is `$DATA_PATH`: 172 | Put $CAFFE_PATH under $DATA_PATH/libs/ 173 | Put images/ under $DATA_PATH/ 174 | Put labels/ under $DATA_PATH/ 175 | Download the scratch model below and put it under $DATA_PATH/models/pretrained/ 176 | 177 | [The scratch model](https://drive.google.com/open?id=1C7XPat4BhAHPA3azIssmc3cd7zJOmQ4k) - see the explanations in 4.2.3. 178 | 179 | NOTE: If you use other path(s), please modify the variable(s) in run.sh accordingly. 180 | 181 | 182 | #### 4.2 Initialization (requires: 4.1) 183 | 184 | ###### 4.2.1 Check `run.sh` and set $DATA_PATH accordingly. 185 | 186 | ###### 4.2.2 Set `$ENABLE_INITIALIZATION=1` and run this script. 187 | Several folders will be created under $DATA_PATH: 188 | $DATA_PATH/images_X|Y|Z/: the sliced image data (data are sliced for faster I/O). 189 | $DATA_PATH/labels_X|Y|Z/: the sliced label data (data are sliced for faster I/O). 190 | $DATA_PATH/lists/: used for storing training, testing and slice lists. 191 | $DATA_PATH/logs/: used for storing log files during the training process. 192 | $DATA_PATH/models/: used for storing models (snapshots) during the training process. 193 | $DATA_PATH/prototxts/: used for storing prototxts (called by training and testing nets). 194 | $DATA_PATH/results/: used for storing testing results (volumes and text results). 195 | According to the I/O speed of your hard drive, the time cost may vary. 196 | For a typical HDD, around 20 seconds are required for a 512x512x300 volume. 197 | This process needs to be executed only once. 198 | 199 | NOTE: if you are using another dataset which contains multiple targets, 200 | you can modify the variables "ORGAN_NUMBER" and "ORGAN_ID" in run.sh, 201 | as well as the "is_organ" function in utils.py to define your mapping function flexibly. 
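For example, here is a minimal sketch of such a customized mapping (illustrative only; it assumes a hypothetical dataset in which organ #1 is annotated with two label values, 1 and 3):

```python
import numpy as np

# A hypothetical variant of is_organ() in utils.py:
# organ_ID 1 also covers voxels labeled 3, while other organs keep the default one-to-one mapping.
def is_organ(label, organ_ID):
    if organ_ID == 1:
        return np.logical_or(label == 1, label == 3)
    return label == organ_ID
```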
202 | 203 | 204 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 205 | **LAZY MODE!** 206 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 207 | 208 | You can run all the following modules with **one** execution! 209 | * a) Enable everything (except initialization) in the beginning part. 210 | * b) Set all the "PLANE" variables as "A" (4 in total) in the following part. 211 | * c) Run this script! 212 | 213 | 214 | #### 4.3 Individual training (requires: 4.2) 215 | 216 | ###### 4.3.1 Check `run.sh` and set `$INDIV_TRAINING_PLANE` and `$INDIV_TRAINING_GPU`. 217 | You need to run X|Y|Z planes individually, so you can use 3 GPUs in parallel. 218 | You can also set INDIV_TRAINING_PLANE=A, so that the three planes are trained sequentially on one GPU. 219 | 220 | ###### 4.3.2 Set `$ENABLE_INDIV_TRAINING=1` and run this script. 221 | The following folders/files will be created: 222 | Under $DATA_PATH/logs/, a log file named by training information. 223 | Under $DATA_PATH/models/snapshots/, a folder named by training information. 224 | Snapshots and solver-states will be stored in this folder. 225 | The log file will also be copied into this folder after the entire training process. 226 | On the axial view (training image size is 512x512, small input images make training faster), 227 | every 20 iterations cost ~10s on a Titan-X Pascal GPU, or ~8s on a Titan-Xp GPU. 228 | As described in the code, we need ~40K iterations, which take less than 5 GPU-hours. 229 | After the training process, the log file will be copied to the snapshot directory. 230 | 231 | ###### 4.3.3 Important notes on initialization and model convergence. 232 | 233 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 234 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 235 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 236 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 237 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 238 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 239 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 240 | ![](https://github.com/198808xc/OrganSegRSTN/blob/master/icon.png) 241 | 242 | It is very important to provide a reasonable initialization for our model. 243 | In the previous step of data preparation, we provide a scratch model for the NIH dataset, 244 | in which both the coarse and fine stages are initialized using the weights of an FCN-8s model 245 | (please refer to the [FCN project](https://github.com/shelhamer/fcn.berkeleyvision.org)). 246 | This model was pre-trained on PASCAL VOC. 247 | We initialized all upsampling weights to be 0, as the number of channels does not align with that in PASCAL. 248 | 249 | The most important thing is to initialize three layers related to saliency transformation, 250 | which are named "score", "score-R" and "saliency" in our prototxts. 251 | In our solution, we use a Xavier filler to fill in the weights of the "score" and "score-R" layers, 252 | and an all-0 cube to fill in the weights of the "saliency" layer. 253 | For the bias term, we use an all-0 vector for "score" and "score-R", and an all-1 vector for "saliency". 254 | *We also set a restart mechanism after the first 10K iterations in case of non-convergence.* 255 | More than **95% of the time**, this mechanism leads to a successful convergence. 256 | 257 | ###### How to determine if a model converges and works well? 
258 | 259 | The loss function value at the beginning of training is almost 1.0. 260 | If a model converges, you should observe the loss function values decreasing gradually. 261 | **But in order to make it work well, in the last several epochs, 262 | you need to confirm that the average loss function value is sufficiently low (e.g. 0.15).** 263 | Here we attach the training logs for your reference; see the `logs/` folder (detailed in Section 5). 264 | 265 | ###### Training RSTN on other CT datasets? 266 | 267 | If you are experimenting on other **CT datasets**, we strongly recommend using a pre-trained model, 268 | such as the pre-trained models attached in the last part of this file. 269 | We also provide [a mixed model](http://nothing) (to be provided soon), 270 | which was tuned using all X|Y|Z images of 82 training samples for pancreas segmentation on NIH. 271 | Of course, do not use it to evaluate any NIH data, as all cases have been used for training. 272 | 273 | 274 | #### 4.4 Joint training (requires: 4.3) 275 | 276 | ###### 4.4.1 Check `run.sh` and set `$JOINT_TRAINING_PLANE` and `$JOINT_TRAINING_GPU`. 277 | You need to run X|Y|Z planes individually, so you can use 3 GPUs in parallel. 278 | You can also set JOINT_TRAINING_PLANE=A, so that the three planes are trained sequentially on one GPU. 279 | 280 | ###### 4.4.2 Set `$ENABLE_JOINT_TRAINING=1` and run this script. 281 | The following folders/files will be created: 282 | Under $DATA_PATH/logs/, a log file named by training information. 283 | Under $DATA_PATH/models/snapshots/, a folder named by training information. 284 | Snapshots and solver-states will be stored in this folder. 285 | The log file will also be copied into this folder after the entire training process. 286 | On the axial view (training image size is 512x512, small input images make training faster), 287 | every 20 iterations cost ~10s on a Titan-X Pascal GPU, or ~8s on a Titan-Xp GPU. 288 | As described in the paper, we need ~40K iterations, which take less than 5 GPU-hours. 289 | After the training process, the log file will be copied to the snapshot directory. 290 | 291 | 292 | #### 4.5 Coarse-scaled testing (requires: 4.4) 293 | 294 | ###### 4.5.1 Check `run.sh` and set `$COARSE_TESTING_PLANE` and `$COARSE_TESTING_GPU`. 295 | You need to run X|Y|Z planes individually, so you can use 3 GPUs in parallel. 296 | You can also set COARSE_TESTING_PLANE=A, so that the three planes are tested sequentially on one GPU. 297 | 298 | ###### 4.5.2 Set `$ENABLE_COARSE_TESTING=1` and run this script. 299 | The following folder will be created: 300 | Under $DATA_PATH/results/, a folder named by training information. 301 | Testing each volume costs ~30 seconds on a Titan-X Pascal GPU, or ~25s on a Titan-Xp GPU. 302 | 303 | 304 | #### 4.6 Coarse-scaled fusion (optional) (requires: 4.5) 305 | 306 | ###### 4.6.1 Fusion is performed on the CPU, and all X|Y|Z planes are combined and executed once. 307 | 308 | ###### 4.6.2 Set `$ENABLE_COARSE_FUSION=1` and run this script. 309 | The following folder will be created: 310 | Under $DATA_PATH/results/, a folder named by fusion information. 311 | The main cost in fusion includes I/O and post-processing (removing non-maximum components). 312 | In our future release, we will implement post-processing in C for acceleration. 313 | 314 | 315 | #### 4.7 Oracle testing (optional) (requires: 4.4) 316 | 317 | **NOTE**: Without this step, you can also run the coarse-to-fine testing process. 
318 | This stage is still recommended, so that you can check the quality of the fine-scaled models. 319 | 320 | ###### 4.7.1 Check `run.sh` and set `$ORACLE_TESTING_PLANE` and `$ORACLE_TESTING_GPU`. 321 | You need to run X|Y|Z planes individually, so you can use 3 GPUs in parallel. 322 | You can also set ORACLE_TESTING_PLANE=A, so that the three planes are tested sequentially on one GPU. 323 | 324 | ###### 4.7.2 Set `$ENABLE_ORACLE_TESTING=1` and run this script. 325 | The following folder will be created: 326 | Under $DATA_PATH/results/, a folder named by training information. 327 | Testing each volume costs ~10 seconds on a Titan-X Pascal GPU, or ~8s on a Titan-Xp GPU. 328 | 329 | 330 | #### 4.8 Oracle fusion (optional) (requires: 4.7) 331 | 332 | **NOTE**: Without this step, you can also run the coarse-to-fine testing process. 333 | This stage is still recommended, so that you can check the quality of the fine-scaled models. 334 | 335 | ###### 4.8.1 Fusion is performed on the CPU, and all X|Y|Z planes are combined and executed once. 336 | 337 | ###### 4.8.2 Set `$ENABLE_ORACLE_FUSION=1` and run this script. 338 | The following folder will be created: 339 | Under $DATA_PATH/results/, a folder named by fusion information. 340 | The main cost in fusion includes I/O and post-processing (removing non-maximum components). 341 | In our future release, we will implement post-processing in C for acceleration. 342 | 343 | 344 | #### 4.9 Coarse-to-fine testing (requires: 4.5) 345 | 346 | ###### 4.9.1 Check `run.sh` and set `$COARSE2FINE_TESTING_GPU`. 347 | Fusion is performed on the CPU and all X|Y|Z planes are combined. 348 | Currently the X|Y|Z testing processes are executed on one GPU, but this is not time-consuming. 349 | 350 | ###### 4.9.2 Set `$ENABLE_COARSE2FINE_TESTING=1` and run this script. 351 | The following folder will be created: 352 | Under $DATA_PATH/results/, a folder named by coarse-to-fine information (very long). 353 | This function calls both fine-scaled testing and fusion codes, so both GPU and CPU are used. 354 | In our future release, we will implement post-processing in C for acceleration. 355 | 356 | **NOTE**: currently we set the maximal number of iteration rounds to 10 in order to observe the convergence. 357 | Most often, it reaches an inter-DSC of >99% after 3-5 iterations. 358 | If you hope to save time, you can slightly modify the codes in coarse2fine_testing.py. 359 | Testing each volume costs ~40 seconds on a Titan-X Pascal GPU, or ~32s on a Titan-Xp GPU. 360 | If you set the threshold to be 99%, this stage will be done within 2 minutes (on average). 361 | 362 | 363 | Congratulations! You have finished the entire process. Check your results now! 364 | 365 | 366 | ## 5. Pre-trained Models on the NIH Dataset 367 | 368 | **NOTE**: all these models were trained following our default settings. 369 | 370 | The 82 cases in the NIH dataset are split into 4 folds: 371 | * **Fold #0**: testing on Cases 01, 02, ..., 20; 372 | * **Fold #1**: testing on Cases 21, 22, ..., 40; 373 | * **Fold #2**: testing on Cases 41, 42, ..., 61; 374 | * **Fold #3**: testing on Cases 62, 63, ..., 82. 375 | 376 | We provide the individually-trained models on each plane of each fold, 12 files in total. 377 | 378 | Each of these models is around 1.03GB, approximately the size of two (coarse+fine) FCN models. 
379 | * **Fold #0**: [[X]](https://drive.google.com/open?id=1ILrnkXh7CEDHKJM7dMHrX23S-ecAjH1Q) 380 | [[Y]](https://drive.google.com/open?id=1hNh6jQhuyN6H-sGOA10llGk-J_dW6R6W) 381 | [[Z]](https://drive.google.com/open?id=1wvvv_lSahREzSHh8yUuoEZ3rECWYW4oh) 382 | (**Accuracy**: coarse-to-fine 84.44%) 383 | * **Fold #1**: [[X]](https://drive.google.com/open?id=15KSS84Z63C2y4me6ALtm_aUBMVbf-0Jq) 384 | [[Y]](https://drive.google.com/open?id=1WHdrsYIGvaXuCcl1PWvjNC9HN7o6Ubbb) 385 | [[Z]](https://drive.google.com/open?id=10HNIObWNMZ-kk4iG7Tr7fECRmcJBslkL) 386 | (**Accuracy**: coarse-to-fine 84.35%) 387 | * **Fold #2**: [[X]](https://drive.google.com/open?id=1uL-F-hvACbykoTPeAdSU-3Yq4i9NmHeX) 388 | [[Y]](https://drive.google.com/open?id=1admx2RM_3L8p3T33pdnDu2NMoWey59G9) 389 | [[Z]](https://drive.google.com/open?id=1lCI4jO9TorEdfL0eP7Qqjw8c6qmVfWMB) 390 | (**Accuracy**: coarse-to-fine 84.12%) 391 | * **Fold #3**: [[X]](https://drive.google.com/open?id=1vqoiZ2xgySJCk2_9FhFMbAJCa8IuuI26) 392 | [[Y]](https://drive.google.com/open?id=1JM82TZYRl8HhXnTpjFxy96T3hfGge9XV) 393 | [[Z]](https://drive.google.com/open?id=11N_xDMvx6NvuyzSUxLZUItU31nl2if0C) 394 | (**Accuracy**: coarse-to-fine 85.43%) 395 | * Average accuracy over 82 cases: 84.59%. 396 | 397 | *We ran our codes several times, and the average accuracy varies between 84.4% and 84.6%.* 398 | 399 | If you encounter any problems in downloading these files, please contact Lingxi Xie (198808xc@gmail.com). 400 | 401 | We also attach the log files for your reference here. Please refer to the `logs/` folder. 402 | 403 | 404 | ## 6. Versions 405 | 406 | The current version is v2.0. 407 | 408 | 409 | ## 7. Contact Information 410 | 411 | If you encounter any problems in using these codes, please open an issue in this repository. 412 | You may also contact Qihang Yu (yucornetto@gmail.com) or Lingxi Xie (198808xc@gmail.com). 413 | 414 | Thanks for your interest! Have fun! 415 | 416 | -------------------------------------------------------------------------------- /icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/198808xc/OrganSegRSTN/e78e44c5ed85e5871a6a74dc7d2845de166908f7/icon.png --------------------------------------------------------------------------------