├── model ├── inception_residual_train_prediction_1fm │ ├── valid_file.txt │ ├── label_class_selection.prototxt │ ├── run_save_average_prediction.m │ ├── train.sh │ ├── predict.sh │ ├── train_file.txt │ ├── generate_16_average_probs.m │ ├── predict_16models.sh │ ├── solver.prototxt │ ├── de_augment_data.m │ ├── combinePredicctionSlice.m │ └── deploy.prototxt ├── inception_residual_train_prediction_3fm │ ├── valid_file.txt │ ├── label_class_selection.prototxt │ ├── run_save_average_prediction.m │ ├── train.sh │ ├── generate_16_average_probs.m │ ├── predict_16models.sh │ ├── train_file.txt │ ├── slice_tiff_stack.m │ ├── solver.prototxt │ ├── de_augment_data.m │ ├── combinePredicctionSlice.m │ ├── run_segmentation_on_valid_set.m │ └── run_segmentation_on_test_sets.m └── inception_resiudal_train_prediciton_5fm │ ├── valid_file.txt │ ├── label_class_selection.prototxt │ ├── run_save_average_prediction.m │ ├── train.sh │ ├── generate_16_average_probs.m │ ├── train_file.txt │ ├── predict_16models.sh │ ├── solver.prototxt │ ├── de_augment_data.m │ ├── run_segmentation_train.m │ ├── combinePredicctionSlice.m │ └── run_segmentation_test.m ├── .gitmodules ├── scripts ├── full_fill.m ├── ReplacePixelsWithModeNew.m ├── make_submit_tiff.m ├── my_border_labeling.m ├── write_label2rgb_image.m ├── de_augment_data.m ├── create_new_vertical_closed_label.m ├── SNEMI3D_metrics.m ├── augment_data.m └── read_data_write_data_with_enhanced_labels.m ├── README.md └── LICENSE /model/inception_residual_train_prediction_1fm/valid_file.txt: -------------------------------------------------------------------------------- 1 | ../../data/snemi3d_valid_v1.h5 -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/valid_file.txt: -------------------------------------------------------------------------------- 1 | ../../data/snemi3d_valid_v1.h5 -------------------------------------------------------------------------------- /model/inception_resiudal_train_prediciton_5fm/valid_file.txt: -------------------------------------------------------------------------------- 1 | ../../data/snemi3d_valid_v1.h5 -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "caffe_nd_sense_segmentation"] 2 | path = caffe_nd_sense_segmentation 3 | url = https://github.com/Xiaomi2008/caffe_nd_sense_segmentation 4 | -------------------------------------------------------------------------------- /scripts/full_fill.m: -------------------------------------------------------------------------------- 1 | function x=full_fill(x) 2 | count =0; 3 | %while(length(find(x==0)) > 0) 4 | while(sum(x(:)==0) > 0) 5 | %x = ReplacePixelsWithModeNew(x, find(x==0)); 6 | x = ReplacePixelsWithModeNew(x, x==0); 7 | count =count+1; 8 | %disp(['count loop = ' num2str(count)]) 9 | %imshow(label2rgb(x)) 10 | end 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_1fm/label_class_selection.prototxt: -------------------------------------------------------------------------------- 1 | ignore_rest_of_label:true 2 | rest_of_label_mapping:false; 3 | rest_of_label_mapping_label: 1; 4 | rest_of_label_prob : 0.5; 5 | label_prob_mapping_info{ 6 | label: 0 7 | prob: 1 8 | } 9 | label_prob_mapping_info{ 10 | label: 1 11 | prob: 0.3 12 | } 13 | 
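label_class_selection.prototxt (identical in all three model directories) configures the custom label-sampling layer in caffe_nd_sense_segmentation. One plausible reading, given the heavy class imbalance in boundary maps, is that label-0 pixels always contribute to the loss while label-1 pixels are kept with probability 0.3, a standard class-rebalancing trick; the exact semantics live in the submodule. A minimal MATLAB sketch of that interpretation, on synthetic labels (not part of the repo):

    % Hypothetical illustration of per-class pixel sampling, mirroring
    % label: 0 / prob: 1 and label: 1 / prob: 0.3 from the prototxt above.
    labels = randi([0 1], 512, 512);              % synthetic 0/1 label image
    keep_prob = ones(size(labels));               % label 0 -> kept with prob 1
    keep_prob(labels == 1) = 0.3;                 % label 1 -> kept with prob 0.3
    loss_mask = rand(size(labels)) < keep_prob;   % pixels that enter the loss
    fprintf('kept %.1f%% of label-1 pixels\n', 100*mean(loss_mask(labels==1)));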
-------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/label_class_selection.prototxt: -------------------------------------------------------------------------------- 1 | ignore_rest_of_label:true 2 | rest_of_label_mapping:false; 3 | rest_of_label_mapping_label: 1; 4 | rest_of_label_prob : 0.5; 5 | label_prob_mapping_info{ 6 | label: 0 7 | prob: 1 8 | } 9 | label_prob_mapping_info{ 10 | label: 1 11 | prob: 0.3 12 | } 13 | -------------------------------------------------------------------------------- /model/inception_resiudal_train_prediciton_5fm/label_class_selection.prototxt: -------------------------------------------------------------------------------- 1 | ignore_rest_of_label:true 2 | rest_of_label_mapping:false; 3 | rest_of_label_mapping_label: 1; 4 | rest_of_label_prob : 0.5; 5 | label_prob_mapping_info{ 6 | label: 0 7 | prob: 1 8 | } 9 | label_prob_mapping_info{ 10 | label: 1 11 | prob: 0.3 12 | } 13 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_1fm/run_save_average_prediction.m: -------------------------------------------------------------------------------- 1 | folder='predict' 2 | average=generate_16_average_probs(folder); 3 | save_mat_file=[folder filesep 'ave_probs.mat']; 4 | save_h5_file=[folder filesep 'ave_probs.h5']; 5 | save(save_mat_file,'average','-v7.3'); 6 | p=single(average); 7 | d_details.location = '/'; 8 | d_details.Name = 'probabilities'; 9 | hdf5write(save_h5_file,d_details,p); -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/run_save_average_prediction.m: -------------------------------------------------------------------------------- 1 | folder='predict' 2 | average=generate_16_average_probs(folder); 3 | save_mat_file=[folder filesep 'ave_probs.mat']; 4 | save_h5_file=[folder filesep 'ave_probs.h5']; 5 | save(save_mat_file,'average','-v7.3'); 6 | p=single(average); 7 | d_details.location = '/'; 8 | d_details.Name = 'probabilities'; 9 | hdf5write(save_h5_file,d_details,p); -------------------------------------------------------------------------------- /model/inception_resiudal_train_prediciton_5fm/run_save_average_prediction.m: -------------------------------------------------------------------------------- 1 | folder='predict' 2 | average=generate_16_average_probs(folder); 3 | save_mat_file=[folder filesep 'ave_probs.mat']; 4 | save_h5_file=[folder filesep 'ave_probs.h5']; 5 | save(save_mat_file,'average','-v7.3'); 6 | p=single(average); 7 | d_details.location = '/'; 8 | d_details.Name = 'probabilities'; 9 | hdf5write(save_h5_file,d_details,p); -------------------------------------------------------------------------------- /scripts/ReplacePixelsWithModeNew.m: -------------------------------------------------------------------------------- 1 | function res=ReplacePixelsWithModeNew(im, pixels) 2 | pad = padarray(im, [1 1]); 3 | c = im2col(pad, [3 3], 'sliding'); 4 | %c(find(c==0)) = NaN; 5 | c(c==0) = NaN; 6 | md = mode(c); 7 | md = reshape(md, size(im)); 8 | %md(find(isnan(md))) = 0; 9 | md(isnan(md)) = 0; 10 | 11 | res = im; 12 | res(pixels) = md(pixels); 13 | res(isnan(res)) = 0; 14 | %res(find(isnan(res))) = 0; 15 | -------------------------------------------------------------------------------- /scripts/make_submit_tiff.m: -------------------------------------------------------------------------------- 1 | function 
make_submit_tiff(vol,prefix)
2 | submit_dir ='submit';
3 | if ~exist(submit_dir,'dir')
4 | mkdir(submit_dir);
5 | end
6 | for i=1:size(vol,3)
7 | meta_file=[prefix num2str(i) '.mha'];
8 | meta_file=[submit_dir filesep meta_file];
9 | 
10 | writemeta(meta_file,squeeze(vol(:,:,i)));
11 | end
12 | read_prefix=[submit_dir filesep prefix];
13 | write32bitTiff(read_prefix);
14 | 
15 | end
--------------------------------------------------------------------------------
/model/inception_residual_train_prediction_1fm/train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | caffe_path=../../../caffe_nd_sense_segmentation/.build_release/tools
3 | log_dir=LOG
4 | GLOG_log_dir=$log_dir $caffe_path/caffe.bin train \
5 | --solver=solver.prototxt \
6 | --gpu 6 \
7 | --snapshot=trained_weights/inception_fcn_mscal_classifier_iter_12470.solverstate
8 | #--gpu 3
9 | #--weights=../../trained_temp/bigneuron_7fm_vgg_init_deconv_iter_391.caffemodel \
--------------------------------------------------------------------------------
/scripts/my_border_labeling.m:
--------------------------------------------------------------------------------
1 | function lb=my_border_labeling(data)
2 | b_size=size(data);
3 | x=round(b_size(1)/2);
4 | y=round(b_size(2)/2);
5 | 
6 | if data(x,y)==0
7 | lb =0;
8 | return
9 | end
10 | 
11 | uq=unique(data);
12 | if uq(1)==0
13 | uq(1)=[];
14 | end
15 | 
16 | 
17 | if ~isempty(uq) && length(uq)>1
18 | lb= 0;
19 | else
20 | lb=1;
21 | end
22 | 
23 | %data(data~=0)=1;
24 | %
25 | 
26 | 
27 | end
--------------------------------------------------------------------------------
/model/inception_residual_train_prediction_1fm/predict.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-7.0/lib64
3 | caffe_path=/tempspace/tzeng/caffe_nd_sense_segmetation/build/tools
4 | GLOG_logtostderr=1 $caffe_path/predict_seg_new.bin \
5 | --model=deploy.prototxt \
6 | --weights=trained_weights/inception_fcn_mscal_classifier_iter_12000.caffemodel \
7 | --data=../../data/snemi3d_test.h5 \
8 | --predict=predict_single/snemi3d_valid.h5 \
9 | --shift_axis=2 \
10 | --shift_stride=1 \
11 | --gpu=0
12 | 
13 | #snemi3d_valid.h5
14 | 
15 | #snemi3d_test.h5
16 | 
--------------------------------------------------------------------------------
/model/inception_residual_train_prediction_3fm/train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | caffe_path=../../../caffe_nd_sense_segmentation/.build_release/tools
3 | log_dir=LOG
4 | GLOG_log_dir=$log_dir $caffe_path/caffe.bin train \
5 | --solver=solver.prototxt \
6 | --gpu 3 \
7 | --weights=trained_weights/inception_fcn_mscal_classifier_fullstacks_train_iter_44000.caffemodel
8 | #--snapshot=trained_weights/inception_fcn_mscal_classifier_fullstacks_train_iter_1322.solverstate
9 | #--snapshot=trained_weights/inception_fcn_iter_20069.solverstate \
10 | #--gpu 3
11 | #--weights=../../trained_temp/bigneuron_7fm_vgg_init_deconv_iter_391.caffemodel \
--------------------------------------------------------------------------------
/model/inception_resiudal_train_prediciton_5fm/train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | caffe_path=../../../caffe_nd_sense_segmentation/.build_release/tools
3 | log_dir=LOG
4 | GLOG_log_dir=$log_dir $caffe_path/caffe.bin train \
5 | --solver=solver.prototxt \
6 | --gpu
4 \
7 | --weights=trained_weights/inception_fcn_mscal_classifier_fullstacks_train_iter_44000.caffemodel
8 | #--snapshot=trained_weights/inception_fcn_mscal_classifier_fullstacks_train_iter_1322.solverstate
9 | #--snapshot=trained_weights/inception_fcn_iter_20069.solverstate \
10 | #--gpu 3
11 | #--weights=../../trained_temp/bigneuron_7fm_vgg_init_deconv_iter_391.caffemodel \
--------------------------------------------------------------------------------
/model/inception_residual_train_prediction_1fm/train_file.txt:
--------------------------------------------------------------------------------
1 | ../../data/snemi3d_train_v1.h5
2 | ../../data/snemi3d_train_v2.h5
3 | ../../data/snemi3d_train_v3.h5
4 | ../../data/snemi3d_train_v4.h5
5 | ../../data/snemi3d_train_v5.h5
6 | ../../data/snemi3d_train_v6.h5
7 | ../../data/snemi3d_train_v7.h5
8 | ../../data/snemi3d_train_v8.h5
9 | ../../data/snemi3d_train_v9.h5
10 | ../../data/snemi3d_train_v10.h5
11 | ../../data/snemi3d_train_v11.h5
12 | ../../data/snemi3d_train_v12.h5
13 | ../../data/snemi3d_train_v13.h5
14 | ../../data/snemi3d_train_v14.h5
15 | ../../data/snemi3d_train_v15.h5
16 | ../../data/snemi3d_train_v16.h5
--------------------------------------------------------------------------------
/model/inception_residual_train_prediction_1fm/generate_16_average_probs.m:
--------------------------------------------------------------------------------
1 | function average=generate_16_average_probs(folder)
2 | addpath('../../scripts')
3 | for i=1:16
4 | folder_name=[folder filesep 'v' num2str(i)];
5 | prob=combinePredicctionSlice(folder_name);
6 | data{i}=prob;
7 | end
8 | 
9 | average=de_augment_data(data);
10 | tiff_file_save=[folder filesep 'ave_16.tiff'];
11 | mx_im=max(average(:));
12 | for i=1:size(average,3)
13 | b=average(:,:,i);
14 | im=255-uint8(b*(255/mx_im));
15 | imwrite(im,tiff_file_save,'WriteMode','append');
16 | disp(['write #' num2str(i) ' image ... ' tiff_file_save]);
17 | end
18 | end
--------------------------------------------------------------------------------
/model/inception_residual_train_prediction_3fm/generate_16_average_probs.m:
--------------------------------------------------------------------------------
1 | function average=generate_16_average_probs(folder)
2 | addpath('../../scripts')
3 | for i=1:16
4 | folder_name=[folder filesep 'v' num2str(i)];
5 | prob=combinePredicctionSlice(folder_name);
6 | data{i}=prob;
7 | end
8 | 
9 | average=de_augment_data(data);
10 | tiff_file_save=[folder filesep 'ave_16.tiff'];
11 | mx_im=max(average(:));
12 | for i=1:size(average,3)
13 | b=average(:,:,i);
14 | im=255-uint8(b*(255/mx_im));
15 | imwrite(im,tiff_file_save,'WriteMode','append');
16 | disp(['write #' num2str(i) ' image ... ' tiff_file_save]);
17 | end
18 | end
--------------------------------------------------------------------------------
/model/inception_resiudal_train_prediciton_5fm/generate_16_average_probs.m:
--------------------------------------------------------------------------------
1 | function average=generate_16_average_probs(folder)
2 | addpath('../../scripts')
3 | for i=1:16
4 | folder_name=[folder filesep 'v' num2str(i)];
5 | prob=combinePredicctionSlice(folder_name);
6 | data{i}=prob;
7 | end
8 | 
9 | average=de_augment_data(data);
10 | tiff_file_save=[folder filesep 'ave_16.tiff'];
11 | mx_im=max(average(:));
12 | for i=1:size(average,3)
13 | b=average(:,:,i);
14 | im=255-uint8(b*(255/mx_im));
15 | imwrite(im,tiff_file_save,'WriteMode','append');
16 | disp(['write #' num2str(i) ' image ...
' tiff_file_save]); 17 | end 18 | end -------------------------------------------------------------------------------- /scripts/write_label2rgb_image.m: -------------------------------------------------------------------------------- 1 | function write_label2rgb_image(L,fuse_im,file_prefix) 2 | lable_rgb = label2rgb3d(L,'hsv','w','Shuffle'); 3 | for K=1:length(L(1, 1, :)) 4 | %outputFileName = sprintf('img_%d.tif',K); 5 | %imwrite(label2rgb(L_test(:, :, K)), outputFileName); 6 | 7 | %imwrite(squeeze(lable_rgb(:,:,K,:),outoutFiileName)); 8 | im=squeeze(lable_rgb(:,:,K,:)); 9 | if nargin >1 10 | f_im=squeeze(fuse_im(:,:,K,:)); 11 | im = imfuse(im,f_im,'blend','Scaling','joint'); 12 | end 13 | if nargin >2 14 | file=[file_prefix '.tiff']; 15 | else 16 | file='img.tiff'; 17 | end 18 | imwrite(im,file,'WriteMode','append'); 19 | end 20 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/predict_16models.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # need bash shell 3 | caffe_path=/tempspace/tzeng/caffe_nd_sense_segmetation/build/tools 4 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-7.0/lib64 5 | for idx in {1..16..1} 6 | do 7 | predict_dir=predict/v$idx; 8 | if [ ! -d "$predict_dir" ]; then 9 | # Control will enter here if $DIRECTORY doesn't exist. 10 | mkdir "predict/v$idx" 11 | fi 12 | GLOG_logtostderr=1 $caffe_path/predict_seg_new.bin \ 13 | --model=deploy.prototxt \ 14 | --weights=trained_weights/inception_fcn_mscal_classifier_fullstacks_train_iter_50000.caffemodel \ 15 | --data=../../data/snemi3d_test_v$idx.h5 \ 16 | --predict=$predict_dir/test.h5 \ 17 | --shift_axis=2 \ 18 | --shift_stride=1 \ 19 | --gpu=0 20 | 21 | done 22 | 23 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/train_file.txt: -------------------------------------------------------------------------------- 1 | ../../data/snemi3d_train_full_stacks_v1.h5 2 | ../../data/snemi3d_train_full_stacks_v2.h5 3 | ../../data/snemi3d_train_full_stacks_v3.h5 4 | ../../data/snemi3d_train_full_stacks_v4.h5 5 | ../../data/snemi3d_train_full_stacks_v5.h5 6 | ../../data/snemi3d_train_full_stacks_v6.h5 7 | ../../data/snemi3d_train_full_stacks_v7.h5 8 | ../../data/snemi3d_train_full_stacks_v8.h5 9 | ../../data/snemi3d_train_full_stacks_v9.h5 10 | ../../data/snemi3d_train_full_stacks_v10.h5 11 | ../../data/snemi3d_train_full_stacks_v11.h5 12 | ../../data/snemi3d_train_full_stacks_v12.h5 13 | ../../data/snemi3d_train_full_stacks_v13.h5 14 | ../../data/snemi3d_train_full_stacks_v14.h5 15 | ../../data/snemi3d_train_full_stacks_v15.h5 16 | ../../data/snemi3d_train_full_stacks_v16.h5 -------------------------------------------------------------------------------- /model/inception_resiudal_train_prediciton_5fm/train_file.txt: -------------------------------------------------------------------------------- 1 | ../../data/snemi3d_train_full_stacks_v1.h5 2 | ../../data/snemi3d_train_full_stacks_v2.h5 3 | ../../data/snemi3d_train_full_stacks_v3.h5 4 | ../../data/snemi3d_train_full_stacks_v4.h5 5 | ../../data/snemi3d_train_full_stacks_v5.h5 6 | ../../data/snemi3d_train_full_stacks_v6.h5 7 | ../../data/snemi3d_train_full_stacks_v7.h5 8 | ../../data/snemi3d_train_full_stacks_v8.h5 9 | ../../data/snemi3d_train_full_stacks_v9.h5 10 | ../../data/snemi3d_train_full_stacks_v10.h5 11 | ../../data/snemi3d_train_full_stacks_v11.h5 12 | 
../../data/snemi3d_train_full_stacks_v12.h5 13 | ../../data/snemi3d_train_full_stacks_v13.h5 14 | ../../data/snemi3d_train_full_stacks_v14.h5 15 | ../../data/snemi3d_train_full_stacks_v15.h5 16 | ../../data/snemi3d_train_full_stacks_v16.h5 -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_1fm/predict_16models.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # need bash shell 3 | caffe_path=/tempspace/tzeng/caffe_nd_sense_segmetation/build/tools 4 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-7.0/lib64 5 | for idx in {1..16..1} 6 | do 7 | predict_dir=predict/v$idx; 8 | if [ ! -d "$predict_dir" ]; then 9 | # Control will enter here if $DIRECTORY doesn't exist. 10 | mkdir "predict/v$idx" 11 | fi 12 | GLOG_logtostderr=1 $caffe_path/predict_seg_new.bin \ 13 | --model=deploy.prototxt \ 14 | --weights=trained_weights/inception_fcn_mscal_classifier_iter_50000.caffemodel \ 15 | --data=../../data/snemi3d_train_full_stacks_v$idx.h5 \ 16 | --predict=$predict_dir/test.h5 \ 17 | --shift_axis=2 \ 18 | --shift_stride=1 \ 19 | --gpu=0 20 | 21 | done 22 | #snemi3d_test_v$idx.h5 23 | 24 | #snemi3d_train_full_stacks_v$idx.h5 25 | 26 | #snemi3d_valid.h5 27 | #snemi3d_test_last10slice.h5 28 | -------------------------------------------------------------------------------- /model/inception_resiudal_train_prediciton_5fm/predict_16models.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # need bash shell 3 | caffe_path=/tempspace/tzeng/caffe_nd_sense_segmetation/build/tools 4 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-7.0/lib64 5 | for idx in {1..16..1} 6 | do 7 | predict_dir=predict/v$idx; 8 | if [ ! -d "$predict_dir" ]; then 9 | # Control will enter here if $DIRECTORY doesn't exist. 
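# Note: the 16 inputs (snemi3d_test_v1..v16.h5) are the 8 rotation/flip
# variants of the stack produced by scripts/augment_data.m, plus the same
# 8 variants with the slice order reversed; de_augment_data.m later undoes
# each transform before averaging the 16 probability maps.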
10 | mkdir "predict/v$idx"
11 | fi
12 | GLOG_logtostderr=1 $caffe_path/predict_seg_new.bin \
13 | --model=deploy.prototxt \
14 | --weights=trained_weights/inception_fcn_mscal_classifier_fullstacks_train_iter_50000.caffemodel \
15 | --data=../../data/snemi3d_test_v$idx.h5 \
16 | --predict=$predict_dir/test.h5 \
17 | --shift_axis=2 \
18 | --shift_stride=1 \
19 | --gpu=0
20 | 
21 | done
22 | #snemi3d_test_v$idx.h5
23 | 
24 | 
25 | #snemi3d_train_full_stacks_v$idx.h5
26 | #snemi3d_valid.h5
27 | #snemi3d_test_last10slice.h5
28 | #snemi3d_train_full_stacks_v$idx.h5
29 | 
--------------------------------------------------------------------------------
/model/inception_residual_train_prediction_3fm/slice_tiff_stack.m:
--------------------------------------------------------------------------------
1 | function slice_tiff_stack(filename,out_folder)
2 | 
3 | Imf=imfinfo(filename);
4 | 
5 | if ~exist(out_folder,'dir')
6 | mkdir(out_folder)
7 | end
8 | 
9 | mImage=Imf(1).Width
10 | nImage=Imf(1).Height
11 | NumberImages=length(Imf)
12 | color_dim = length(Imf(1).BitsPerSample)
13 | if color_dim>1
14 | FinalImage=zeros(nImage,mImage,color_dim,NumberImages);
15 | else
16 | FinalImage=zeros(nImage,mImage,NumberImages);
17 | end
18 | for i=1:NumberImages
19 | if color_dim>1
20 | FinalImage(:,:,:,i)=imread(filename,i);
21 | else
22 | FinalImage(:,:,i)=imread(filename,i);
23 | end
24 | end
25 | filename_base ='t';
26 | for i=1:NumberImages
27 | %im=labels(:,:,i);
28 | filename=[out_folder filesep filename_base num2str(i) '.tiff'];
29 | if color_dim>1
30 | im =FinalImage(:,:,:,i);
31 | else
32 | im=FinalImage(:,:,i);
33 | end
34 | 
35 | imwrite(uint8(im),filename);
36 | disp(['write #' num2str(i) ' image ... ' filename]);
37 | end
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DeepEM3D
2 | This is the implementation code for the paper submitted to Bioinformatics: **"DeepEM3D: Approaching human-level performance on 3D anisotropic EM image segmentation"**
3 | 
4 | # Required environment:
5 | C++, bash shell, MATLAB, CUDA 7.5
6 | 
7 | # Data
8 | (1). Register at:
9 | http://brainiac2.mit.edu/SNEMI3D/user/register
10 | 
11 | (2). Log in and download the data at:
12 | http://brainiac2.mit.edu/SNEMI3D/downloads
13 | 
14 | (3). Convert the image files into h5 files that contain **/data** and **/label** sets.
15 | 
16 | # Code
17 | 1. To generate boundary labels:
18 | run the MATLAB script */scripts/create_new_vertical_closed_label.m*
19 | 
20 | 2. To generate all data h5 files (train, valid, test):
21 | run the MATLAB script */scripts/read_data_write_data_with_enhanced_labels.m*
22 | 
23 | 3. To train the network models and run prediction:
24 | run the shell scripts */model/inception_residual_train_prediction_xfm/train.sh* **or** *predict.sh*
25 | 
26 | 4. To generate the segmentation on the test set:
27 | run the MATLAB script */model/inception_residual_train_prediction_3fm/run_segmentation_on_test_sets.m*
28 | 
--------------------------------------------------------------------------------
/model/inception_residual_train_prediction_1fm/solver.prototxt:
--------------------------------------------------------------------------------
1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10
2 | 
3 | # The train/test net protocol buffer definition
4 | net: "train_val.prototxt"
5 | # test_iter specifies how many forward passes the test should carry out.
6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 20 10 | test_iter: 20 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 1e-02 13 | power: 0.8 14 | momentum: 0.9 15 | weight_decay: 0.0005 16 | average_loss: 16 17 | # The learning rate policy 18 | lr_policy: "poly" 19 | #stepsize: 2500 20 | # Display every 100 iterations 21 | display: 3 22 | # The maximum number of iterations 23 | max_iter: 50000 24 | iter_size: 8 25 | # snapshot intermediate results 26 | snapshot: 2000 27 | #snapshot_format: HDF5 28 | snapshot_format: BINARYPROTO 29 | snapshot_prefix: "trained_weights/inception_fcn_mscal_classifier" 30 | # solver mode: CPU or GPU 31 | solver_mode: GPU 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 DiveLab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "train_val.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 10 10 | test_iter: 20 11 | # The base learning rate, momentum and the weight decay of the network. 
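# Note: with lr_policy "poly" below, Caffe computes
#   lr = base_lr * (1 - iter/max_iter)^power
# so the rate decays smoothly from 1e-02 at iteration 0 to 0 at max_iter.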
12 | base_lr: 1e-02
13 | power: 0.8
14 | momentum: 0.9
15 | weight_decay: 0.0005
16 | average_loss: 16
17 | # The learning rate policy
18 | lr_policy: "poly"
19 | #stepsize: 2500
20 | # Display every 100 iterations
21 | display: 3
22 | # The maximum number of iterations
23 | max_iter: 50000
24 | iter_size: 8
25 | # snapshot intermediate results
26 | snapshot: 2000
27 | #snapshot_format: HDF5
28 | snapshot_format: BINARYPROTO
29 | snapshot_prefix: "trained_weights/inception_fcn_mscal_classifier_fullstacks_train"
30 | # solver mode: CPU or GPU
31 | solver_mode: GPU
32 | 
--------------------------------------------------------------------------------
/model/inception_resiudal_train_prediciton_5fm/solver.prototxt:
--------------------------------------------------------------------------------
1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10
2 | 
3 | # The train/test net protocol buffer definition
4 | net: "train_val.prototxt"
5 | # test_iter specifies how many forward passes the test should carry out.
6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations,
7 | # covering the full 10,000 testing images.
8 | # Carry out testing every 500 training iterations.
9 | test_interval: 10
10 | test_iter: 20
11 | # The base learning rate, momentum and the weight decay of the network.
12 | base_lr: 1e-02
13 | power: 0.8
14 | momentum: 0.9
15 | weight_decay: 0.0005
16 | average_loss: 16
17 | # The learning rate policy
18 | lr_policy: "poly"
19 | #stepsize: 2500
20 | # Display every 100 iterations
21 | display: 3
22 | # The maximum number of iterations
23 | max_iter: 50000
24 | iter_size: 8
25 | # snapshot intermediate results
26 | snapshot: 2000
27 | #snapshot_format: HDF5
28 | snapshot_format: BINARYPROTO
29 | snapshot_prefix: "trained_weights/inception_fcn_mscal_classifier_fullstacks_train"
30 | # solver mode: CPU or GPU
31 | solver_mode: GPU
32 | 
--------------------------------------------------------------------------------
/scripts/de_augment_data.m:
--------------------------------------------------------------------------------
1 | function prob=de_augment_data(D)
2 | d_size=length(D);
3 | prob1_sum=recover8Variation(D(1:8));
4 | if d_size ==16
5 | prob2_sum=recover8Variation(D(9:16));
6 | temp_sum2=prob2_sum;
7 | 
8 | slices=size(temp_sum2,3);
9 | %sweep Z dimension
10 | for i=1:slices/2
11 | temp=temp_sum2(:,:,slices-i+1);
12 | temp_sum2(:,:,slices-i+1)=temp_sum2(:,:,i);
13 | temp_sum2(:,:,i)=temp;
14 | end
15 | prob2_sum=temp_sum2;
16 | prob=(prob1_sum+prob2_sum)/16;
17 | else
18 | prob=prob1_sum/d_size;
19 | end
20 | 
21 | function sum=recover8Variation(x)
22 | prob=x{1};
23 | average=zeros(size(prob));
24 | for i = 1:8
25 | prob = x{i};
26 | for j = 1:size(prob,3)
27 | p=squeeze(prob(:,:,j));
28 | switch(i)
29 | case 1
30 | average(:,:,j) = average(:,:,j)+p(:,:,1);
31 | case 2
32 | average(:,:,j) = average(:,:,j) + flipdim(p(:,:,1),1);
33 | case 3
34 | average(:,:,j) = average(:,:,j) + flipdim(p(:,:,1),2);
35 | case 4
36 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1), -1);
37 | case 5
38 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1));
39 | case 6
40 | average(:,:,j) = average(:,:,j) + flipdim(rot90(p(:,:,1),-1), 1);
41 | case 7
42 | average(:,:,j) = average(:,:,j) + flipdim(rot90(p(:,:,1),-1), 2);
43 | case 8
44 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1),2);
45 | end
46 | end
47 | end
48 | sum=average;
--------------------------------------------------------------------------------
/scripts/create_new_vertical_closed_label.m:
-------------------------------------------------------------------------------- 1 | gt_labels=h5read('../hd5/train_labelEM.h5','/label'); 2 | %bw_labels=h5read('../data/snemi3d_train_full_stacks_widen_label100.h5','/label'); 3 | bw_labels=h5read('../data/snemi3d_train_full_stacks_label100.h5','/label'); 4 | 5 | gt_labels=permute( gt_labels, [2 3 1]); 6 | bw_labels=permute( bw_labels, [2 3 1]); 7 | 8 | fun =@my_border_labeling 9 | new_labels_1 =zeros(size(gt_labels)); 10 | new_labels_2 =zeros(size(gt_labels)); 11 | parfor i=1:1024 12 | gt_2d=squeeze(gt_labels(i,:,:)); 13 | new_labels_1_1(i,:,:) = nlfilter(gt_2d,[1 3],fun); 14 | new_labels_1_2(i,:,:) = nlfilter(gt_2d,[3 1],fun); 15 | end 16 | 17 | parfor i=1:1024 18 | gt_2d=squeeze(gt_labels(:,i,:)); 19 | new_labels_2_1(:,i,:) = nlfilter(gt_2d,[1 3],fun); 20 | new_labels_2_2(:,i,:) = nlfilter(gt_2d,[3 1],fun); 21 | end 22 | 23 | 24 | bw_labels(1:2,:,:)=1; 25 | bw_labels(1023:1024,:,:)=1; 26 | 27 | bw_labels(:,1:2,:)=1; 28 | bw_labels(:,1023:1024,:,:)=1; 29 | combine_new_labels= (1-new_labels_1_1)+(1-new_labels_1_2)+(1-new_labels_2_1)+(1-new_labels_2_2)+(1-bw_labels); 30 | combine_new_labels(combine_new_labels>1)=1; 31 | 32 | combine_new_labels=1-combine_new_labels; 33 | save_h5_file='../data/vertical_enhanced_thin_label3x3.h5' 34 | d_details.location = '/'; 35 | d_details.Name = 'labels'; 36 | hdf5write(save_h5_file,d_details,combine_new_labels); 37 | 38 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_1fm/de_augment_data.m: -------------------------------------------------------------------------------- 1 | function prob=de_augment_data(D) 2 | d_size=length(D); 3 | prob1_sum=recover8Variation(D(1:8)); 4 | if d_size ==16 5 | prob2_sum=recover8Variation(D(9:16)); 6 | temp_sum2=prob2_sum; 7 | 8 | slices=size(temp_sum2,3); 9 | %sweep Z dimension 10 | for i=1:slices/2 11 | temp=temp_sum2(:,:,slices-i+1); 12 | temp_sum2(:,:,slices-i+1)=temp_sum2(:,:,i); 13 | temp_sum2(:,:,i)=temp; 14 | end 15 | prob2_sum=temp_sum2; 16 | prob=(prob1_sum+prob2_sum)/16; 17 | else 18 | prob=prob1_sum/d_size; 19 | end 20 | 21 | function sum=recover8Variation(x) 22 | prob=x{1}; 23 | average=zeros(size(prob)); 24 | for i = 1:8 25 | prob = x{i}; 26 | for j = 1:size(prob,3) 27 | p=squeeze(prob(:,:,j)); 28 | switch(i) 29 | case 1 30 | average(:,:,j) = average(:,:,j)+p(:,:,1); 31 | case 2 32 | average(:,:,j) = average(:,:,j) + flipdim(p(:,:,1),1); 33 | case 3 34 | average(:,:,j) = average(:,:,j) + flipdim(p(:,:,1),2); 35 | case 4 36 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1), -1); 37 | case 5 38 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1)); 39 | case 6 40 | average(:,:,j) = average(:,:,j) + flipdim(rot90(p(:,:,1),-1), 1); 41 | case 7 42 | average(:,:,j) = average(:,:,j) + flipdim(rot90(p(:,:,1),-1), 2); 43 | case 8 44 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1),2); 45 | end 46 | xxx=1; 47 | end 48 | sum=average; 49 | end 50 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/de_augment_data.m: -------------------------------------------------------------------------------- 1 | function prob=de_augment_data(D) 2 | d_size=length(D); 3 | prob1_sum=recover8Variation(D(1:8)); 4 | if d_size ==16 5 | prob2_sum=recover8Variation(D(9:16)); 6 | temp_sum2=prob2_sum; 7 | 8 | slices=size(temp_sum2,3); 9 | %sweep Z dimension 10 | for i=1:slices/2 11 | temp=temp_sum2(:,:,slices-i+1); 12 | 
temp_sum2(:,:,slices-i+1)=temp_sum2(:,:,i); 13 | temp_sum2(:,:,i)=temp; 14 | end 15 | prob2_sum=temp_sum2; 16 | prob=(prob1_sum+prob2_sum)/16; 17 | else 18 | prob=prob1_sum/d_size; 19 | end 20 | 21 | function sum=recover8Variation(x) 22 | prob=x{1}; 23 | average=zeros(size(prob)); 24 | for i = 1:8 25 | prob = x{i}; 26 | for j = 1:size(prob,3) 27 | p=squeeze(prob(:,:,j)); 28 | switch(i) 29 | case 1 30 | average(:,:,j) = average(:,:,j)+p(:,:,1); 31 | case 2 32 | average(:,:,j) = average(:,:,j) + flipdim(p(:,:,1),1); 33 | case 3 34 | average(:,:,j) = average(:,:,j) + flipdim(p(:,:,1),2); 35 | case 4 36 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1), -1); 37 | case 5 38 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1)); 39 | case 6 40 | average(:,:,j) = average(:,:,j) + flipdim(rot90(p(:,:,1),-1), 1); 41 | case 7 42 | average(:,:,j) = average(:,:,j) + flipdim(rot90(p(:,:,1),-1), 2); 43 | case 8 44 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1),2); 45 | end 46 | xxx=1; 47 | end 48 | sum=average; 49 | end 50 | -------------------------------------------------------------------------------- /model/inception_resiudal_train_prediciton_5fm/de_augment_data.m: -------------------------------------------------------------------------------- 1 | function prob=de_augment_data(D) 2 | d_size=length(D); 3 | prob1_sum=recover8Variation(D(1:8)); 4 | if d_size ==16 5 | prob2_sum=recover8Variation(D(9:16)); 6 | temp_sum2=prob2_sum; 7 | 8 | slices=size(temp_sum2,3); 9 | %sweep Z dimension 10 | for i=1:slices/2 11 | temp=temp_sum2(:,:,slices-i+1); 12 | temp_sum2(:,:,slices-i+1)=temp_sum2(:,:,i); 13 | temp_sum2(:,:,i)=temp; 14 | end 15 | prob2_sum=temp_sum2; 16 | prob=(prob1_sum+prob2_sum)/16; 17 | else 18 | prob=prob1_sum/d_size; 19 | end 20 | 21 | function sum=recover8Variation(x) 22 | prob=x{1}; 23 | average=zeros(size(prob)); 24 | for i = 1:8 25 | prob = x{i}; 26 | for j = 1:size(prob,3) 27 | p=squeeze(prob(:,:,j)); 28 | switch(i) 29 | case 1 30 | average(:,:,j) = average(:,:,j)+p(:,:,1); 31 | case 2 32 | average(:,:,j) = average(:,:,j) + flipdim(p(:,:,1),1); 33 | case 3 34 | average(:,:,j) = average(:,:,j) + flipdim(p(:,:,1),2); 35 | case 4 36 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1), -1); 37 | case 5 38 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1)); 39 | case 6 40 | average(:,:,j) = average(:,:,j) + flipdim(rot90(p(:,:,1),-1), 1); 41 | case 7 42 | average(:,:,j) = average(:,:,j) + flipdim(rot90(p(:,:,1),-1), 2); 43 | case 8 44 | average(:,:,j) = average(:,:,j) + rot90(p(:,:,1),2); 45 | end 46 | xxx=1; 47 | end 48 | sum=average; 49 | end 50 | -------------------------------------------------------------------------------- /scripts/SNEMI3D_metrics.m: -------------------------------------------------------------------------------- 1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 | % SNEMI3D challenge: 3D segmentation of neurites in EM images 3 | % 4 | % Script to calculate the segmentation error between some 3D 5 | % original labels and their corresponding proposed labels. 6 | % 7 | % The evaluation metric is: 8 | % - Rand error: 1 - F-score of adapted Rand index 9 | % 10 | % author: Ignacio Arganda-Carreras (iarganda@mit.edu) 11 | % More information at http://brainiac.mit.edu/SNEMI3D 12 | % 13 | % This script released under the terms of the General Public 14 | % License in its latest edition. 
15 | % 16 | % Input: 17 | % segA - ground truth (16-bit labels, 0 = background) 18 | % segB - proposed labels (16-bit labels, 0 = background) 19 | % Output: 20 | % re - adapated Rand error (1.0 - F-score of adapted Rand index) 21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 22 | function [re] = SNEMI3D_metrics( segA, segB ) 23 | 24 | segA = double(segA)+1; 25 | segB = double(segB)+1; 26 | n = numel(segA); 27 | 28 | n_labels_A = max(segA(:)); 29 | n_labels_B = max(segB(:)); 30 | 31 | % compute overlap matrix 32 | p_ij = sparse(segA(:),segB(:),1,n_labels_A,n_labels_B); 33 | 34 | % a_i 35 | a_i = sum(p_ij(2:end,:), 2); 36 | 37 | % b_j 38 | b_j = sum(p_ij(2:end,2:end), 1); 39 | 40 | p_i0 = p_ij(2:end,1); % pixels marked as BG in segB which are not BG in segA 41 | p_ij = p_ij(2:end,2:end); 42 | 43 | sumA = sum(a_i.*a_i); 44 | sumB = sum(b_j.*b_j) + sum(p_i0)/n; 45 | sumAB = sum(sum(p_ij.^2)) + sum(p_i0)/n; 46 | 47 | % Rand index 48 | %ri = full(1 - (sumA + sumB - 2*sumAB)/ n^2); 49 | 50 | % precision 51 | prec = sumAB / sumB; 52 | 53 | % recall 54 | rec = sumAB / sumA; 55 | 56 | % F-score 57 | fScore = 2.0 * prec * rec / (prec + rec); 58 | 59 | re = 1.0 - fScore; 60 | 61 | end 62 | 63 | -------------------------------------------------------------------------------- /model/inception_resiudal_train_prediciton_5fm/run_segmentation_train.m: -------------------------------------------------------------------------------- 1 | save_dir='../../hd5' 2 | data_dir='../../data'; 3 | data_dir='/tempspace/tzeng/snmes3d/data' 4 | raw_file ='snems3d_train_old.mat'; 5 | addpath('../../scripts') 6 | raw_file=[data_dir filesep raw_file]; 7 | load(raw_file); 8 | label=elm_labels{1}; 9 | lb=permute(label,[2,3,1]); 10 | 11 | mat_train_deconv_file =[data_dir filesep 'train_average8_10.mat']; 12 | mat_train_1fm_file=['../inception_ResNet_fcn_1fm_multiscale_classifier_1fm_2d/predict/ave_probs_train_iter_12000.mat']; 13 | mat_train_3fm_file=['inception_multiscale_3fm_1x3_3x1_enhanced_fulltrain/ave_probs_test_iter_14522.mat']; 14 | %train_stack_prob_dir ='predict_single/prob_train_iter_2000.mat'; 15 | mat_train_file ='predict/ave_probs_train_iter_8000.mat'; 16 | Td=load(mat_train_file); 17 | prob=1-Td.average; 18 | 19 | load(mat_train_deconv_file); 20 | deconv_prob=average; 21 | 22 | Td_1fm=load(mat_train_1fm_file); 23 | prob_1f=1-Td_1fm.average; 24 | prob_train=prob(:,:,21:100); 25 | prob=prob_train; 26 | %prob=max(prob_train); 27 | %prob=1-prob_train; 28 | 29 | 30 | %prob(:,:,1)=deconv_prob(:,:,1); 31 | %prob(:,:,100)=deconv_prob(:,:,100); 32 | th=0.086 33 | h = fspecial('Gaussian', [6 6], 1); 34 | prob_mask_th=0.8 35 | 36 | ths=[0.05:0.01:0.24]; 37 | lbs=label; 38 | lbs=permute(lbs,[2 3 1]); 39 | 40 | % parfor i=1:length(ths) 41 | % th=ths(i); 42 | % idx_ths(i)=th 43 | % L = watershed(imhmin(imfilter(prob, h), th),6); 44 | % ARD(i)=SNEMI3D_metrics(lb(:,:,21:100),L) 45 | % display(sprintf('watershed threshold = %d, metric = %d', th,ARD(i))); 46 | % end 47 | 48 | 49 | 50 | %L = watershed(imhmin(imfilter(prob, h), 0.086),6); 51 | % lbs=label; 52 | % lbs=permute(lbs,[2 3 1]); 53 | % display(sprintf('watershed threshold = %d, metric = %d', th, SNEMI3D_metrics(lbs,L))); 54 | % %[out_map,out_map_fill,L,ws]=watershed_post_processing(prob,'3d'); 55 | % display(sprintf('watershed threshold = %d, metric = %d', th, SNEMI3D_metrics(lbs,L))); 56 | % %display(sprintf('outmap threshold = %d, metric = %d', th, SNEMI3D_metrics(label,out_map))); 
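The run_segmentation_* scripts above and below all share one core recipe: invert the averaged membrane probability map (prob = 1 - average), smooth it slice-wise with a Gaussian, suppress shallow minima with imhmin, and label neurites with a 6-connected 3D watershed, scoring against ground truth via SNEMI3D_metrics. A condensed, runnable MATLAB sketch of that step, with synthetic stand-ins for the probability volume and labels (assumes scripts/ is on the path):

    % Synthetic stand-ins: 'prob' plays the role of 1-average from a
    % predict/ave_probs_*.mat file, 'lb' the permuted ground-truth labels.
    prob = rand(64, 64, 8);                              % membrane probabilities
    lb   = randi([1 5], 64, 64, 8);                      % toy segment labels
    h    = fspecial('Gaussian', [6 6], 1);               % in-plane smoothing
    th   = 0.086;                                        % imhmin depth threshold
    L    = watershed(imhmin(imfilter(prob, h), th), 6);  % 6-connected 3D watershed
    re   = SNEMI3D_metrics(lb, L);                       % adapted Rand error
    fprintf('watershed threshold = %g, Rand error = %g\n', th, re);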
-------------------------------------------------------------------------------- /model/inception_residual_train_prediction_1fm/combinePredicctionSlice.m: -------------------------------------------------------------------------------- 1 | function prob=combinePredicctionSlice(folder_name, threshold) 2 | if ~exist(folder_name,'dir') 3 | disp(['Can''t find folder: ' folder_name]); 4 | return 5 | end 6 | a = dir([folder_name filesep '*.h5']); 7 | name_list = {}; 8 | len_list = []; 9 | names={a.name}; 10 | f_idx=[cellfun(@(x) x(end)=='5',names,'UniformOutput',false)]; 11 | 12 | %return 13 | % if strcmp(a(3).name , 'Thumbs.db') 14 | % a = a(4:end); 15 | % else 16 | % a = a(3:end); 17 | % end 18 | % for i = 1: length(a) 19 | % name_list{i,1} = a(i).name; 20 | % len_list = [ len_list; length(name_list{i,1})]; 21 | % end 22 | 23 | % [~, idx] = min(len_list); 24 | % name_temp = name_list{idx} 25 | 26 | f_idx=[f_idx{:}]; 27 | name_temp=names{f_idx} 28 | %names 29 | %return 30 | name_temp = name_temp(1:end-4) 31 | 32 | tiff_file_save = strcat(folder_name, filesep,'prediction_result.tif'); 33 | delete(tiff_file_save); 34 | mx=0; 35 | 36 | length(f_idx) 37 | for i = 0: length(f_idx)-1 38 | %i 39 | filename = strcat(folder_name, filesep, name_temp, num2str(i),'.h5') 40 | temp1 = hdf5info(filename); 41 | b = hdf5read(temp1.GroupHierarchy.Datasets); 42 | mx_s=max(size(size(b))); 43 | 44 | if mx_s==4 45 | b = squeeze(b(1,:,:,2)); 46 | elseif mx_s==3 47 | b = squeeze(b(:,:,2)); 48 | end 49 | c = zeros(size(b)); 50 | %minn=min(b(:)); 51 | %b=b-minn; 52 | mx_c=max(max(b)); 53 | if mx2 && save_h5_mat==True 79 | mat_file=[folder_name filesep 'prob.mat']; 80 | h5_file=[folder_name filesep 'prob.h5']; 81 | save(mat_file,'prob') 82 | 83 | p_details.location = '/'; 84 | p_details.Name = 'probabilities'; 85 | hdf5write(h5_file,p_details,prob); 86 | end 87 | 88 | 89 | -------------------------------------------------------------------------------- /model/inception_resiudal_train_prediciton_5fm/combinePredicctionSlice.m: -------------------------------------------------------------------------------- 1 | function prob=combinePredicctionSlice(folder_name, save_h5_mat, threshold) 2 | if ~exist(folder_name,'dir') 3 | disp(['Can''t find folder: ' folder_name]); 4 | return 5 | end 6 | a = dir([folder_name filesep '*.h5']); 7 | name_list = {}; 8 | len_list = []; 9 | names={a.name}; 10 | f_idx=[cellfun(@(x) x(end)=='5',names,'UniformOutput',false)]; 11 | 12 | %return 13 | % if strcmp(a(3).name , 'Thumbs.db') 14 | % a = a(4:end); 15 | % else 16 | % a = a(3:end); 17 | % end 18 | % for i = 1: length(a) 19 | % name_list{i,1} = a(i).name; 20 | % len_list = [ len_list; length(name_list{i,1})]; 21 | % end 22 | 23 | % [~, idx] = min(len_list); 24 | % name_temp = name_list{idx} 25 | 26 | f_idx=[f_idx{:}]; 27 | name_temp=names{f_idx} 28 | %names 29 | %return 30 | name_temp = name_temp(1:end-4) 31 | 32 | tiff_file_save = strcat(folder_name, filesep,'prediction_result.tif'); 33 | delete(tiff_file_save); 34 | mx=0; 35 | 36 | length(f_idx) 37 | for i = 0: length(f_idx)-1 38 | %i 39 | filename = strcat(folder_name, filesep, name_temp, num2str(i),'.h5') 40 | temp1 = hdf5info(filename); 41 | b = hdf5read(temp1.GroupHierarchy.Datasets); 42 | mx_s=max(size(size(b))); 43 | 44 | if mx_s==4 45 | b = squeeze(b(1,:,:,2)); 46 | elseif mx_s==3 47 | b = squeeze(b(:,:,2)); 48 | end 49 | c = zeros(size(b)); 50 | %minn=min(b(:)); 51 | %b=b-minn; 52 | mx_c=max(max(b)); 53 | if mx1 && save_h5_mat==true 79 | mat_file=[folder_name filesep 'prob.mat']; 80 | 
h5_file=[folder_name filesep 'prob.h5']; 81 | save(mat_file,'prob') 82 | 83 | p_details.location = '/'; 84 | p_details.Name = 'probabilities'; 85 | hdf5write(h5_file,p_details,prob); 86 | end 87 | 88 | 89 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/run_segmentation_on_valid_set.m: -------------------------------------------------------------------------------- 1 | save_dir='../../hd5' 2 | data_dir='../../data'; 3 | data_dir='/tempspace/tzeng/snmes3d/data' 4 | raw_file ='snems3d_train_old.mat'; 5 | raw_file=[data_dir filesep raw_file]; 6 | load(raw_file); 7 | label=elm_labels{1}; 8 | lb=permute(label,[2,3,1]); 9 | 10 | mat_train_deconv_file =[data_dir filesep 'train_average8_10.mat']; 11 | 12 | mat_train_file_1fm='../inception_ResNet_fcn_1fm_multiscale_classifier_1fm_2d/predict/ave_probs_train_iter_12000.mat' 13 | 14 | 15 | mat_train_file_5fm=['../inception_5m_multiscale_classfier_thin_1x3_3x1_v_label_fullstack_train/predict/ave_probs_train_iter_32000.mat']; 16 | 17 | mat_train_file_3fm=['../inception_multiscale_3fm_1x3_3x1_enhanced_fulltrain/predict/ave_probs_train_iter_14522.mat']; 18 | 19 | 20 | mat_train_file ='predict/ave_probs_train_iter_50000.mat'; 21 | Td=load(mat_train_file); 22 | prob=1-Td.average; 23 | 24 | 25 | load(mat_train_file_1fm); 26 | prob_train_1fm=1-average; 27 | 28 | load(mat_train_file_5fm); 29 | prob_train_5fm=1-average; 30 | 31 | load(mat_train_file_3fm); 32 | prob_train_3fm=1-average; 33 | 34 | %prob=max(max(max(prob_train_1fm,prob_train_5fm),prob),prob_train_3fm); 35 | prob=max(max(prob_train_5fm,prob),prob_train_3fm); 36 | %prob=1-prob_train; 37 | 38 | load(mat_train_deconv_file); 39 | deconv_prob=average; 40 | %prob(:,:,1)=deconv_prob(:,:,1); 41 | %prob(:,:,100)=deconv_prob(:,:,100); 42 | th=0.086 43 | h = fspecial('Gaussian', [6 6], 2); 44 | prob_mask_th=0.8 45 | 46 | % ths=[0.14:0.002:0.24]; 47 | % lbs=label; 48 | % lbs=permute(lbs,[2 3 1]); 49 | % parfor i=1:length(ths) 50 | % th=ths(i); 51 | % L = watershed(imhmin(imfilter(prob, h), th),6); 52 | % display(sprintf('watershed threshold = %d, metric = %d', th, SNEMI3D_metrics(lb,L))); 53 | % end 54 | 55 | 56 | 57 | hs=[0.05:0.1:1] 58 | ths=[0.14:0.02:2.4] 59 | parfor i=1:length(hs) 60 | h=fspecial('Gaussian', [6 6], hs(i)); 61 | B=zeros(size(prob)); 62 | B(prob>0.45)=1; 63 | for j=1:length(ths) 64 | th=ths(j); 65 | L = watershed(imhmin(imfilter(prob, h), th),6); 66 | display(sprintf('watershed threshold = %d, h = %d, metric = %d', th,hs(i), SNEMI3D_metrics(lb,L))); 67 | %L_Bf=watershed(imhmin(imfilter(B, h), th),6); 68 | %display(sprintf('watershed threshold = %d, h = %d, metric = %d', th,hs(i), SNEMI3D_metrics(lb,L_Bf))); 69 | end 70 | end 71 | 72 | 73 | 74 | 75 | %L = watershed(imhmin(imfilter(prob, h), 0.086),6); 76 | % lbs=label; 77 | % lbs=permute(lbs,[2 3 1]); 78 | % display(sprintf('watershed threshold = %d, metric = %d', th, SNEMI3D_metrics(lbs,L))); 79 | % %[out_map,out_map_fill,L,ws]=watershed_post_processing(prob,'3d'); 80 | % display(sprintf('watershed threshold = %d, metric = %d', th, SNEMI3D_metrics(lbs,L))); 81 | % %display(sprintf('outmap threshold = %d, metric = %d', th, SNEMI3D_metrics(label,out_map))); -------------------------------------------------------------------------------- /scripts/augment_data.m: -------------------------------------------------------------------------------- 1 | function [D,L]=augment_data(x,y) 2 | [data1,label1]=create8Variation(x,y); 3 | 4 | slices=size(x,1); 5 | %sweep Z 
dimension 6 | for i=1:slices/2 7 | temp_x=x(slices-i+1,:,:); 8 | x(slices-i+1,:,:)=x(i,:,:); 9 | x(i,:,:)=temp_x; 10 | 11 | temp_y=y(slices-i+1,:,:); 12 | y(slices-i+1,:,:)=y(i,:,:); 13 | y(i,:,:)=temp_y; 14 | end 15 | [data2,label2]=create8Variation(x,y); 16 | 17 | D=[data1 data2]; 18 | L=[label1 label2]; 19 | end 20 | 21 | function [D,L]=create8Variation(x,y) 22 | for j = 1:8 23 | for i = 1:size(x,1) 24 | original = squeeze(x(i,:,:)); 25 | lb = squeeze(y(i,:,:)); 26 | switch(j) 27 | case 1 28 | case 2 29 | original = flipdim(original,1); 30 | lb = flipdim(lb,1); 31 | case 3 32 | original = flipdim(original,2); 33 | lb = flipdim(lb,2); 34 | case 4 35 | original = rot90(original); 36 | lb = rot90(lb); 37 | case 5 38 | original = rot90(original, -1); 39 | lb = rot90(lb, -1); 40 | case 6 41 | original = rot90(flipdim(original, 1)); 42 | lb = rot90(flipdim(lb, 1)); 43 | case 7 44 | original = rot90(flipdim(original,2)); 45 | lb = rot90(flipdim(lb,2)); 46 | case 8 47 | original = rot90(original, 2); 48 | lb = rot90(lb, 2); 49 | end 50 | data(i,:,:) = original; 51 | label(i,:,:) =lb; 52 | %elm_labels(i,:,:) = label; 53 | end 54 | D{j} = data; 55 | L{j}=label; 56 | clear data; 57 | clear label; 58 | end 59 | end 60 | 61 | 62 | function [D,L,Seg_L]=create8Variation_withSeglabel(x,y,segY) 63 | for j = 1:8 64 | for i = 1:size(x,1) 65 | original = squeeze(x(i,:,:)); 66 | lb = squeeze(y(i,:,:)); 67 | seg_lb= squeeze(segY(i,:,:)); 68 | switch(j) 69 | case 1 70 | case 2 71 | original = flipdim(original,1); 72 | lb = flipdim(lb,1); 73 | seg_lb=flipdim(seg_lb,1) 74 | case 3 75 | original = flipdim(original,2); 76 | lb = flipdim(lb,2); 77 | seg_lb = flipdim(seg_lb,2); 78 | case 4 79 | original = rot90(original); 80 | lb = rot90(lb); 81 | seg_lb = rot90(seg_lb); 82 | case 5 83 | original = rot90(original, -1); 84 | lb = rot90(lb, -1); 85 | seg_lb = rot90(seg_lb, -1); 86 | case 6 87 | original = rot90(flipdim(original, 1)); 88 | lb = rot90(flipdim(lb, 1)); 89 | seg_lb = rot90(flipdim(seg_lb, 1)); 90 | case 7 91 | original = rot90(flipdim(original,2)); 92 | lb = rot90(flipdim(lb,2)); 93 | seg_lb = rot90(flipdim(seg_lb, 2)); 94 | case 8 95 | original = rot90(original, 2); 96 | lb = rot90(lb, 2); 97 | seg_lb = rot90(seg_lb, 2); 98 | end 99 | data(i,:,:) = original; 100 | label(i,:,:) =lb; 101 | seg_label(i,:,:)=seg_lb 102 | %elm_labels(i,:,:) = label; 103 | end 104 | D{j} = data; 105 | L{j}=label; 106 | Seg_L{j}=seg_label; 107 | clear data; 108 | clear label; 109 | clear Seg_L; 110 | end 111 | end -------------------------------------------------------------------------------- /model/inception_resiudal_train_prediciton_5fm/run_segmentation_test.m: -------------------------------------------------------------------------------- 1 | addpath('../../scripts/'); 2 | hd_dir='../../hd5' 3 | data_dir='../../data' 4 | 5 | hd5_raw_image_file ='snemi3d_test_v1.h5'; 6 | hd5_raw_image_file =[data_dir filesep hd5_raw_image_file]; 7 | Raw_img=h5read(hd5_raw_image_file,'/data'); 8 | Raw_img=permute(Raw_img,[2 3 1]); 9 | mat_test_file ='predict/ave_probs_test_iter_32000.mat'; 10 | mat_test_deconv_file =[data_dir filesep 'test_average8_10.mat']; 11 | mat_test_1fm_file=['../inception_ResNet_fcn_1fm_multiscale_classifier_1fm_2d/predict/ave_probs_test_iter_30000.mat']; 12 | mat_test_3fm_file=['../inception_multiscale_3fm_1x3_3x1_enhanced_fulltrain/predict/ave_probs_test_iter_14522.mat']; 13 | h_fill = fspecial('Gaussian', [12 12], 8); 14 | load(mat_test_deconv_file); 15 | deconv_prob_test=average; 16 | load(mat_test_file); 17 | 
prob_test=1-average; 18 | load(mat_test_1fm_file); 19 | prob_test_1fm=1-average; 20 | 21 | load(mat_test_3fm_file); 22 | prob_test_3fm=1-average; 23 | fix_slice_num =[1:100]; 24 | prob_test(:,:,fix_slice_num)=max(max(prob_test(:,:,fix_slice_num),prob_test_1fm(:,:,fix_slice_num)),prob_test_3fm(:,:,fix_slice_num)); 25 | prob_test(:,:,30)=prob_test(:,:,29)*0.5+prob_test(:,:,31)*0.5; 26 | h= fspecial('Gaussian', [12 12], 8); 27 | imhm_th_3d=0.23; 28 | L = watershed(imhmin(imfilter(prob_test, h), imhm_th_3d),6); 29 | %prob_deconv_merge=max(deconv_prob_test,prob_test); 30 | %L 31 | 32 | % figure,imshow(label2rgb(L(:,:,30))) 33 | 34 | 35 | 36 | % disp('merge region on 3d watershed ...') 37 | %merge_iters=20; 38 | %L_merge=merge_seg2(L,merge_iters); 39 | 40 | 41 | disp('fill background and remove 0 on 3d watershed ...') 42 | %L_fill_merge=L_merge; 43 | L_fill_merge=L; 44 | prob_mask_th=0.80; 45 | %L_fill_merge=L; 46 | L_fill_merge(find(deconv_prob_test>=prob_mask_th)) = 0; 47 | 48 | L_fill_merge=double(L_fill_merge); 49 | parfor i=1:size(L_fill_merge,3) 50 | disp(['disp ' num2str(i)]) 51 | f = full_fill(L_fill_merge(:,:,i)); 52 | out_map_test(:,:,i)=f; 53 | end 54 | 55 | 56 | % run 2d binary watershed on prob map and merge slice based on 3d watershed 57 | % D=prob_test; 58 | % D(prob_test>=0.6)=1; 59 | % D(prob_test<0.6)=0; 60 | % L_2d=watershed(D,8) % 8 specify 2d Watershed. 61 | % L_2d=gpuArray(L_2d); 62 | % num_2d_reg=size(unique(L_2d)) 63 | % for i=1:num_2d_reg-1 64 | % idx=(L_2d==i); 65 | % %idx_array=L_18(idx); 66 | % L_2d(idx)=mode(out_map(idx)); 67 | % %uq=unique(idx_array); 68 | % disp(['process ' num2str(i)]) 69 | % end 70 | % L_f=gather(L_2d); 71 | 72 | %make_submit_tiff(out_map_test,'three_5fm3fm1fm_thin_fuse_th023_filter_12_12_8_outmap') 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | %write_label2rgb_image(out_map,Raw_img,'incep_1x3_thin_boundary_out_map_w3d_th0175_mask095'); 90 | 91 | 92 | 93 | % make_submit_tiff(L_f,'iter_22000_2d_3d_ws_fuse_outmap') 94 | write_label2rgb_image(out_map_test,Raw_img,'three_5fm3fm1fm_thin_fuse_th023_filter_12_12_8_outmap'); 95 | write_label2rgb_image(L); 96 | 97 | 98 | %display(sprintf('watershed threshold = %d, metric = %d', th, SNEMI3D_metrics(label,ws))); 99 | %display(sprintf('outmap threshold = %d, metric = %d', th, SNEMI3D_metrics(label,out_map))); 100 | %[out_map,L,ws]=watershed_post_processing(prob,'3d'); -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/run_segmentation_on_test_sets.m: -------------------------------------------------------------------------------- 1 | addpath('../../scripts/'); 2 | hd_dir='../../hd5' 3 | data_dir='../../data' 4 | 5 | hd5_raw_image_file ='snemi3d_test_v1.h5'; 6 | hd5_raw_image_file =[data_dir filesep hd5_raw_image_file]; 7 | Raw_img=h5read(hd5_raw_image_file,'/data'); 8 | Raw_img=permute(Raw_img,[2 3 1]); 9 | 10 | mat_test_file ='predict/ave_probs_test_iter_50000.mat'; 11 | mat_test_deconv_file =[data_dir filesep 'test_average8_10.mat']; 12 | 13 | mat_test_file_1fm='../inception_ResNet_fcn_1fm_multiscale_classifier_1fm_2d/predict/ave_probs_test_iter_50000.mat' 14 | 15 | mat_test_file_5fm=['../inception_5m_multiscale_classfier_thin_1x3_3x1_v_label_fullstack_train/predict/ave_probs_test_iter_50000.mat']; 16 | 17 | mat_test_file_3fm=['../inception_multiscale_3fm_1x3_3x1_enhanced_fulltrain/predict/ave_probs_test_iter_48000.mat']; 18 | 19 | 
mat_test_3x3filter_5fm_file=['../inception_5fm_multiscale_classfiier_enhanced3x3_fullstak_train/predict/ave_probs_test_iter_50000.mat'];
20 | 
21 | mat_test_file_fullstack_test_fm=['../inception_5fm_multiscale_classfiier_enhanced_fullstak_train/predict/ave_probs_test_iter_46394.mat'];
22 | 
23 | 
24 | 
25 | 
26 | load(mat_test_deconv_file);
27 | deconv_prob_test=average;
28 | 
29 | 
30 | 
31 | load(mat_test_file_1fm);
32 | prob_test_1fm=1-average;
33 | 
34 | load(mat_test_file_5fm);
35 | prob_test_5fm=1-average;
36 | 
37 | load(mat_test_file_3fm);
38 | prob_test_3fm=1-average;
39 | 
40 | 
41 | load(mat_test_file);
42 | prob_test3x3=1-average;
43 | 
44 | 
45 | load(mat_test_file_fullstack_test_fm);
46 | prob_test_full=1-average;
47 | 
48 | 
49 | load(mat_test_3x3filter_5fm_file);
50 | prob_test_3x3_5fm=1-average;
51 | 
52 | 
53 | 
54 | %% =============== ensemble models =================================
55 | prob_test=max(max(prob_test_1fm,prob_test_5fm),prob_test_3fm);
56 | prob_test_full(:,:,1)=max(prob_test(:,:,1),prob_test_3x3_5fm(:,:,1));
57 | prob_test_full(:,:,2:99)=max(prob_test_1fm(:,:,2:99),prob_test_full(:,:,2:99));
58 | prob_test(:,:,1:10)=prob_test_full(:,:,1:10);
59 | prob_test(:,:,30)=max(max(prob_test(:,:,29),prob_test(:,:,31)),prob_test(:,:,31));
60 | prob_test(:,:,31:34)=max(prob_test_1fm(:,:,31:34),prob_test_full(:,:,31:34));
61 | prob_test(:,:,60:63)=max(prob_test_full(:,:,60:63),prob_test_1fm(:,:,60:63));
62 | 
63 | 
64 | 
65 | %% =============== perform 3D watershed =================================
66 | h= fspecial('Gaussian', [6 6], 0.105);
67 | imhm_th_3d=0.19;
68 | prob_mask_th=0.8;
69 | disp('3D watershed ...')
70 | L = watershed(imhmin(imfilter(prob_test, h), imhm_th_3d),6);
71 | 
72 | 
73 | 
74 | L_fill_merge=L;
75 | L_fill_merge(deconv_prob_test>=prob_mask_th) = 0;
76 | L_fill_merge=double(L_fill_merge);
77 | parfor i=1:size(L_fill_merge,3)
78 | disp(['disp ' num2str(i)])
79 | f = full_fill(L_fill_merge(:,:,i));
80 | out_map(:,:,i)=f;
81 | end
82 | 
83 | %% ============================== Make submission Files ===============================
84 | make_submit_tiff(out_map,'iter_50000_1fm3fm5fm_correctByFull_outmap_th019_slice1')
85 | write_label2rgb_image(out_map,Raw_img,'segmentation_test_iter_50000_1fm3fm5fm_correctByFull_outmap_th019_slice_1')
86 | 
87 | %make_submit_tiff(out_map,'Best_iter_50000_1fm3fm5fm_correctByFull_outmap_th020_rindx0064745019')
88 | %write_label2rgb_image(out_map,Raw_img,'Best_segmentation_test_iter_50000_1fm3fm5fm_correctByFull_outmap_th020_rindx0064745019');
89 | % write_label2rgb_image(L);
90 | 
91 | 
--------------------------------------------------------------------------------
/scripts/read_data_write_data_with_enhanced_labels.m:
--------------------------------------------------------------------------------
1 | if ~ispc
2 | train_dir='../images/train-input';
3 | train_lb_dir='../images/train-label2d_widen';
4 | save_dir ='../data';
5 | test_dir='../images/test-input';
6 | else
7 | train_dir='..\images\train-input';
8 | train_lb_dir='..\images\train-label2d_widen';
9 | test_dir='..\images\test-input';
10 | save_dir ='..\data';
11 | end
12 | d_details.location = '/';
13 | d_details.Name = 'data';
14 | l_details.location = '/';
15 | l_details.Name = 'label';
16 | 
17 | d_file_name_base='train-input_slice_';
18 | l_file_name_base='label2d_widen_';
19 | 
20 | 
21 | enhanced_label_file='../data/vertical_enhanced_thin_label3x3.h5';
22 | 
23 | enhanced_label = hdf5read(enhanced_label_file,'/labels');
24 | size(enhanced_label)
25 | 
25 | enhanced_label=permute(enhanced_label,[3 1 2]); 26 | 27 | 28 | %%-----------------------train full 100 stacks ----------------------------------- 29 | data_arr=zeros(100,1024,1024); 30 | elm_labels_arr=zeros(100,1024,1024); 31 | for i=1:100 32 | d_filename= [d_file_name_base num2str(i) '.tif']; 33 | d_file_full_name=[train_dir filesep d_filename]; 34 | tr_img_data=imread(d_file_full_name); 35 | data_arr(i,:,:)=tr_img_data; 36 | 37 | l_filename= [l_file_name_base num2str(i) '.tif']; 38 | l_file_full_name=[train_lb_dir filesep l_filename]; 39 | tr_img_lb_data=imread(l_file_full_name); 40 | elm_labels_arr(i,:,:)=tr_img_lb_data; 41 | disp(['reading ' num2str(i) ' images ...' ]); 42 | 43 | end 44 | %elm_labels_arr(elm_labels_arr==255)=1; 45 | 46 | 47 | 48 | elm_labels_arr=enhanced_label; % use the enhanced label stack in place of the raw 2d labels read above 49 | data{1}=data_arr; 50 | elm_labels{1}=elm_labels_arr; 51 | 52 | 53 | 54 | size(data_arr) 55 | d_tr =single(data_arr); 56 | l_tr =single(elm_labels_arr); 57 | 58 | figure,imshow(uint8(squeeze(d_tr(50,:,:)))) 59 | figure,imshow(squeeze(l_tr(50,:,:))) 60 | 61 | [data,labels]=augment_data(d_tr,l_tr); 62 | for i=1:length(data) 63 | d_tr=data{i}; 64 | l_tr=labels{i}; 65 | hdf5write([save_dir filesep 'snemi3d_train_full_stacks_v' num2str(i) '.h5'],d_details,d_tr,l_details,l_tr); 66 | end 67 | 68 | savefile=[save_dir filesep 'snemi3d_train_full_stacks_v16.mat']; 69 | save(savefile,'data','labels','-v7.3'); 70 | 71 | %--------------------------------------------------------------------------------------------- 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | % %----------first 20 as valid stacks ------------------------------------------------------- 81 | 82 | 83 | data_arr=zeros(20,1024,1024); 84 | elm_labels_arr=zeros(20,1024,1024); 85 | for i=1:20 86 | d_filename= [d_file_name_base num2str(i) '.tif']; 87 | d_file_full_name=[train_dir filesep d_filename]; 88 | tr_img_data=imread(d_file_full_name); 89 | data_arr(i,:,:)=tr_img_data; 90 | 91 | l_filename= [l_file_name_base num2str(i) '.tif']; 92 | l_file_full_name=[train_lb_dir filesep l_filename]; 93 | tr_img_lb_data=imread(l_file_full_name); 94 | elm_labels_arr(i,:,:)=tr_img_lb_data; 95 | disp(['reading ' num2str(i) ' images ...' ]); 96 | 97 | end 98 | %elm_labels_arr(elm_labels_arr==255)=1; 99 | elm_labels_arr=enhanced_label(1:20,:,:); 100 | data{1}=data_arr; 101 | elm_labels{1}=elm_labels_arr; 102 | 103 | 104 | d_tr =single(data_arr); 105 | l_tr =single(elm_labels_arr); 106 | 107 | [data,labels]=augment_data(d_tr,l_tr); 108 | for i=1:length(data) 109 | d_tr=data{i}; 110 | l_tr=labels{i}; 111 | hdf5write([save_dir filesep 'snemi3d_valid_v' num2str(i) '.h5'],d_details,d_tr,l_details,l_tr); 112 | end 113 | 114 | savefile=[save_dir filesep 'snemi3d_valid_v8.mat']; 115 | save(savefile,'data','labels','-v7.3'); 116 | 117 | 118 | clear data_arr 119 | clear elm_labels_arr 120 | 121 | data_arr=zeros(80,1024,1024); 122 | elm_labels_arr=zeros(80,1024,1024); 123 | for i=21:100 124 | d_filename= [d_file_name_base num2str(i) '.tif']; 125 | d_file_full_name=[train_dir filesep d_filename]; 126 | tr_img_data=imread(d_file_full_name); 127 | data_arr(i-20,:,:)=tr_img_data; 128 | 129 | l_filename= [l_file_name_base num2str(i) '.tif']; 130 | l_file_full_name=[train_lb_dir filesep l_filename]; 131 | tr_img_lb_data=imread(l_file_full_name); 132 | elm_labels_arr(i-20,:,:)=tr_img_lb_data; 133 | disp(['reading ' num2str(i) ' images ...' ]);
134 | 135 | end 136 | %elm_labels_arr(elm_labels_arr==255)=1; 137 | elm_labels_arr=enhanced_label(21:end,:,:); 138 | %data{1}=data_arr; 139 | elm_labels{1}=elm_labels_arr; 140 | %savefile=[save_dir filesep 'snemi3d_train.mat']; 141 | %save(savefile,'data','elm_labels','-v7.3'); 142 | d_te =single(data_arr); 143 | l_te =single(elm_labels_arr); 144 | 145 | 146 | [data,labels]=augment_data(d_te,l_te); 147 | for i=1:length(data) 148 | d_te=data{i}; 149 | l_te=labels{i}; 150 | hdf5write([save_dir filesep 'snemi3d_train_v' num2str(i) '.h5'],d_details,d_te,l_details,l_te); 151 | end 152 | 153 | 154 | savefile=[save_dir filesep 'snemi3d_train_v8.mat']; 155 | save(savefile,'data','labels','-v7.3'); 156 | 157 | 158 | 159 | clear data_arr 160 | data_arr=zeros(10,1024,1024); 161 | 162 | 163 | 164 | %----------------- small test 10 slice ------------------------------------ 165 | d_file_name_base='test-input_slice_'; 166 | for i=91:100 % the last 10 test slices; 10 rows are preallocated above 167 | d_filename= [d_file_name_base num2str(i) '.tif']; 168 | d_file_full_name=[test_dir filesep d_filename]; 169 | te_img_data=imread(d_file_full_name); 170 | data_arr(i-90,:,:)=te_img_data; 171 | disp(['reading ' num2str(i) ' testing images ...' ]); 172 | 173 | end 174 | 175 | %data{1}=data_arr; 176 | %savefile=[save_dir filesep 'snemi3d_test.mat']; 177 | %save(savefile,'data','-v7.3'); 178 | d_te =single(data_arr); 179 | 180 | l_te =single(elm_labels_arr); % placeholder labels for augment_data; only the image data is written below 181 | 182 | 183 | [data,labels]=augment_data(d_te,l_te); 184 | for i=1:length(data) 185 | d_te=data{i}; 186 | l_te=labels{i}; 187 | hdf5write([save_dir filesep 'snemi3d_test_last10slice_v' num2str(i) '.h5'],d_details,d_te); 188 | end 189 | 190 | savefile=[save_dir filesep 'snemi3d_test_last10slice_v8.mat']; 191 | save(savefile,'data','-v7.3'); 192 | %hdf5write([save_dir filesep 'snemi3d_test_last10slice.h5'],d_details,d_te); 193 | 194 | 195 | 196 | clear data_arr 197 | data_arr=zeros(100,1024,1024); 198 | elm_labels_arr=zeros(100,1024,1024); 199 | %----------------- full test slice ------------------------------------ 200 | d_file_name_base='test-input_slice_'; 201 | for i=1:100 202 | d_filename= [d_file_name_base num2str(i) '.tif']; 203 | d_file_full_name=[test_dir filesep d_filename]; 204 | te_img_data=imread(d_file_full_name); 205 | data_arr(i,:,:)=te_img_data; 206 | disp(['reading ' num2str(i) ' testing images ...' 
]); 207 | 208 | end 209 | 210 | %data{1}=data_arr; 211 | %savefile=[save_dir filesep 'snemi3d_test.mat']; 212 | %save(savefile,'data','-v7.3'); 213 | elm_labels_arr=enhanced_label; 214 | d_te =single(data_arr); 215 | 216 | l_te =single(elm_labels_arr); 217 | 218 | 219 | [data,labels]=augment_data(d_te,l_te); 220 | for i=1:length(data) 221 | d_te=data{i}; 222 | %l_te=labels{i}; 223 | disp(['writing ' num2str(i) ' file ...']); 224 | hdf5write([save_dir filesep 'snemi3d_test_v' num2str(i) '.h5'],d_details,d_te); 225 | end 226 | 227 | savefile=[save_dir filesep 'snemi3d_test_v8.mat']; 228 | save(savefile,'data','-v7.3'); 229 | %hdf5write([save_dir filesep 'snemi3d_test_last10slice.h5'],d_details,d_te); 230 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_1fm/deploy.prototxt: -------------------------------------------------------------------------------- 1 | name: "BIG_N_INCEPTION_FCN" 2 | input: "data" 3 | input_shape{ 4 | dim: 1 5 | dim: 1 6 | dim: 1024 7 | dim: 1024 8 | dim: 1 9 | } 10 | 11 | layer { 12 | name: "conv1_1" 13 | type: "Convolution" 14 | bottom: "data" 15 | top: "conv1_1" 16 | param {lr_mult: 1} param {lr_mult: 2} 17 | convolution_param { 18 | num_output: 32 19 | kernel_size: 3 kernel_size: 3 kernel_size: 1 20 | stride: 2 stride: 2 stride: 1 21 | pad:1 pad:1 pad:0 22 | weight_filler { type: "gaussian" std: 0.01} 23 | bias_filler {type: "constant" value: 0} 24 | } 25 | } 26 | 27 | 28 | layer { 29 | bottom: "conv1_1" 30 | top: "conv1_1" 31 | name: "bn_conv1_1" 32 | type: "BatchNorm" 33 | } 34 | 35 | layer { 36 | bottom: "conv1_1" 37 | top: "conv1_1" 38 | name: "scale_conv1_1" 39 | type: "Scale" 40 | scale_param { 41 | bias_term: true 42 | } 43 | } 44 | layer { 45 | name: "relu1_1" 46 | type: "ReLU" 47 | bottom: "conv1_1" 48 | top: "conv1_1" 49 | } 50 | 51 | layer { 52 | name: "conv1_2" 53 | type: "Convolution" 54 | bottom: "conv1_1" 55 | top: "conv1_2" 56 | param {lr_mult: 1} param {lr_mult: 2} 57 | phase:PREDICT 58 | convolution_param { 59 | num_output: 64 60 | kernel_size: 3 kernel_size: 3 kernel_size: 1 61 | stride: 1 62 | pad:1 pad:1 pad:0 63 | weight_filler { type: "gaussian" std: 0.01} 64 | bias_filler {type: "constant" value: 0} 65 | } 66 | } 67 | 68 | layer { 69 | bottom: "conv1_2" top: "conv1_2" name: "bn_conv1_2" type: "BatchNorm" 70 | # # batch_norm_param {use_global_stats: true} 71 | } 72 | 73 | layer { 74 | bottom: "conv1_2" top: "conv1_2" name: "scale_conv1_2" type: "Scale" 75 | scale_param { bias_term: true} 76 | } 77 | layer { 78 | name: "relu1_2" type: "ReLU" bottom: "conv1_2" top: "conv1_2" 79 | } 80 | 81 | 82 | layer { 83 | name: "reshape" 84 | type: "Reshape" 85 | bottom: "conv1_2" 86 | top: "conv1_2" 87 | reshape_param { shape: {dim: 0 dim: 0 dim: 0 dim: 0 } } 88 | } 89 | 90 | 91 | # layer { 92 | # name: "pool1" 93 | # type: "Pooling" 94 | # bottom: "conv1_2" 95 | # top: "pool1" 96 | # top: "pool1_mask" 97 | # pooling_param { 98 | # pool: MAX 99 | # kernel_size: 2 100 | # stride: 2 101 | # } 102 | # } 103 | 104 | 105 | layer { 106 | name: "conv2_1b" 107 | type: "Convolution" 108 | bottom: "conv1_2" 109 | top: "conv2_1b" 110 | param {lr_mult: 1} param {lr_mult: 2} 111 | convolution_param { 112 | num_output:64 113 | kernel_size: 1 114 | stride: 1 pad:0 115 | weight_filler { type: "xavier" } 116 | bias_filler { type: "constant" value: 0 } 117 | } 118 | } 119 | layer { 120 | bottom: "conv2_1b" top: "conv2_1b" name: "bn_conv2_1b" type: "BatchNorm" 121 | # # batch_norm_param 
{use_global_stats: true} 122 | } 123 | 124 | layer { 125 | bottom: "conv2_1b" top: "conv2_1b" name: "scale_conv2_1b" type: "Scale" 126 | scale_param { bias_term: true} 127 | } 128 | layer { 129 | name: "relu2_1b" type: "ReLU" bottom: "conv2_1b" top: "conv2_1b" 130 | } 131 | 132 | 133 | 134 | layer { 135 | name: "conv2_1b_3x3" 136 | type: "Convolution" 137 | bottom: "conv2_1b" 138 | top: "conv2_1b_3x3" 139 | param {lr_mult: 1} param {lr_mult: 2} 140 | phase:PREDICT 141 | convolution_param { 142 | num_output: 96 143 | kernel_size: 3 144 | stride: 1 pad:1 145 | weight_filler { type: "xavier" } 146 | bias_filler { type: "constant" value: 0 } 147 | } 148 | } 149 | layer { 150 | bottom: "conv2_1b_3x3" top: "conv2_1b_3x3" name: "bn_conv2_1b_3x3" type: "BatchNorm" 151 | # # batch_norm_param {use_global_stats: true} 152 | } 153 | 154 | layer { 155 | bottom: "conv2_1b_3x3" top: "conv2_1b_3x3" name: "scale_conv2_1b_3x3" type: "Scale" 156 | scale_param { bias_term: true} 157 | } 158 | # layer { 159 | # name: "relu2_1b_3x3" type: "ReLU" bottom: "conv2_1b_3x3" top: "conv2_1b_3x3" 160 | # } 161 | 162 | 163 | 164 | layer { 165 | name: "conv2_1x1" 166 | type: "Convolution" 167 | bottom: "conv1_2" 168 | top: "conv2_1x1" 169 | param {lr_mult: 1} param {lr_mult: 2} 170 | phase:PREDICT 171 | convolution_param { 172 | num_output:64 173 | kernel_size: 1 174 | stride: 1 pad:0 175 | weight_filler { type: "xavier" } 176 | bias_filler { type: "constant" value: 0 } 177 | } 178 | } 179 | layer { 180 | bottom: "conv2_1x1" top: "conv2_1x1" name: "bn_conv2_1x1" type: "BatchNorm" 181 | } 182 | 183 | layer { 184 | bottom: "conv2_1x1" top: "conv2_1x1" name: "scale_conv2_1x1" type: "Scale" 185 | scale_param { bias_term: true} 186 | } 187 | layer { 188 | name: "relu2_1x1" type: "ReLU" bottom: "conv2_1x1" top: "conv2_1x1" 189 | } 190 | 191 | 192 | 193 | layer { 194 | name: "conv2_1x7" 195 | type: "Convolution" 196 | bottom: "conv2_1x1" 197 | top: "conv2_1x7" 198 | param {lr_mult: 1} param {lr_mult: 2} 199 | phase:PREDICT 200 | convolution_param { 201 | num_output:64 202 | kernel_w: 1 kernel_h: 7 203 | stride: 1 pad_w:0 pad_h:3 204 | weight_filler { type: "xavier" } 205 | bias_filler { type: "constant" value: 0 } 206 | } 207 | } 208 | layer { 209 | bottom: "conv2_1x7" top: "conv2_1x7" name: "bn_conv2_1x7" type: "BatchNorm" 210 | # # batch_norm_param {use_global_stats: true} 211 | } 212 | 213 | layer { 214 | bottom: "conv2_1x7" top: "conv2_1x7" name: "scale_conv2_1x7" type: "Scale" 215 | scale_param { bias_term: true} 216 | } 217 | layer { 218 | name: "relu2_1x7" type: "ReLU" bottom: "conv2_1x7" top: "conv2_1x7" 219 | } 220 | 221 | 222 | layer { 223 | name: "conv2_7x1" 224 | type: "Convolution" 225 | bottom: "conv2_1x7" 226 | top: "conv2_7x1" 227 | param {lr_mult: 1} param {lr_mult: 2} 228 | phase:PREDICT 229 | convolution_param { 230 | num_output:64 231 | kernel_w: 7 kernel_h: 1 232 | stride: 1 pad_w:3 pad_h:0 233 | weight_filler { type: "xavier" } 234 | bias_filler { type: "constant" value: 0 } 235 | } 236 | } 237 | layer { 238 | bottom: "conv2_7x1" top: "conv2_7x1" name: "bn_conv2_7x1" type: "BatchNorm" 239 | # # batch_norm_param {use_global_stats: true} 240 | } 241 | 242 | layer { 243 | bottom: "conv2_7x1" top: "conv2_7x1" name: "scale_conv2_7x1" type: "Scale" 244 | scale_param { bias_term: true} 245 | } 246 | layer { 247 | name: "relu2_7x1" type: "ReLU" bottom: "conv2_7x1" top: "conv2_7x1" 248 | } 249 | 250 | 251 | 252 | layer { 253 | name: "conv2_3x3" 254 | type: "Convolution" 255 | bottom: "conv2_7x1" 256 | top: 
"conv2_3x3" 257 | param {lr_mult: 1} param {lr_mult: 2} 258 | phase:PREDICT 259 | convolution_param { 260 | num_output:96 261 | kernel_size: 3 262 | stride: 1 pad:1 263 | weight_filler { type: "xavier" } 264 | bias_filler { type: "constant" value: 0 } 265 | } 266 | } 267 | layer { 268 | bottom: "conv2_3x3" top: "conv2_3x3" name: "bn_conv2_3x3" type: "BatchNorm" 269 | # # batch_norm_param {use_global_stats: true} 270 | } 271 | 272 | layer { 273 | bottom: "conv2_3x3" top: "conv2_3x3" name: "scale_conv2_3x3" type: "Scale" 274 | scale_param { bias_term: true} 275 | } 276 | layer { 277 | name: "relu2_3x3" type: "ReLU" bottom: "conv2_3x3" top: "conv2_3x3" 278 | } 279 | 280 | 281 | layer{ 282 | name: "concat_stem_1" 283 | type: "Concat" 284 | bottom:"conv2_1b_3x3" 285 | bottom: "conv2_3x3" 286 | top: "concat_stem_1" 287 | } 288 | 289 | 290 | layer { 291 | name: "stem_concat_conv_3x3" 292 | type: "Convolution" 293 | bottom: "concat_stem_1" 294 | top: "stem_concat_conv_3x3" 295 | param {lr_mult: 1} param {lr_mult: 2} 296 | #phase:PREDICT 297 | convolution_param { 298 | num_output:192 299 | kernel_size: 3 300 | stride: 2 pad:1 301 | weight_filler { type: "xavier" } 302 | bias_filler { type: "constant" value: 0 } 303 | } 304 | } 305 | layer { 306 | bottom: "stem_concat_conv_3x3" top: "stem_concat_conv_3x3" name: "bn_stem_concat_conv_3x3" type: "BatchNorm" 307 | } 308 | 309 | layer { 310 | bottom: "stem_concat_conv_3x3" top: "stem_concat_conv_3x3" name: "scale_stem_concat_conv_3x3" type: "Scale" 311 | scale_param { bias_term: true} 312 | } 313 | layer { 314 | name: "relu_stem_concat_conv_3x3" type: "ReLU" bottom: "stem_concat_conv_3x3" top: "stem_concat_conv_3x3" 315 | } 316 | 317 | layer { 318 | name: "pool_stem_concat" 319 | type: "Pooling" 320 | bottom: "concat_stem_1" 321 | top: "pool_stem_concat" 322 | #phase:PREDICT 323 | #top: "pool2_mask" 324 | pooling_param { 325 | pool: MAX 326 | kernel_size: 3 327 | stride: 2 328 | } 329 | } 330 | 331 | layer{ 332 | name: "concat_stem_2" 333 | type: "Concat" 334 | bottom:"pool_stem_concat" 335 | bottom: "stem_concat_conv_3x3" 336 | top: "concat_stem_2" 337 | } 338 | 339 | 340 | 341 | #---------------------------inception_Res_A-------------------------------------- 342 | layer { 343 | name: "conv3_1b" 344 | type: "Convolution" 345 | bottom: "concat_stem_2" 346 | top: "conv3_1b" 347 | param {lr_mult: 1} param {lr_mult: 2} 348 | convolution_param { 349 | num_output: 384 350 | kernel_size: 1 351 | stride: 1 pad:0 352 | weight_filler { type: "xavier" } 353 | bias_filler { type: "constant" value: 0 } 354 | } 355 | } 356 | layer { 357 | bottom: "conv3_1b" top: "conv3_1b" name: "bn_conv3_1b" type: "BatchNorm" 358 | # batch_norm_param {use_global_stats: true} 359 | } 360 | 361 | layer { 362 | bottom: "conv3_1b" top: "conv3_1b" name: "scale_conv3_1b" type: "Scale" 363 | scale_param { bias_term: true} 364 | } 365 | layer { 366 | name: "relu3_1b" type: "ReLU" bottom: "conv3_1b" top: "conv3_1b" 367 | } 368 | #-------a_1x1---- 369 | layer { 370 | name: "ira_A_1_conv1x1" 371 | type: "Convolution" 372 | bottom: "conv3_1b" 373 | top: "ira_A_1_conv1x1" 374 | param {lr_mult: 1} param {lr_mult: 2} 375 | #phase:PREDICT 376 | convolution_param { 377 | num_output:32 378 | kernel_size: 1 379 | weight_filler { type: "xavier" } 380 | bias_filler { type: "constant" value: 0 } 381 | } 382 | } 383 | layer { 384 | bottom: "ira_A_1_conv1x1" top: "ira_A_1_conv1x1" name: "bn_ira_A_1_conv1x1" type: "BatchNorm" 385 | } 386 | 387 | layer { 388 | bottom: "ira_A_1_conv1x1" top: 
"ira_A_1_conv1x1" name: "scale_ira_A_1_conv1x1" type: "Scale" 389 | scale_param { bias_term: true} 390 | } 391 | layer { 392 | name: "relu_ira_A_1_conv1x1" type: "ReLU" bottom: "ira_A_1_conv1x1" top: "ira_A_1_conv1x1" 393 | } 394 | 395 | #-------a2---------- 396 | layer { 397 | name: "ira_A_2_conv1x1" 398 | type: "Convolution" 399 | bottom: "conv3_1b" 400 | top: "ira_A_2_conv1x1" 401 | param {lr_mult: 1} param {lr_mult: 2} 402 | #phase:PREDICT 403 | convolution_param { 404 | num_output:32 405 | kernel_size: 1 406 | weight_filler { type: "xavier" } 407 | bias_filler { type: "constant" value: 0 } 408 | } 409 | } 410 | layer { 411 | bottom: "ira_A_2_conv1x1" top: "ira_A_2_conv1x1" name: "bn_ira_A_2_conv1x1" type: "BatchNorm" 412 | } 413 | 414 | layer { 415 | bottom: "ira_A_2_conv1x1" top: "ira_A_2_conv1x1" name: "scale_ira_A_2_conv1x1" type: "Scale" 416 | scale_param { bias_term: true} 417 | } 418 | layer { 419 | name: "relu_ira_A_2_conv1x1" type: "ReLU" bottom: "ira_A_2_conv1x1" top: "ira_A_2_conv1x1" 420 | } 421 | 422 | 423 | layer { 424 | name: "ira_A_2_conv3x3" 425 | type: "Convolution" 426 | bottom: "ira_A_2_conv1x1" 427 | top: "ira_A_2_conv3x3" 428 | param {lr_mult: 1} param {lr_mult: 2} 429 | # phase:PREDICT 430 | convolution_param { 431 | num_output:32 432 | kernel_size: 3 433 | pad:1 434 | weight_filler { type: "xavier" } 435 | bias_filler { type: "constant" value: 0 } 436 | } 437 | } 438 | layer { 439 | bottom: "ira_A_2_conv3x3" top: "ira_A_2_conv3x3" name: "bn_ira_A_2_conv3x3" type: "BatchNorm" 440 | } 441 | 442 | layer { 443 | bottom: "ira_A_2_conv3x3" top: "ira_A_2_conv3x3" name: "scale_ira_A_2_conv3x3" type: "Scale" 444 | scale_param { bias_term: true} 445 | } 446 | layer { 447 | name: "relu_ira_A_2_conv3x3" type: "ReLU" bottom: "ira_A_2_conv3x3" top: "ira_A_2_conv3x3" 448 | } 449 | 450 | 451 | 452 | #-----------a3------------------------------- 453 | layer { 454 | name: "ira_A_3_conv1x1" 455 | type: "Convolution" 456 | bottom: "conv3_1b" 457 | top: "ira_A_3_conv1x1" 458 | param {lr_mult: 1} param {lr_mult: 2} 459 | # phase:PREDICT 460 | convolution_param { 461 | num_output:32 462 | kernel_size: 1 463 | weight_filler { type: "xavier" } 464 | bias_filler { type: "constant" value: 0 } 465 | } 466 | } 467 | layer { 468 | bottom: "ira_A_3_conv1x1" top: "ira_A_3_conv1x1" name: "bn_ira_A_3_conv1x1" type: "BatchNorm" 469 | } 470 | 471 | layer { 472 | bottom: "ira_A_3_conv1x1" top: "ira_A_3_conv1x1" name: "scale_ira_A_3_conv1x1" type: "Scale" 473 | scale_param { bias_term: true} 474 | } 475 | layer { 476 | name: "relu_ira_A_3_conv1x1" type: "ReLU" bottom: "ira_A_3_conv1x1" top: "ira_A_3_conv1x1" 477 | } 478 | 479 | 480 | 481 | 482 | layer { 483 | name: "ira_A_3_conv3x3_1" 484 | type: "Convolution" 485 | bottom: "ira_A_3_conv1x1" 486 | top: "ira_A_3_conv3x3_1" 487 | # phase:PREDICT 488 | param {lr_mult: 1} param {lr_mult: 2} 489 | convolution_param { 490 | num_output:48 491 | kernel_size: 3 492 | pad: 1 493 | weight_filler { type: "xavier" } 494 | bias_filler { type: "constant" value: 0 } 495 | } 496 | } 497 | layer { 498 | bottom: "ira_A_3_conv3x3_1" top: "ira_A_3_conv3x3_1" name: "bn_ira_A_3_conv3x3_1" type: "BatchNorm" 499 | } 500 | 501 | layer { 502 | bottom: "ira_A_3_conv3x3_1" top: "ira_A_3_conv3x3_1" name: "scale_ira_A_3_conv3x3_1" type: "Scale" 503 | scale_param { bias_term: true} 504 | } 505 | layer { 506 | name: "relu_ira_A_3_conv3x3_1" type: "ReLU" bottom: "ira_A_3_conv3x3_1" top: "ira_A_3_conv3x3_1" 507 | } 508 | 509 | 510 | 511 | 512 | layer { 513 | name: "ira_A_3_conv3x3_2" 
514 | type: "Convolution" 515 | bottom: "ira_A_3_conv3x3_1" 516 | top: "ira_A_3_conv3x3_2" 517 | param {lr_mult: 1} param {lr_mult: 2} 518 | # phase:PREDICT 519 | convolution_param { 520 | num_output:64 521 | kernel_size: 3 522 | pad: 1 523 | weight_filler { type: "xavier" } 524 | bias_filler { type: "constant" value: 0 } 525 | } 526 | } 527 | layer { 528 | bottom: "ira_A_3_conv3x3_2" top: "ira_A_3_conv3x3_2" name: "bn_ira_A_3_conv3x3_2" type: "BatchNorm" 529 | } 530 | 531 | layer { 532 | bottom: "ira_A_3_conv3x3_2" top: "ira_A_3_conv3x3_2" name: "scale_ira_A_3_conv3x3_2" type: "Scale" 533 | scale_param { bias_term: true} 534 | } 535 | layer { 536 | name: "relu_ira_A_3_conv3x3_2" type: "ReLU" bottom: "ira_A_3_conv3x3_2" top: "ira_A_3_conv3x3_2" 537 | } 538 | 539 | 540 | 541 | 542 | layer{ 543 | name: "ira_A_concat" 544 | type: "Concat" 545 | bottom:"ira_A_1_conv1x1" 546 | bottom: "ira_A_2_conv3x3" 547 | bottom: "ira_A_3_conv3x3_2" 548 | top: "ira_A_concat" 549 | } 550 | 551 | 552 | layer { 553 | name: "ira_A_concat_top_conv_1x1" 554 | type: "Convolution" 555 | bottom: "ira_A_concat" 556 | top: "ira_A_concat_top_conv_1x1" 557 | param {lr_mult: 1} param {lr_mult: 2} 558 | # phase:PREDICT 559 | convolution_param { 560 | num_output:384 561 | kernel_size: 1 562 | weight_filler { type: "xavier" } 563 | bias_filler { type: "constant" value: 0 } 564 | } 565 | } 566 | layer { 567 | bottom: "ira_A_concat_top_conv_1x1" top: "ira_A_concat_top_conv_1x1" name: "bn_ra_A_concat_top_conv_1x1" type: "BatchNorm" 568 | } 569 | 570 | layer { 571 | bottom: "ira_A_concat_top_conv_1x1" top: "ira_A_concat_top_conv_1x1" name: "scale_ra_A_concat_top_conv_1x1" type: "Scale" 572 | scale_param { bias_term: true} 573 | } 574 | 575 | layer{ 576 | name: "conv3_sum" 577 | type: "Eltwise" 578 | bottom: "conv3_1b" 579 | bottom:"ira_A_concat_top_conv_1x1" 580 | top:"conv3_sum" 581 | # phase:PREDICT 582 | eltwise_param {operation: SUM coeff: 1 coeff:0.1} 583 | } 584 | 585 | 586 | # layer { 587 | # bottom: "conv3_sum" top: "conv3_sum" name: "bn_conv3_sum" type: "BatchNorm" 588 | # # batch_norm_param {use_global_stats: true} 589 | # } 590 | 591 | # layer { 592 | # bottom: "conv3_sum" top: "conv3_sum" name: "scale_conv3_sum" type: "Scale" 593 | # scale_param { bias_term: true} 594 | # } 595 | layer {name: "relu3_sum" type: "ReLU" bottom: "conv3_sum" top: "conv3_sum"} 596 | 597 | #---------------------- 598 | 599 | ##=================================REDUCTION_A======================== 600 | #-----reduction A Pooling a-------------- 601 | layer { 602 | name: "ira_v4_reduction_A/pool" 603 | type: "Pooling" 604 | bottom: "conv3_sum" 605 | top: "ira_v4_reduction_A/pool" 606 | #top: "pool2_mask" 607 | pooling_param { 608 | pool: MAX 609 | kernel_size: 3 610 | stride: 2 611 | } 612 | } 613 | #-----reduction A conv1x1_b-------------- 614 | layer { 615 | name: "ira_v4_reduction_A/conv3x3_reduction_b" 616 | type: "Convolution" 617 | bottom: "conv3_sum" 618 | top: "ira_v4_reduction_A/conv3x3_reduction_b" 619 | param {lr_mult: 1} param {lr_mult: 2} 620 | convolution_param { 621 | num_output:384 622 | kernel_size: 3 623 | stride:2 624 | pad:1 625 | weight_filler { type: "xavier" } 626 | bias_filler { type: "constant" value: 0 } 627 | } 628 | } 629 | layer { 630 | bottom: "ira_v4_reduction_A/conv3x3_reduction_b" top: "ira_v4_reduction_A/conv3x3_reduction_b" name: "bn_ira_v4_reduction_A/conv3x3_reduction_b" type: "BatchNorm" 631 | } 632 | 633 | layer { 634 | bottom: "ira_v4_reduction_A/conv3x3_reduction_b" top: 
"ira_v4_reduction_A/conv3x3_reduction_b" name: "scale_ira_v4_reduction_A/conv3x3_reduction_b" type: "Scale" 635 | scale_param { bias_term: true} 636 | } 637 | layer { 638 | name: "relu_ira_v4_reduction_A/conv3x3_reduction_b" type: "ReLU" bottom: "ira_v4_reduction_A/conv3x3_reduction_b" top: "ira_v4_reduction_A/conv3x3_reduction_b" 639 | } 640 | 641 | #-----reduction A conv_recdution _c -------------- 642 | layer { 643 | name: "ira_v4_reduction_A/conv1x1_c" 644 | type: "Convolution" 645 | bottom: "conv3_sum" 646 | top: "ira_v4_reduction_A/conv1x1_c" 647 | param {lr_mult: 1} param {lr_mult: 2} 648 | convolution_param { 649 | num_output:256 650 | kernel_size: 1 651 | weight_filler { type: "xavier" } 652 | bias_filler { type: "constant" value: 0 } 653 | } 654 | } 655 | layer { 656 | bottom: "ira_v4_reduction_A/conv1x1_c" top: "ira_v4_reduction_A/conv1x1_c" name: "bn_ira_v4_reduction_A/conv1x1_c" type: "BatchNorm" 657 | } 658 | 659 | layer { 660 | bottom: "ira_v4_reduction_A/conv1x1_c" top: "ira_v4_reduction_A/conv1x1_c" name: "scale_ira_v4_reduction_A/conv1x1_c" type: "Scale" 661 | scale_param { bias_term: true} 662 | } 663 | layer { 664 | name: "relu_ira_v4_reduction_A/conv1x1_c" type: "ReLU" bottom: "ira_v4_reduction_A/conv1x1_c" top: "ira_v4_reduction_A/conv1x1_c" 665 | } 666 | 667 | #-------c__3x3_2---- 668 | layer { 669 | name: "ira_v4_reduction_A/conv3x3_c" 670 | type: "Convolution" 671 | bottom: "ira_v4_reduction_A/conv1x1_c" 672 | top: "ira_v4_reduction_A/conv3x3_c" 673 | param {lr_mult: 1} param {lr_mult: 2} 674 | convolution_param { 675 | num_output:256 676 | kernel_size: 3 677 | pad:1 678 | weight_filler { type: "xavier" } 679 | bias_filler { type: "constant" value: 0 } 680 | } 681 | } 682 | layer { 683 | bottom: "ira_v4_reduction_A/conv3x3_c" top: "ira_v4_reduction_A/conv3x3_c" name: "bn_ira_v4_reduction_A/conv3x3_c" type: "BatchNorm" 684 | } 685 | 686 | layer { 687 | bottom: "ira_v4_reduction_A/conv3x3_c" top: "ira_v4_reduction_A/conv3x3_c" name: "scale_ira_v4_reduction_A/conv3x3_c" type: "Scale" 688 | scale_param { bias_term: true} 689 | } 690 | layer { 691 | name: "relu_ira_v4_reduction_A/conv3x3_c" type: "ReLU" bottom: "ira_v4_reduction_A/conv3x3_c" top: "ira_v4_reduction_A/conv3x3_c" 692 | } 693 | 694 | #-----3x3_c_reduction 695 | layer { 696 | name: "ira_v4_reduction_A/conv3x3_reduction_c" 697 | type: "Convolution" 698 | bottom: "ira_v4_reduction_A/conv3x3_c" 699 | top: "ira_v4_reduction_A/conv3x3_reduction_c" 700 | param {lr_mult: 1} param {lr_mult: 2} 701 | convolution_param { 702 | num_output:384 703 | kernel_size: 3 704 | stride: 2 705 | pad:1 706 | weight_filler { type: "xavier" } 707 | bias_filler { type: "constant" value: 0 } 708 | } 709 | } 710 | layer { 711 | bottom: "ira_v4_reduction_A/conv3x3_reduction_c" top: "ira_v4_reduction_A/conv3x3_reduction_c" name: "bn_ira_v4_reduction_A/conv3x3_reduction_c" type: "BatchNorm" 712 | } 713 | 714 | layer { 715 | bottom: "ira_v4_reduction_A/conv3x3_reduction_c" top: "ira_v4_reduction_A/conv3x3_reduction_c" name: "scale_ira_v4_reduction_A/conv3x3_reduction_c" type: "Scale" 716 | scale_param { bias_term: true} 717 | } 718 | layer { 719 | name: "relu_ira_v4_reduction_A/conv3x3_reduction_c" type: "ReLU" bottom: "ira_v4_reduction_A/conv3x3_reduction_c" top: "ira_v4_reduction_A/conv3x3_reduction_c" 720 | } 721 | ##----reduction A_concat_top --------------------- 722 | layer{ 723 | name: "ira_v4_reduction_A/concat" 724 | type: "Concat" 725 | bottom:"ira_v4_reduction_A/pool" 726 | bottom: "ira_v4_reduction_A/conv3x3_reduction_b" 727 | 
bottom: "ira_v4_reduction_A/conv3x3_reduction_c" 728 | top: "ira_v4_reduction_A/concat" 729 | } 730 | #==============================End of REDUCTION_A ======================================== 731 | 732 | 733 | 734 | #=========================inception_Res_B========================================== 735 | # the ira_v4_reduction_A/concat of the First module is ira_v4_reduction_A/concat 736 | 737 | layer { 738 | name: "conv4_1b" 739 | type: "Convolution" 740 | bottom: "ira_v4_reduction_A/concat" 741 | top: "conv4_1b" 742 | param {lr_mult: 1} param {lr_mult: 2} 743 | convolution_param { 744 | num_output: 1154 745 | kernel_size: 1 746 | stride: 1 pad:0 747 | weight_filler { type: "xavier" } 748 | bias_filler { type: "constant" value: 0 } 749 | } 750 | } 751 | layer { 752 | bottom: "conv4_1b" top: "conv4_1b" name: "bn_conv4_1b" type: "BatchNorm" 753 | # batch_norm_param {use_global_stats: true} 754 | } 755 | 756 | layer { 757 | bottom: "conv4_1b" top: "conv4_1b" name: "scale_conv4_1b" type: "Scale" 758 | scale_param { bias_term: true} 759 | } 760 | layer { 761 | name: "relu4_1b" type: "ReLU" bottom: "conv4_1b" top: "conv4_1b" 762 | } 763 | 764 | #-------a_1_1x1---- 765 | layer { 766 | name: "ira_Inception_B_block_1/a_conv1x1_1" 767 | type: "Convolution" 768 | bottom: "conv4_1b" 769 | top: "ira_Inception_B_block_1/a_conv1x1_1" 770 | param {lr_mult: 1} param {lr_mult: 2} 771 | convolution_param { 772 | num_output:192 773 | kernel_size: 1 774 | weight_filler { type: "xavier" } 775 | bias_filler { type: "constant" value: 0 } 776 | } 777 | } 778 | layer { 779 | name: "bn_ira_Inception_B_block_1/a_conv1x1_1" 780 | type: "BatchNorm" 781 | bottom: "ira_Inception_B_block_1/a_conv1x1_1" 782 | top: "ira_Inception_B_block_1/a_conv1x1_1" 783 | 784 | } 785 | layer { 786 | name: "scale_ira_Inception_B_block_1/a_conv1x1_1" 787 | type: "Scale" 788 | bottom: "ira_Inception_B_block_1/a_conv1x1_1" 789 | top: "ira_Inception_B_block_1/a_conv1x1_1" 790 | scale_param { bias_term: true} 791 | } 792 | layer { 793 | name: "relu_ira_Inception_B_block_1/a_conv1x1_1" 794 | type: "ReLU" 795 | bottom: "ira_Inception_B_block_1/a_conv1x1_1" 796 | top: "ira_Inception_B_block_1/a_conv1x1_1" 797 | } 798 | 799 | #-------b1---------- 800 | layer { 801 | name: "ira_Inception_B_block_1/b_conv1x1_1" 802 | type: "Convolution" 803 | bottom: "conv4_1b" 804 | top: "ira_Inception_B_block_1/b_conv1x1_1" 805 | param {lr_mult: 1} param {lr_mult: 2} 806 | convolution_param { 807 | num_output:128 808 | kernel_size: 1 809 | weight_filler { type: "xavier" } 810 | bias_filler { type: "constant" value: 0 } 811 | } 812 | } 813 | layer { 814 | name: "bn_ira_Inception_B_block_1/b_conv1x1_1" 815 | type: "BatchNorm" 816 | bottom: "ira_Inception_B_block_1/b_conv1x1_1" 817 | top: "ira_Inception_B_block_1/b_conv1x1_1" 818 | } 819 | 820 | layer { 821 | name: "scale_ira_Inception_B_block_1/b_conv1x1_1" 822 | type: "Scale" 823 | bottom: "ira_Inception_B_block_1/b_conv1x1_1" 824 | top: "ira_Inception_B_block_1/b_conv1x1_1" 825 | scale_param { bias_term: true} 826 | } 827 | layer { 828 | name: "relu_ira_Inception_B_block_1/b_conv1x1_1" 829 | type: "ReLU" 830 | bottom: "ira_Inception_B_block_1/b_conv1x1_1" 831 | top: "ira_Inception_B_block_1/b_conv1x1_1" 832 | } 833 | 834 | #-------b2---------- 835 | layer { 836 | name: "ira_Inception_B_block_1/b_conv1x7_1" 837 | type: "Convolution" 838 | bottom: "ira_Inception_B_block_1/b_conv1x1_1" 839 | top: "ira_Inception_B_block_1/b_conv1x7_1" 840 | param {lr_mult: 1} param {lr_mult: 2} 841 | convolution_param { 842 | 
num_output:160 843 | kernel_h: 1 844 | kernel_w: 7 845 | pad_h: 0 846 | pad_w: 3 847 | weight_filler { type: "xavier" } 848 | bias_filler { type: "constant" value: 0 } 849 | } 850 | } 851 | layer { 852 | name: "bn_ira_Inception_B_block_1/b_conv1x7_1" 853 | type: "BatchNorm" 854 | bottom: "ira_Inception_B_block_1/b_conv1x7_1" 855 | top: "ira_Inception_B_block_1/b_conv1x7_1" 856 | } 857 | 858 | layer { 859 | name: "scale_ira_Inception_B_block_1/b_conv1x7_1" 860 | type: "Scale" 861 | bottom: "ira_Inception_B_block_1/b_conv1x7_1" 862 | top: "ira_Inception_B_block_1/b_conv1x7_1" 863 | scale_param { bias_term: true} 864 | } 865 | layer { 866 | name: "relu_ira_Inception_B_block_1/b_conv1x7_1" 867 | type: "ReLU" 868 | bottom: "ira_Inception_B_block_1/b_conv1x7_1" 869 | top: "ira_Inception_B_block_1/b_conv1x7_1" 870 | } 871 | 872 | 873 | 874 | #-----------b3------------------------------- 875 | layer { 876 | name: "ira_Inception_B_block_1/b_conv7x1_1" 877 | type: "Convolution" 878 | bottom: "ira_Inception_B_block_1/b_conv1x7_1" 879 | top: "ira_Inception_B_block_1/b_conv7x1_1" 880 | param {lr_mult: 1} param {lr_mult: 2} 881 | convolution_param { 882 | num_output:192 883 | kernel_h: 7 884 | kernel_w: 1 885 | pad_h: 3 886 | pad_w: 0 887 | weight_filler { type: "xavier" } 888 | bias_filler { type: "constant" value: 0 } 889 | } 890 | } 891 | layer { 892 | name: "bn_ira_Inception_B_block_1/b_conv7x1_1" 893 | type: "BatchNorm" 894 | bottom: "ira_Inception_B_block_1/b_conv7x1_1" 895 | top: "ira_Inception_B_block_1/b_conv7x1_1" 896 | } 897 | 898 | layer { 899 | name: "scale_ira_Inception_B_block_1/b_conv7x1_1" 900 | type: "Scale" 901 | bottom: "ira_Inception_B_block_1/b_conv7x1_1" 902 | top: "ira_Inception_B_block_1/b_conv7x1_1" 903 | scale_param { bias_term: true} 904 | } 905 | layer { 906 | name: "relu_ira_Inception_B_block_1/b_conv7x1_1" 907 | type: "ReLU" 908 | bottom: "ira_Inception_B_block_1/b_conv7x1_1" 909 | top: "ira_Inception_B_block_1/b_conv7x1_1" 910 | } 911 | 912 | 913 | #--concatenation----------------------- 914 | 915 | layer{ 916 | name: "ira_Inception_B_block_1/concat" 917 | type: "Concat" 918 | bottom:"ira_Inception_B_block_1/a_conv1x1_1" 919 | bottom: "ira_Inception_B_block_1/b_conv7x1_1" 920 | top: "ira_Inception_B_block_1/concat" 921 | } 922 | 923 | #--top conv over paths----------------------- 924 | 925 | layer { 926 | name: "ira_Inception_B_block_1/top_conv_1x1" 927 | type: "Convolution" 928 | bottom: "ira_Inception_B_block_1/concat" 929 | top: "ira_Inception_B_block_1/top_conv_1x1" 930 | param {lr_mult: 1} param {lr_mult: 2} 931 | convolution_param { 932 | num_output:1154 933 | kernel_size: 1 934 | weight_filler { type: "xavier" } 935 | bias_filler { type: "constant" value: 0 } 936 | } 937 | } 938 | layer { 939 | name: "bn_ira_Inception_B_block_1/top_conv_1x1" 940 | type: "BatchNorm" 941 | bottom: "ira_Inception_B_block_1/top_conv_1x1" 942 | top: "ira_Inception_B_block_1/top_conv_1x1" 943 | 944 | } 945 | 946 | layer { 947 | name: "scale_ira_Inception_B_block_1/top_conv_1x1" 948 | type: "Scale" 949 | bottom: "ira_Inception_B_block_1/top_conv_1x1" 950 | top: "ira_Inception_B_block_1/top_conv_1x1" 951 | scale_param { bias_term: true} 952 | } 953 | 954 | 955 | #--Sum before relu ----------------------- 956 | 957 | layer{ 958 | #name: "ira_Inception_B_block_1/sum" 959 | name: "conv4_sum" 960 | type: "Eltwise" 961 | bottom: "conv4_1b" 962 | bottom:"ira_Inception_B_block_1/top_conv_1x1" 963 | #top:"ira_Inception_B_block_1/sum" 964 | top: "conv4_sum" 965 | eltwise_param {operation: SUM coeff: 
1 coeff: 0.1} 966 | } 967 | # relu 968 | 969 | layer { 970 | #name: "relu_ira_Inception_B_block_1/sum" 971 | name: "relu_conv4_sum" 972 | type: "ReLU" 973 | bottom: "conv4_sum" 974 | top:"conv4_sum" 975 | # bottom: "ira_Inception_B_block_1/sum" 976 | #top: "ira_Inception_B_block_1/sum" 977 | } 978 | #=========================end_inception_Res_B========================================== 979 | 980 | 981 | ##=================================REDUCTION_B======================== 982 | 983 | 984 | 985 | #-----reduction B Pooling a-------------- 986 | layer { 987 | name: "ira_Reduction_B_block_1/a_pool" 988 | type: "Pooling" 989 | bottom: "conv4_sum" 990 | top: "ira_Reduction_B_block_1/a_pool" 991 | pooling_param { 992 | pool: MAX 993 | kernel_size: 3 994 | stride: 2 995 | } 996 | } 997 | 998 | #-----reduction B b1 conv1x1 -------------- 999 | layer { 1000 | name: "ira_Reduction_B_block_1/b_conv1x1_1" 1001 | type: "Convolution" 1002 | bottom: "conv4_sum" 1003 | top: "ira_Reduction_B_block_1/b_conv1x1_1" 1004 | param {lr_mult: 1} param {lr_mult: 2} 1005 | convolution_param { 1006 | num_output: 256 1007 | kernel_size: 1 1008 | stride:1 1009 | pad:0 1010 | weight_filler { type: "xavier" } 1011 | bias_filler { type: "constant" value: 0 } 1012 | } 1013 | } 1014 | layer { 1015 | name: "bn_ira_Reduction_B_block_1/b_conv1x1_1" 1016 | type: "BatchNorm" 1017 | bottom: "ira_Reduction_B_block_1/b_conv1x1_1" 1018 | top: "ira_Reduction_B_block_1/b_conv1x1_1" 1019 | } 1020 | 1021 | layer { 1022 | name: "scale_ira_Reduction_B_block_1/b_conv1x1_1" 1023 | type: "Scale" 1024 | bottom: "ira_Reduction_B_block_1/b_conv1x1_1" 1025 | top: "ira_Reduction_B_block_1/b_conv1x1_1" 1026 | scale_param { bias_term: true} 1027 | } 1028 | layer { 1029 | name: "relu_ira_Reduction_B_block_1/b_conv1x1_1" 1030 | type: "ReLU" 1031 | bottom: "ira_Reduction_B_block_1/b_conv1x1_1" 1032 | top: "ira_Reduction_B_block_1/b_conv1x1_1" 1033 | } 1034 | 1035 | #-----reduction B b2 conv3x3-------------- 1036 | layer { 1037 | name: "ira_Reduction_B_block_1/b_conv3x3_1" 1038 | type: "Convolution" 1039 | bottom: "ira_Reduction_B_block_1/b_conv1x1_1" 1040 | top: "ira_Reduction_B_block_1/b_conv3x3_1" 1041 | param {lr_mult: 1} param {lr_mult: 2} 1042 | convolution_param { 1043 | num_output: 384 1044 | kernel_size: 3 1045 | stride: 2 1046 | pad: 1 1047 | weight_filler { type: "xavier" } 1048 | bias_filler { type: "constant" value: 0 } 1049 | } 1050 | } 1051 | layer { 1052 | name: "bn_ira_Reduction_B_block_1/b_conv3x3_1" 1053 | type: "BatchNorm" 1054 | bottom: "ira_Reduction_B_block_1/b_conv3x3_1" 1055 | top: "ira_Reduction_B_block_1/b_conv3x3_1" 1056 | } 1057 | 1058 | layer { 1059 | name: "scale_ira_Reduction_B_block_1/b_conv3x3_1" 1060 | type: "Scale" 1061 | bottom: "ira_Reduction_B_block_1/b_conv3x3_1" 1062 | top: "ira_Reduction_B_block_1/b_conv3x3_1" 1063 | scale_param { bias_term: true} 1064 | } 1065 | layer { 1066 | name: "relu_ira_Reduction_B_block_1/b_conv3x3_1" 1067 | type: "ReLU" 1068 | bottom: "ira_Reduction_B_block_1/b_conv3x3_1" 1069 | top: "ira_Reduction_B_block_1/b_conv3x3_1" 1070 | } 1071 | 1072 | 1073 | 1074 | #-----reduction B c1 conv1x1 -------------- 1075 | layer { 1076 | name: "ira_Reduction_B_block_1/c_conv1x1_1" 1077 | type: "Convolution" 1078 | bottom: "conv4_sum" 1079 | top: "ira_Reduction_B_block_1/c_conv1x1_1" 1080 | param {lr_mult: 1} param {lr_mult: 2} 1081 | convolution_param { 1082 | num_output: 256 1083 | kernel_size: 1 1084 | stride:1 1085 | pad:0 1086 | weight_filler { type: "xavier" } 1087 | bias_filler { type: 
"constant" value: 0 } 1088 | } 1089 | } 1090 | layer { 1091 | name: "bn_ira_Reduction_B_block_1/c_conv1x1_1" 1092 | type: "BatchNorm" 1093 | bottom: "ira_Reduction_B_block_1/c_conv1x1_1" 1094 | top: "ira_Reduction_B_block_1/c_conv1x1_1" 1095 | } 1096 | 1097 | layer { 1098 | name: "scale_ira_Reduction_B_block_1/c_conv1x1_1" 1099 | type: "Scale" 1100 | bottom: "ira_Reduction_B_block_1/c_conv1x1_1" 1101 | top: "ira_Reduction_B_block_1/c_conv1x1_1" 1102 | scale_param { bias_term: true} 1103 | } 1104 | layer { 1105 | name: "relu_ira_Reduction_B_block_1/c_conv1x1_1" 1106 | type: "ReLU" 1107 | bottom: "ira_Reduction_B_block_1/c_conv1x1_1" 1108 | top: "ira_Reduction_B_block_1/c_conv1x1_1" 1109 | } 1110 | 1111 | #-----reduction B c the second conv3x3-------------- 1112 | layer { 1113 | name: "ira_Reduction_B_block_1/c_conv3x3_1" 1114 | type: "Convolution" 1115 | bottom: "ira_Reduction_B_block_1/c_conv1x1_1" 1116 | top: "ira_Reduction_B_block_1/c_conv3x3_1" 1117 | param {lr_mult: 1} param {lr_mult: 2} 1118 | convolution_param { 1119 | num_output: 288 1120 | kernel_size: 3 1121 | stride: 2 1122 | pad: 1 1123 | weight_filler { type: "xavier" } 1124 | bias_filler { type: "constant" value: 0 } 1125 | } 1126 | } 1127 | layer { 1128 | name: "bn_ira_Reduction_B_block_1/c_conv3x3_1" 1129 | type: "BatchNorm" 1130 | bottom: "ira_Reduction_B_block_1/c_conv3x3_1" 1131 | top: "ira_Reduction_B_block_1/c_conv3x3_1" 1132 | } 1133 | 1134 | layer { 1135 | name: "scale_ira_Reduction_B_block_1/c_conv3x3_1" 1136 | type: "Scale" 1137 | bottom: "ira_Reduction_B_block_1/c_conv3x3_1" 1138 | top: "ira_Reduction_B_block_1/c_conv3x3_1" 1139 | scale_param { bias_term: true} 1140 | } 1141 | layer { 1142 | name: "relu_ira_Reduction_B_block_1/c_conv3x3_1" 1143 | type: "ReLU" 1144 | bottom: "ira_Reduction_B_block_1/c_conv3x3_1" 1145 | top: "ira_Reduction_B_block_1/c_conv3x3_1" 1146 | } 1147 | 1148 | 1149 | #-----reduction B d1 conv1x1 -------------- 1150 | layer { 1151 | name: "ira_Reduction_B_block_1/d_conv1x1_1" 1152 | type: "Convolution" 1153 | bottom: "conv4_sum" 1154 | top: "ira_Reduction_B_block_1/d_conv1x1_1" 1155 | param {lr_mult: 1} param {lr_mult: 2} 1156 | convolution_param { 1157 | num_output: 256 1158 | kernel_size: 1 1159 | stride:1 1160 | pad:0 1161 | weight_filler { type: "xavier" } 1162 | bias_filler { type: "constant" value: 0 } 1163 | } 1164 | } 1165 | layer { 1166 | name: "bn_ira_Reduction_B_block_1/d_conv1x1_1" 1167 | type: "BatchNorm" 1168 | bottom: "ira_Reduction_B_block_1/d_conv1x1_1" 1169 | top: "ira_Reduction_B_block_1/d_conv1x1_1" 1170 | } 1171 | 1172 | layer { 1173 | name: "scale_ira_Reduction_B_block_1/d_conv1x1_1" 1174 | type: "Scale" 1175 | bottom: "ira_Reduction_B_block_1/d_conv1x1_1" 1176 | top: "ira_Reduction_B_block_1/d_conv1x1_1" 1177 | scale_param { bias_term: true} 1178 | } 1179 | layer { 1180 | name: "relu_ira_Reduction_B_block_1/d_conv1x1_1" 1181 | type: "ReLU" 1182 | bottom: "ira_Reduction_B_block_1/d_conv1x1_1" 1183 | top: "ira_Reduction_B_block_1/d_conv1x1_1" 1184 | } 1185 | 1186 | #-----reduction B d the second conv3x3-------------- 1187 | layer { 1188 | name: "ira_Reduction_B_block_1/d_conv3x3_1" 1189 | type: "Convolution" 1190 | bottom: "ira_Reduction_B_block_1/d_conv1x1_1" 1191 | top: "ira_Reduction_B_block_1/d_conv3x3_1" 1192 | param {lr_mult: 1} param {lr_mult: 2} 1193 | convolution_param { 1194 | num_output: 288 1195 | kernel_size: 3 1196 | stride: 1 1197 | pad: 1 1198 | weight_filler { type: "xavier" } 1199 | bias_filler { type: "constant" value: 0 } 1200 | } 1201 | } 1202 | layer 
{ 1203 | name: "bn_ira_Reduction_B_block_1/d_conv3x3_1" 1204 | type: "BatchNorm" 1205 | bottom: "ira_Reduction_B_block_1/d_conv3x3_1" 1206 | top: "ira_Reduction_B_block_1/d_conv3x3_1" 1207 | } 1208 | 1209 | layer { 1210 | name: "scale_ira_Reduction_B_block_1/d_conv3x3_1" 1211 | type: "Scale" 1212 | bottom: "ira_Reduction_B_block_1/d_conv3x3_1" 1213 | top: "ira_Reduction_B_block_1/d_conv3x3_1" 1214 | scale_param { bias_term: true} 1215 | } 1216 | layer { 1217 | name: "relu_ira_Reduction_B_block_1/d_conv3x3_1" 1218 | type: "ReLU" 1219 | bottom: "ira_Reduction_B_block_1/d_conv3x3_1" 1220 | top: "ira_Reduction_B_block_1/d_conv3x3_1" 1221 | } 1222 | 1223 | #-----reduction B d the second conv3x3-------------- 1224 | layer { 1225 | name: "ira_Reduction_B_block_1/d_conv3x3_2" 1226 | type: "Convolution" 1227 | bottom: "ira_Reduction_B_block_1/d_conv3x3_1" 1228 | top: "ira_Reduction_B_block_1/d_conv3x3_2" 1229 | param {lr_mult: 1} param {lr_mult: 2} 1230 | convolution_param { 1231 | num_output: 320 1232 | kernel_size: 3 1233 | stride: 2 1234 | pad: 1 1235 | weight_filler { type: "xavier" } 1236 | bias_filler { type: "constant" value: 0 } 1237 | } 1238 | } 1239 | layer { 1240 | name: "bn_ira_Reduction_B_block_1/d_conv3x3_2" 1241 | type: "BatchNorm" 1242 | bottom: "ira_Reduction_B_block_1/d_conv3x3_2" 1243 | top: "ira_Reduction_B_block_1/d_conv3x3_2" 1244 | } 1245 | 1246 | layer { 1247 | name: "scale_ira_Reduction_B_block_1/d_conv3x3_2" 1248 | type: "Scale" 1249 | bottom: "ira_Reduction_B_block_1/d_conv3x3_2" 1250 | top: "ira_Reduction_B_block_1/d_conv3x3_2" 1251 | scale_param { bias_term: true} 1252 | } 1253 | layer { 1254 | name: "relu_ira_Reduction_B_block_1/d_conv3x3_2" 1255 | type: "ReLU" 1256 | bottom: "ira_Reduction_B_block_1/d_conv3x3_2" 1257 | top: "ira_Reduction_B_block_1/d_conv3x3_2" 1258 | } 1259 | 1260 | 1261 | 1262 | 1263 | ##----reduction B_concat_top --------------------- 1264 | layer{ 1265 | name: "ira_Reduction_B_block_1/concat" 1266 | type: "Concat" 1267 | bottom:"ira_Reduction_B_block_1/a_pool" 1268 | bottom: "ira_Reduction_B_block_1/b_conv3x3_1" 1269 | bottom: "ira_Reduction_B_block_1/c_conv3x3_1" 1270 | bottom: "ira_Reduction_B_block_1/d_conv3x3_2" 1271 | top: "ira_Reduction_B_block_1/concat" 1272 | } 1273 | #==============================End of REDUCTION_B ======================================== 1274 | 1275 | 1276 | 1277 | 1278 | #=========================inception_Res_C========================================== 1279 | # the input of the First module is input 1280 | 1281 | layer { 1282 | name: "conv5_1b" 1283 | type: "Convolution" 1284 | bottom: "ira_Reduction_B_block_1/concat" 1285 | top: "conv5_1b" 1286 | param {lr_mult: 1} param {lr_mult: 2} 1287 | convolution_param { 1288 | num_output: 2048 1289 | kernel_size: 1 1290 | stride: 1 pad:0 1291 | weight_filler { type: "xavier" } 1292 | bias_filler { type: "constant" value: 0 } 1293 | } 1294 | } 1295 | layer { 1296 | bottom: "conv5_1b" top: "conv5_1b" name: "bn_conv5_1b" type: "BatchNorm" 1297 | # batch_norm_param {use_global_stats: true} 1298 | } 1299 | 1300 | layer { 1301 | bottom: "conv5_1b" top: "conv5_1b" name: "scale_conv5_1b" type: "Scale" 1302 | scale_param { bias_term: true} 1303 | } 1304 | layer { 1305 | name: "relu5_1b" type: "ReLU" bottom: "conv5_1b" top: "conv5_1b" 1306 | } 1307 | 1308 | #-------a_1_1x1---- 1309 | layer { 1310 | name: "ira_Inception_C_block_1/a_conv1x1_1" 1311 | type: "Convolution" 1312 | bottom: "conv5_1b" 1313 | top: "ira_Inception_C_block_1/a_conv1x1_1" 1314 | param {lr_mult: 1} param {lr_mult: 2} 
1315 | convolution_param { 1316 | num_output:192 1317 | kernel_size: 1 1318 | weight_filler { type: "xavier" } 1319 | bias_filler { type: "constant" value: 0 } 1320 | } 1321 | } 1322 | layer { 1323 | name: "bn_ira_Inception_C_block_1/a_conv1x1_1" 1324 | type: "BatchNorm" 1325 | bottom: "ira_Inception_C_block_1/a_conv1x1_1" 1326 | top: "ira_Inception_C_block_1/a_conv1x1_1" 1327 | 1328 | } 1329 | layer { 1330 | name: "scale_ira_Inception_C_block_1/a_conv1x1_1" 1331 | type: "Scale" 1332 | bottom: "ira_Inception_C_block_1/a_conv1x1_1" 1333 | top: "ira_Inception_C_block_1/a_conv1x1_1" 1334 | scale_param { bias_term: true} 1335 | } 1336 | layer { 1337 | name: "relu_ira_Inception_C_block_1/a_conv1x1_1" 1338 | type: "ReLU" 1339 | bottom: "ira_Inception_C_block_1/a_conv1x1_1" 1340 | top: "ira_Inception_C_block_1/a_conv1x1_1" 1341 | } 1342 | 1343 | #-------b1---------- 1344 | layer { 1345 | name: "ira_Inception_C_block_1/b_conv1x1_1" 1346 | type: "Convolution" 1347 | bottom: "conv5_1b" 1348 | top: "ira_Inception_C_block_1/b_conv1x1_1" 1349 | param {lr_mult: 1} param {lr_mult: 2} 1350 | convolution_param { 1351 | num_output:192 1352 | kernel_size: 1 1353 | weight_filler { type: "xavier" } 1354 | bias_filler { type: "constant" value: 0 } 1355 | } 1356 | } 1357 | layer { 1358 | name: "bn_ira_Inception_C_block_1/b_conv1x1_1" 1359 | type: "BatchNorm" 1360 | bottom: "ira_Inception_C_block_1/b_conv1x1_1" 1361 | top: "ira_Inception_C_block_1/b_conv1x1_1" 1362 | } 1363 | 1364 | layer { 1365 | name: "scale_ira_Inception_C_block_1/b_conv1x1_1" 1366 | type: "Scale" 1367 | bottom: "ira_Inception_C_block_1/b_conv1x1_1" 1368 | top: "ira_Inception_C_block_1/b_conv1x1_1" 1369 | scale_param { bias_term: true} 1370 | } 1371 | layer { 1372 | name: "relu_ira_Inception_C_block_1/b_conv1x1_1" 1373 | type: "ReLU" 1374 | bottom: "ira_Inception_C_block_1/b_conv1x1_1" 1375 | top: "ira_Inception_C_block_1/b_conv1x1_1" 1376 | } 1377 | 1378 | #-------b2---------- 1379 | layer { 1380 | name: "ira_Inception_C_block_1/b_conv1x7_1" 1381 | type: "Convolution" 1382 | bottom: "ira_Inception_C_block_1/b_conv1x1_1" 1383 | top: "ira_Inception_C_block_1/b_conv1x7_1" 1384 | param {lr_mult: 1} param {lr_mult: 2} 1385 | convolution_param { 1386 | num_output:224 1387 | kernel_h: 1 1388 | kernel_w: 3 1389 | pad_h: 0 1390 | pad_w: 1 1391 | weight_filler { type: "xavier" } 1392 | bias_filler { type: "constant" value: 0 } 1393 | } 1394 | } 1395 | layer { 1396 | name: "bn_ira_Inception_C_block_1/b_conv1x7_1" 1397 | type: "BatchNorm" 1398 | bottom: "ira_Inception_C_block_1/b_conv1x7_1" 1399 | top: "ira_Inception_C_block_1/b_conv1x7_1" 1400 | } 1401 | 1402 | layer { 1403 | name: "scale_ira_Inception_C_block_1/b_conv1x7_1" 1404 | type: "Scale" 1405 | bottom: "ira_Inception_C_block_1/b_conv1x7_1" 1406 | top: "ira_Inception_C_block_1/b_conv1x7_1" 1407 | scale_param { bias_term: true} 1408 | } 1409 | layer { 1410 | name: "relu_ira_Inception_C_block_1/b_conv1x7_1" 1411 | type: "ReLU" 1412 | bottom: "ira_Inception_C_block_1/b_conv1x7_1" 1413 | top: "ira_Inception_C_block_1/b_conv1x7_1" 1414 | } 1415 | 1416 | 1417 | 1418 | #-----------b3------------------------------- 1419 | layer { 1420 | name: "ira_Inception_C_block_1/b_conv7x1_1" 1421 | type: "Convolution" 1422 | bottom: "ira_Inception_C_block_1/b_conv1x7_1" 1423 | top: "ira_Inception_C_block_1/b_conv7x1_1" 1424 | param {lr_mult: 1} param {lr_mult: 2} 1425 | convolution_param { 1426 | num_output:256 1427 | kernel_h: 3 1428 | kernel_w: 1 1429 | pad_h: 1 1430 | pad_w: 0 1431 | weight_filler { type: 
"xavier" } 1432 | bias_filler { type: "constant" value: 0 } 1433 | } 1434 | } 1435 | layer { 1436 | name: "bn_ira_Inception_C_block_1/b_conv7x1_1" 1437 | type: "BatchNorm" 1438 | bottom: "ira_Inception_C_block_1/b_conv7x1_1" 1439 | top: "ira_Inception_C_block_1/b_conv7x1_1" 1440 | } 1441 | 1442 | layer { 1443 | name: "scale_ira_Inception_C_block_1/b_conv7x1_1" 1444 | type: "Scale" 1445 | bottom: "ira_Inception_C_block_1/b_conv7x1_1" 1446 | top: "ira_Inception_C_block_1/b_conv7x1_1" 1447 | scale_param { bias_term: true} 1448 | } 1449 | layer { 1450 | name: "relu_ira_Inception_C_block_1/b_conv7x1_1" 1451 | type: "ReLU" 1452 | bottom: "ira_Inception_C_block_1/b_conv7x1_1" 1453 | top: "ira_Inception_C_block_1/b_conv7x1_1" 1454 | } 1455 | 1456 | 1457 | #--concatation----------------------- 1458 | 1459 | layer{ 1460 | name: "ira_Inception_C_block_1/concat" 1461 | type: "Concat" 1462 | bottom:"ira_Inception_C_block_1/a_conv1x1_1" 1463 | bottom: "ira_Inception_C_block_1/b_conv7x1_1" 1464 | top: "ira_Inception_C_block_1/concat" 1465 | } 1466 | 1467 | #--top conv over paths----------------------- 1468 | 1469 | layer { 1470 | name: "ira_Inception_C_block_1/top_conv_1x1" 1471 | type: "Convolution" 1472 | bottom: "ira_Inception_C_block_1/concat" 1473 | top: "ira_Inception_C_block_1/top_conv_1x1" 1474 | param {lr_mult: 1} param {lr_mult: 2} 1475 | convolution_param { 1476 | num_output:2048 1477 | kernel_size: 1 1478 | weight_filler { type: "xavier" } 1479 | bias_filler { type: "constant" value: 0 } 1480 | } 1481 | } 1482 | layer { 1483 | name: "bn_ira_Inception_C_block_1/top_conv_1x1" 1484 | type: "BatchNorm" 1485 | bottom: "ira_Inception_C_block_1/top_conv_1x1" 1486 | top: "ira_Inception_C_block_1/top_conv_1x1" 1487 | 1488 | } 1489 | 1490 | layer { 1491 | name: "scale_ira_Inception_C_block_1/top_conv_1x1" 1492 | type: "Scale" 1493 | bottom: "ira_Inception_C_block_1/top_conv_1x1" 1494 | top: "ira_Inception_C_block_1/top_conv_1x1" 1495 | scale_param { bias_term: true} 1496 | } 1497 | 1498 | 1499 | #--Sum before relu ----------------------- 1500 | 1501 | layer{ 1502 | #name: "ira_Inception_C_block_1/sum" 1503 | name: "conv5_sum" 1504 | type: "Eltwise" 1505 | bottom: "conv5_1b" 1506 | bottom:"ira_Inception_C_block_1/top_conv_1x1" 1507 | #top:"ira_Inception_C_block_1/sum" 1508 | top: "conv5_sum" 1509 | eltwise_param {operation: SUM coeff: 1 coeff: 0.1} 1510 | } 1511 | # relu 1512 | 1513 | layer { 1514 | #name: "relu_ira_Inception_C_block_1/sum" 1515 | name: "relu_conv5_sum" 1516 | type: "ReLU" 1517 | bottom: "conv5_sum" 1518 | top:"conv5_sum" 1519 | # bottom: "ira_Inception_C_block_1/sum" 1520 | #top: "ira_Inception_C_block_1/sum" 1521 | } 1522 | #=========================end_inception_Res_B========================================== 1523 | 1524 | 1525 | 1526 | 1527 | 1528 | 1529 | 1530 | # --------------------conv 4_X---------------------------- 1531 | 1532 | 1533 | 1534 | 1535 | 1536 | #########-----------------------------conv5x_____---------------------------------------------- 1537 | 1538 | #---conv 4_X---------------------------- 1539 | 1540 | 1541 | # layer { 1542 | # name: "pool5" 1543 | # type: "Pooling" 1544 | # bottom: "conv4_sum" 1545 | # top: "pool5" 1546 | # pooling_param { 1547 | # pool: MAX 1548 | # kernel_size: 2 1549 | # stride: 2 1550 | # } 1551 | # } 1552 | 1553 | # layer { 1554 | # name: "conv5_1b" 1555 | # type: "Convolution" 1556 | # bottom: "ira_Reduction_B_block_1/concat" 1557 | # top: "conv5_1b" 1558 | # param {lr_mult: 1} param {lr_mult: 2} 1559 | # convolution_param { 1560 | # 
num_output: 1024 1561 | # kernel_size: 1 1562 | # stride: 1 pad:0 1563 | # weight_filler { type: "xavier" } 1564 | # bias_filler { type: "constant" value: 0 } 1565 | # } 1566 | # } 1567 | # layer { 1568 | # bottom: "conv5_1b" top: "conv5_1b" name: "bn_conv5_1b" type: "BatchNorm" 1569 | # # batch_norm_param {use_global_stats: true} 1570 | # } 1571 | 1572 | # layer { 1573 | # bottom: "conv5_1b" top: "conv5_1b" name: "scale_conv5_1b" type: "Scale" 1574 | # scale_param { bias_term: true} 1575 | # } 1576 | # layer { 1577 | # name: "relu5_1b" type: "ReLU" bottom: "conv5_1b" top: "conv5_1b" 1578 | 1579 | # } 1580 | 1581 | # layer { 1582 | # name: "conv5_1" 1583 | # type: "Convolution" 1584 | # bottom: "ira_Reduction_B_block_1/concat" 1585 | # top: "conv5_1" 1586 | # param {lr_mult: 1} param {lr_mult: 2} 1587 | # convolution_param { 1588 | # num_output: 1024 1589 | # kernel_size: 3 1590 | # stride: 1 pad:1 1591 | # weight_filler { type: "xavier" } 1592 | # bias_filler { type: "constant" value: 0 } 1593 | # } 1594 | # } 1595 | 1596 | # layer { 1597 | # bottom: "conv5_1" top: "conv5_1" name: "bn_conv5_1" type: "BatchNorm" 1598 | # # batch_norm_param {use_global_stats: true} 1599 | # } 1600 | 1601 | # layer { 1602 | # bottom: "conv5_1" top: "conv5_1" name: "scale_conv5_1" type: "Scale" 1603 | # scale_param { bias_term: true} 1604 | # } 1605 | # layer { 1606 | # name: "relu4_1" type: "ReLU" bottom: "conv5_1" top: "conv5_1" 1607 | # } 1608 | 1609 | 1610 | 1611 | 1612 | 1613 | # layer { 1614 | # name: "conv5_2" 1615 | # type: "Convolution" 1616 | # bottom: "conv5_1" 1617 | # top: "conv5_2" 1618 | # param {lr_mult: 1} param {lr_mult: 2} 1619 | # convolution_param { 1620 | # num_output: 1024 1621 | # kernel_size: 3 1622 | # stride: 1 pad:1 1623 | # weight_filler { type: "xavier" } 1624 | # bias_filler { type: "constant" value: 0 } 1625 | # } 1626 | # } 1627 | 1628 | # layer { 1629 | # bottom: "conv5_2" top: "conv5_2" name: "bn_conv5_2" type: "BatchNorm" 1630 | # # batch_norm_param {use_global_stats: true} 1631 | # } 1632 | 1633 | # layer { 1634 | # bottom: "conv5_2" top: "conv5_2" name: "scale_conv5_2" type: "Scale" 1635 | # scale_param { bias_term: true} 1636 | # } 1637 | # layer { 1638 | # name: "relu4_2" type: "ReLU" bottom: "conv5_2" top: "conv5_2" 1639 | # } 1640 | 1641 | # layer { 1642 | # name: "conv5_3" 1643 | # type: "Convolution" 1644 | # bottom: "conv5_2" 1645 | # top: "conv5_3" 1646 | # param {lr_mult: 1} param {lr_mult: 2} 1647 | # convolution_param { 1648 | # num_output: 1024 1649 | # kernel_size: 3 1650 | # stride: 1 pad:1 1651 | # weight_filler { type: "xavier" } 1652 | # bias_filler { type: "constant" value: 0 } 1653 | # } 1654 | # } 1655 | # layer { 1656 | # bottom: "conv5_3" top: "conv5_3" name: "bn_conv5_3" type: "BatchNorm" 1657 | # # batch_norm_param {use_global_stats: true} 1658 | # } 1659 | 1660 | # layer { 1661 | # bottom: "conv5_3" top: "conv5_3" name: "scale_conv5_3" type: "Scale" 1662 | # scale_param { bias_term: true} 1663 | # } 1664 | 1665 | 1666 | 1667 | # layer{ 1668 | # name: "conv5_sum" 1669 | # type: "Eltwise" 1670 | # bottom:"conv5_3" 1671 | # bottom: "conv5_1b" 1672 | # top:"conv5_sum" 1673 | # eltwise_param {operation: SUM } 1674 | # } 1675 | 1676 | 1677 | # layer {name: "relu5_sum" type: "ReLU" bottom: "conv5_sum" top: "conv5_sum"} 1678 | 1679 | # layer { 1680 | # name: "pool5" 1681 | # type: "Pooling" 1682 | # bottom: "conv5_sum" 1683 | # top: "pool5" 1684 | # top: "pool5_mask" 1685 | # pooling_param { 1686 | # pool: MAX 1687 | # kernel_size: 2 1688 | # stride: 2 
1689 | # } 1690 | # } 1691 | 1692 | 1693 | 1694 | 1695 | 1696 | 1697 | #----------------- devconv to xx times ------------------------------------------------------- 1698 | 1699 | 1700 | 1701 | # layer { type: "Unpooling" bottom: "pool5" bottom: "pool5_mask" top: "unpool5" name: "unpool5" 1702 | # unpooling_param { unpool: MAX kernel_size: 2 stride: 2 pad:0} 1703 | # } 1704 | 1705 | 1706 | # layer { bottom: 'unpool5' top: 'deconv5_2b' name: 'deconv5_2b' type: "Deconvolution" 1707 | # param {lr_mult: 1} param {lr_mult: 2} 1708 | # convolution_param { engine:CUDNN num_output: 128 pad: 0 kernel_size: 1 1709 | # weight_filler { type: "gaussian" std: 0.01 } 1710 | # bias_filler { type: "constant" value: 0 }} } 1711 | # layer { 1712 | # bottom: "deconv5_2b" top: "deconv5_2b" name: "bn_deconv5_2b" type: "BatchNorm" 1713 | # # # batch_norm_param {use_global_stats: true} 1714 | # } 1715 | 1716 | # layer { 1717 | # bottom: "deconv5_2b" top: "deconv5_2b" name: "scale_deconv5_2b" type: "Scale" 1718 | # scale_param { bias_term: true} 1719 | # } 1720 | # layer { 1721 | # name: "relu4_2b" type: "ReLU" bottom: "deconv5_2b" top: "deconv5_2b" 1722 | # } 1723 | 1724 | 1725 | 1726 | 1727 | # layer { bottom: 'unpool5' top: 'deconv5_2' name: 'deconv5_2' type: "Deconvolution" 1728 | # param {lr_mult: 1} param {lr_mult: 2} 1729 | # convolution_param { engine:CUDNN num_output: 128 pad: 1 kernel_size: 3 1730 | # weight_filler { type: "gaussian" std: 0.01 } 1731 | # bias_filler { type: "constant" value: 0 }} } 1732 | 1733 | # layer { 1734 | # bottom: "deconv5_2" top: "deconv5_2" name: "bn_deconv5_2" type: "BatchNorm" 1735 | # # # batch_norm_param {use_global_stats: true} 1736 | # } 1737 | 1738 | # layer { 1739 | # bottom: "deconv5_2" top: "deconv5_2" name: "scale_deconv5_2" type: "Scale" 1740 | # scale_param { bias_term: true} 1741 | # } 1742 | # layer { 1743 | # name: "relu4_2" type: "ReLU" bottom: "deconv5_2" top: "deconv5_2" 1744 | # } 1745 | 1746 | 1747 | # layer { bottom: 'deconv5_2' top: 'deconv5_1' name: 'deconv5_1' type: "Deconvolution" 1748 | # param {lr_mult: 1} param {lr_mult: 2} 1749 | # convolution_param { engine:CUDNN num_output: 128 pad: 1 kernel_size: 3 1750 | # weight_filler { type: "gaussian" std: 0.01 } 1751 | # bias_filler { type: "constant" value: 0 }} } 1752 | # layer { 1753 | # bottom: "deconv5_1" top: "deconv5_1" name: "bn_deconv5_1" type: "BatchNorm" 1754 | # # # batch_norm_param {use_global_stats: true} 1755 | # } 1756 | 1757 | # layer { 1758 | # bottom: "deconv5_1" top: "deconv5_1" name: "scale_deconv5_1" type: "Scale" 1759 | # scale_param { bias_term: true} 1760 | # } 1761 | 1762 | 1763 | 1764 | # layer{ 1765 | # name: "deconv5_sum" type: "Eltwise" bottom:"deconv5_2b" bottom: "deconv5_1" top:"deconv5_sum" 1766 | # eltwise_param {operation: SUM } 1767 | # } 1768 | # layer { 1769 | # name: "relu4_deconv5_sum" type: "ReLU" bottom: "deconv5_sum" top: "deconv5_sum" 1770 | # } 1771 | 1772 | 1773 | 1774 | 1775 | 1776 | # #------------------deconv 4----------------------------------------------------- 1777 | # # layer { bottom: 'unpool4' top: 'deconv4_3' name: 'deconv4_3' type: "Deconvolution" 1778 | # # param {lr_mult: 1} param {lr_mult: 2} 1779 | # # convolution_param { num_output: 512 pad: 1 kernel_size: 3 1780 | # # weight_filler { type: "gaussian" std: 0.01 } 1781 | # # bias_filler { type: "constant" value: 0 }} } 1782 | # # 1783 | # # layer { bottom: 'deconv4_3' top: 'deconv4_3' name: 'derelu4_3' type: "ReLU" } 1784 | 1785 | 1786 | 1787 | # layer { type: "Unpooling" bottom: "deconv5_sum" 
bottom: "pool4_mask" top: "unpool4" name: "unpool4" 1788 | # unpooling_param { unpool: MAX kernel_size: 2 stride: 2 pad:0} 1789 | # } 1790 | 1791 | # layer { bottom: 'unpool4' top: 'deconv4_2b' name: 'deconv4_2b' type: "Deconvolution" 1792 | # param {lr_mult: 1} param {lr_mult: 2} 1793 | # convolution_param { engine:CUDNN num_output: 128 pad: 0 kernel_size: 1 1794 | # weight_filler { type: "gaussian" std: 0.01 } 1795 | # bias_filler { type: "constant" value: 0 }} } 1796 | # layer { 1797 | # bottom: "deconv4_2b" top: "deconv4_2b" name: "bn_deconv4_2b" type: "BatchNorm" 1798 | # # # batch_norm_param {use_global_stats: true} 1799 | # } 1800 | 1801 | # layer { 1802 | # bottom: "deconv4_2b" top: "deconv4_2b" name: "scale_deconv4_2b" type: "Scale" 1803 | # scale_param { bias_term: true} 1804 | # } 1805 | # layer { 1806 | # name: "relu4_2b" type: "ReLU" bottom: "deconv4_2b" top: "deconv4_2b" 1807 | # } 1808 | 1809 | 1810 | 1811 | 1812 | # layer { bottom: 'unpool4' top: 'deconv4_2' name: 'deconv4_2' type: "Deconvolution" 1813 | # param {lr_mult: 1} param {lr_mult: 2} 1814 | # convolution_param { engine:CUDNN num_output: 128 pad: 1 kernel_size: 3 1815 | # weight_filler { type: "gaussian" std: 0.01 } 1816 | # bias_filler { type: "constant" value: 0 }} } 1817 | 1818 | # layer { 1819 | # bottom: "deconv4_2" top: "deconv4_2" name: "bn_deconv4_2" type: "BatchNorm" 1820 | # # # batch_norm_param {use_global_stats: true} 1821 | # } 1822 | 1823 | # layer { 1824 | # bottom: "deconv4_2" top: "deconv4_2" name: "scale_deconv4_2" type: "Scale" 1825 | # scale_param { bias_term: true} 1826 | # } 1827 | # layer { 1828 | # name: "relu4_2" type: "ReLU" bottom: "deconv4_2" top: "deconv4_2" 1829 | # } 1830 | 1831 | 1832 | # layer { bottom: 'deconv4_2' top: 'deconv4_1' name: 'deconv4_1' type: "Deconvolution" 1833 | # param {lr_mult: 1} param {lr_mult: 2} 1834 | # convolution_param { engine:CUDNN num_output: 128 pad: 1 kernel_size: 3 1835 | # weight_filler { type: "gaussian" std: 0.01 } 1836 | # bias_filler { type: "constant" value: 0 }} } 1837 | # layer { 1838 | # bottom: "deconv4_1" top: "deconv4_1" name: "bn_deconv4_1" type: "BatchNorm" 1839 | # # # batch_norm_param {use_global_stats: true} 1840 | # } 1841 | 1842 | # layer { 1843 | # bottom: "deconv4_1" top: "deconv4_1" name: "scale_deconv4_1" type: "Scale" 1844 | # scale_param { bias_term: true} 1845 | # } 1846 | 1847 | 1848 | 1849 | # layer{ 1850 | # name: "deconv4_sum" type: "Eltwise" bottom:"deconv4_2b" bottom: "deconv4_1" top:"deconv4_sum" 1851 | # eltwise_param {operation:SUM } 1852 | # } 1853 | # # layer { 1854 | # # bottom: "deconv4_sum" top: "deconv4_sum" name: "bn_deconv4_sum" type: "BatchNorm" 1855 | # # # # batch_norm_param {use_global_stats: true} 1856 | # # } 1857 | 1858 | # # layer { 1859 | # # bottom: "deconv4_sum" top: "deconv4_sum" name: "scale_deconv4_sum" type: "Scale" 1860 | # # scale_param { bias_term: true} 1861 | # # } 1862 | # layer { 1863 | # name: "relu4_deconv4_sum" type: "ReLU" bottom: "deconv4_sum" top: "deconv4_sum" 1864 | # } 1865 | 1866 | 1867 | # #-------- deconv_3------------------------------------- 1868 | 1869 | # layer { type: "Unpooling" bottom: "deconv4_sum" bottom: "pool3_mask" top: "unpool3" name: "unpool3" 1870 | # unpooling_param { unpool: MAX kernel_size: 2 stride: 2 pad:0} 1871 | # } 1872 | 1873 | # layer { bottom: 'unpool3' top: 'deconv3_3b' name: 'deconv3_3b' type: "Deconvolution" 1874 | # param {lr_mult: 1} param {lr_mult: 2} 1875 | # convolution_param { engine:CUDNN num_output: 128 pad: 0 kernel_size: 1 1876 | # 
weight_filler { type: "gaussian" std: 0.01 } 1877 | # bias_filler { type: "constant" value: 0 }} } 1878 | # layer { 1879 | # bottom: "deconv3_3b" top: "deconv3_3b" name: "bn_deconv3_3b" type: "BatchNorm" 1880 | # # # batch_norm_param {use_global_stats: true} 1881 | # } 1882 | 1883 | # layer { 1884 | # bottom: "deconv3_3b" top: "deconv3_3b" name: "scale_deconv3_3b" type: "Scale" 1885 | # scale_param { bias_term: true} 1886 | # } 1887 | # layer { 1888 | # name: "relu_deconv3_3b" type: "ReLU" bottom: "deconv3_3b" top: "deconv3_3b" 1889 | # } 1890 | 1891 | 1892 | # layer { bottom: 'unpool3' top: 'deconv3_3' name: 'deconv3_3' type: "Deconvolution" 1893 | # param {lr_mult: 1} param {lr_mult: 2} 1894 | # convolution_param { engine:CUDNN num_output: 128 pad: 1 kernel_size: 3 1895 | # weight_filler { type: "gaussian" std: 0.01 } 1896 | # bias_filler { type: "constant" value: 0 }} } 1897 | # layer { 1898 | # bottom: "deconv3_3" top: "deconv3_3" name: "bn_deconv3_3" type: "BatchNorm" 1899 | # # # batch_norm_param {use_global_stats: true} 1900 | # } 1901 | # layer { 1902 | # bottom: "deconv3_3" top: "deconv3_3" name: "scale_deconv3_3" type: "Scale" 1903 | # scale_param { bias_term: true} 1904 | # } 1905 | # layer { 1906 | # name: "relu_deconv3_3" type: "ReLU" bottom: "deconv3_3" top: "deconv3_3" 1907 | # } 1908 | 1909 | 1910 | # layer { bottom: 'deconv3_3' top: 'deconv3_2' name: 'deconv3_2' type: "Deconvolution" 1911 | # param {lr_mult: 1} param {lr_mult: 2} 1912 | # convolution_param { engine:CUDNN num_output: 128 pad: 1 kernel_size: 3 1913 | # weight_filler { type: "gaussian" std: 0.01 } 1914 | # bias_filler { type: "constant" value: 0 }} } 1915 | # layer { 1916 | # bottom: "deconv3_2" top: "deconv3_2" name: "bn_deconv3_2" type: "BatchNorm" 1917 | # # # batch_norm_param {use_global_stats: true} 1918 | # } 1919 | 1920 | # layer { 1921 | # bottom: "deconv3_2" top: "deconv3_2" name: "scale_deconv3_2" type: "Scale" 1922 | # scale_param { bias_term: true} 1923 | # } 1924 | 1925 | # layer{ 1926 | # name: "deconv3_sum" type: "Eltwise" bottom:"deconv3_3b" bottom: "deconv3_2" top:"deconv3_sum" 1927 | # eltwise_param {operation: SUM } 1928 | # } 1929 | 1930 | # # layer { 1931 | # # bottom: "deconv3_sum" top: "deconv3_sum" name: "bn_deconv3_sum" type: "BatchNorm" 1932 | # # # # batch_norm_param {use_global_stats: true} 1933 | # # } 1934 | 1935 | # # layer { 1936 | # # bottom: "deconv3_sum" top: "deconv3_sum" name: "scale_deconv3_sum" type: "Scale" 1937 | # # scale_param { bias_term: true} 1938 | # # } 1939 | # layer { 1940 | # name: "relu3_sum" type: "ReLU" bottom: "deconv3_sum" top: "deconv3_sum" 1941 | # } 1942 | # #-------- deconv_2------------------------------------- 1943 | 1944 | # layer { type: "Unpooling" bottom: "deconv3_sum" bottom: "pool2_mask" top: "unpool2" name: "unpool2" 1945 | # unpooling_param { unpool: MAX kernel_size: 2 stride: 2 } 1946 | # } 1947 | 1948 | # layer { bottom: 'unpool2' top: 'deconv2_3b' name: 'deconv2_3b' type: "Deconvolution" 1949 | # param {lr_mult: 1} param {lr_mult: 2} 1950 | # convolution_param { engine:CUDNN num_output: 64 pad: 0 kernel_size: 1 1951 | # weight_filler { type: "gaussian" std: 0.01 } 1952 | # bias_filler { type: "constant" value: 0 }} } 1953 | # layer { 1954 | # bottom: "deconv2_3b" top: "deconv2_3b" name: "bn_deconv2_3b" type: "BatchNorm" 1955 | # # # batch_norm_param {use_global_stats: true} 1956 | # } 1957 | 1958 | # layer { 1959 | # bottom: "deconv2_3b" top: "deconv2_3b" name: "scale_deconv2_3b" type: "Scale" 1960 | # scale_param { bias_term: true} 1961 
| # } 1962 | # layer { 1963 | # name: "relu_deconv2_3b" type: "ReLU" bottom: "deconv2_3b" top: "deconv2_3b" 1964 | # } 1965 | 1966 | # layer { bottom: 'unpool2' top: 'deconv2_3' name: 'deconv2_3' type: "Deconvolution" 1967 | # param {lr_mult: 1} param {lr_mult: 2} 1968 | # convolution_param {engine:CUDNN num_output: 64 pad: 1 kernel_size: 3 1969 | # weight_filler { type: "gaussian" std: 0.01 } 1970 | # bias_filler { type: "constant" value: 0 }} } 1971 | # layer { 1972 | # bottom: "deconv2_3" top: "deconv2_3" name: "bn_deconv2_3" type: "BatchNorm" 1973 | # # # batch_norm_param {use_global_stats: true} 1974 | # } 1975 | 1976 | # layer { 1977 | # bottom: "deconv2_3" top: "deconv2_3" name: "scale_deconv2_3" type: "Scale" 1978 | # scale_param { bias_term: true} 1979 | # } 1980 | # layer { 1981 | # name: "relu_deconv2_3" type: "ReLU" bottom: "deconv2_3" top: "deconv2_3" 1982 | # } 1983 | 1984 | # layer { bottom: 'deconv2_3' top: 'deconv2_2' name: 'deconv2_2' type: "Deconvolution" 1985 | # param {lr_mult: 1} param {lr_mult: 2} 1986 | # convolution_param { engine:CUDNN num_output: 64 pad: 1 kernel_size: 3 1987 | # weight_filler { type: "gaussian" std: 0.01 } 1988 | # bias_filler { type: "constant" value: 0 }} } 1989 | # layer { 1990 | # bottom: "deconv2_2" top: "deconv2_2" name: "bn_deconv2_2" type: "BatchNorm" 1991 | # # # batch_norm_param {use_global_stats: true} 1992 | # } 1993 | 1994 | # layer { 1995 | # bottom: "deconv2_2" top: "deconv2_2" name: "scale_deconv2_2" type: "Scale" 1996 | # scale_param { bias_term: true} 1997 | # } 1998 | 1999 | # layer{ 2000 | # name: "deconv2_sum" type: "Eltwise" bottom:"deconv2_3b" bottom: "deconv2_2" top:"deconv2_sum" 2001 | # eltwise_param {operation: SUM } 2002 | # } 2003 | 2004 | # # layer { 2005 | # # bottom: "deconv2_sum" top: "deconv2_sum" name: "bn_deconv2_sum" type: "BatchNorm" 2006 | # # # # batch_norm_param {use_global_stats: true} 2007 | # # } 2008 | 2009 | # # layer { 2010 | # # bottom: "deconv2_sum" top: "deconv2_sum" name: "scale_deconv2_sum" type: "Scale" 2011 | # # scale_param { bias_term: true} 2012 | # # } 2013 | # layer { 2014 | # name: "relu2_sum" type: "ReLU" bottom: "deconv2_sum" top: "deconv2_sum" 2015 | # } 2016 | 2017 | 2018 | # #-------- deconv_1------------------------------------- 2019 | 2020 | # #layer { type: "Unpooling" bottom: "deconv2_sum" bottom: "pool1_mask" top: "unpool1" name: "unpool1" 2021 | # # unpooling_param { unpool: MAX kernel_size: 2 stride: 2 } 2022 | # #} 2023 | 2024 | # # layer { bottom: 'unpool1' top: 'deconv1_3b' name: 'deconv1_3b' type: "Deconvolution" 2025 | # # param {lr_mult: 1} param {lr_mult: 2} 2026 | # # convolution_param { num_output: 64 pad: 0 kernel_size: 1 2027 | # # weight_filler { type: "gaussian" std: 0.01 } 2028 | # # bias_filler { type: "constant" value: 0 }} } 2029 | 2030 | 2031 | 2032 | # layer { bottom: 'deconv2_sum' top: 'deconv1_3b' name: 'deconv1_3b' type: "Deconvolution" 2033 | # param {lr_mult: 1} param {lr_mult: 2} 2034 | # convolution_param { engine:CUDNN num_output: 64 pad: 1 kernel_size: 4 stride:2 2035 | # weight_filler { type: "gaussian" std: 0.01 } 2036 | # bias_filler { type: "constant" value: 0 }} } 2037 | # layer { 2038 | # bottom: "deconv1_3b" top: "deconv1_3b" name: "bn_deconv1_3b" type: "BatchNorm" 2039 | # # # batch_norm_param {use_global_stats: true} 2040 | # } 2041 | 2042 | # layer { 2043 | # bottom: "deconv1_3b" top: "deconv1_3b" name: "scale_deconv1_3b" type: "Scale" 2044 | # scale_param { bias_term: true} 2045 | # } 2046 | # # layer { 2047 | # # name: "relu1_3b" type: 
"ReLU" bottom: "deconv1_3b" top: "deconv1_3b" } 2048 | 2049 | 2050 | # layer { bottom: 'deconv2_sum' top: 'deconv1_3' name: 'deconv1_3' type: "Deconvolution" 2051 | # param {lr_mult: 1} param {lr_mult: 2} 2052 | # convolution_param {engine:CUDNN num_output: 64 pad: 3 kernel_size: 8 stride:2 2053 | # weight_filler { type: "gaussian" std: 0.01 } 2054 | # bias_filler { type: "constant" value: 0 }} } 2055 | # layer { 2056 | # bottom: "deconv1_3" top: "deconv1_3" name: "bn_deconv1_3" type: "BatchNorm" 2057 | # # # batch_norm_param {use_global_stats: true} 2058 | # } 2059 | 2060 | # layer { 2061 | # bottom: "deconv1_3" top: "deconv1_3" name: "scale_deconv1_3" type: "Scale" 2062 | # scale_param { bias_term: true} 2063 | # } 2064 | # layer { 2065 | # name: "relu1_3" type: "ReLU" bottom: "deconv1_3" top: "deconv1_3" 2066 | # } 2067 | 2068 | 2069 | # layer { bottom: 'deconv1_3' top: 'deconv1_2' name: 'deconv1_2' type: "Deconvolution" 2070 | # param {lr_mult: 1} param {lr_mult: 2} 2071 | # convolution_param { engine:CUDNN num_output: 64 pad: 1 kernel_size: 3 2072 | # weight_filler { type: "gaussian" std: 0.01 } 2073 | # bias_filler { type: "constant" value: 0 }} } 2074 | 2075 | # layer { 2076 | # bottom: "deconv1_2" top: "deconv1_2" name: "bn_deconv1_2" type: "BatchNorm" 2077 | # # # batch_norm_param {use_global_stats: true} 2078 | # } 2079 | 2080 | # layer { 2081 | # bottom: "deconv1_2" top: "deconv1_2" name: "scale_deconv1_2" type: "Scale" 2082 | # scale_param { bias_term: true} 2083 | # } 2084 | 2085 | # layer{ 2086 | # name: "deconv1_sum" type: "Eltwise" bottom:"deconv1_2" bottom: "deconv1_3b" top:"deconv1_sum" 2087 | # eltwise_param {operation: SUM } 2088 | # } 2089 | 2090 | # # layer { 2091 | # # bottom: "deconv1_sum" top: "deconv1_sum" name: "bn_deconv1_sum" type: "BatchNorm" 2092 | # # # # batch_norm_param {use_global_stats: true} 2093 | # # } 2094 | 2095 | # # layer { 2096 | # # bottom: "deconv1_sum" top: "deconv1_sum" name: "scale_deconv1_sum" type: "Scale" 2097 | # # scale_param { bias_term: true} 2098 | # # } 2099 | # layer { 2100 | # name: "relu1_sum" type: "ReLU" bottom: "deconv1_sum" top: "deconv1_sum" 2101 | # } 2102 | 2103 | 2104 | # # layer { name: 'seg_score' type: "Deconvolution" bottom: 'deconv1_sum' top: 'seg_score' 2105 | # # param {lr_mult: 1} param {lr_mult: 2} 2106 | # # convolution_param { num_output: 2 kernel_size: 4 stride:2 2107 | # # weight_filler { type: "gaussian" std: 0.01 } 2108 | # # bias_filler { type: "constant" value: 0 }} } 2109 | 2110 | 2111 | 2112 | # layer { name: 'seg_score' type: "Convolution" bottom: 'deconv1_sum' top: 'seg_score' 2113 | # param {lr_mult: 1} param {lr_mult: 2} 2114 | # convolution_param { num_output: 2 kernel_size: 1 2115 | # weight_filler { type: "gaussian" std: 0.01 } 2116 | # bias_filler { type: "constant" value: 0 }} } 2117 | 2118 | 2119 | 2120 | 2121 | # ----------------------------------deconv once--------------------------- 2122 | layer{ 2123 | name: "deconv5_16x" 2124 | type: "Deconvolution" 2125 | bottom: "conv5_sum" 2126 | top: "deconv5_16x" 2127 | param {lr_mult: 1} param {lr_mult: 2} 2128 | convolution_param { 2129 | num_output: 2 kernel_size: 32 stride: 16 pad:8 2130 | weight_filler {type: "bilinear"} 2131 | } 2132 | } 2133 | 2134 | 2135 | # ============================ Dilated multiple upsampling as classifier for 2x 4x and 8x , leave 16x for single deconvolution ============================== 2136 | layer{ 2137 | name: "deconv2_2x_d1" 2138 | type: "Deconvolution" 2139 | bottom: "concat_stem_1" 2140 | top: "deconv2_2x_d1" 
2141 | param {lr_mult: 1} param {lr_mult: 2} 2142 | convolution_param { 2143 | num_output: 2 kernel_size: 4 stride: 2 pad:1 2144 | weight_filler {type: "bilinear"} 2145 | } 2146 | } 2147 | 2148 | layer { 2149 | name: "fc1_2x_c0" 2150 | type: "Deconvolution" 2151 | bottom: "concat_stem_1" 2152 | top: "deconv2_2x_c0" 2153 | param { 2154 | name: "fc1_2x_c0_w" 2155 | lr_mult: 1 2156 | decay_mult: 1 2157 | } 2158 | param { 2159 | name: "fc1_2x_c0_b" 2160 | lr_mult: 2 2161 | decay_mult: 0 2162 | } 2163 | convolution_param { 2164 | num_output: 2 2165 | kernel_size: 4 2166 | stride: 2 2167 | pad: 7 2168 | dilation:5 2169 | weight_filler { 2170 | type: "gaussian" 2171 | std: 0.01 2172 | } 2173 | bias_filler { 2174 | type: "constant" 2175 | value: 0 2176 | } 2177 | } 2178 | } 2179 | 2180 | layer { 2181 | name: "fc1_2x_c1" 2182 | type: "Deconvolution" 2183 | bottom: "concat_stem_1" 2184 | top: "deconv2_2x_c1" 2185 | param { 2186 | name: "fc1_2x_c1_w" 2187 | lr_mult: 1 2188 | decay_mult: 1 2189 | } 2190 | param { 2191 | name: "fc1_2x_c1_b" 2192 | lr_mult: 2 2193 | decay_mult: 0 2194 | } 2195 | convolution_param { 2196 | num_output: 2 2197 | kernel_size: 4 2198 | stride: 2 2199 | pad: 16 2200 | dilation: 11 2201 | weight_filler { 2202 | type: "gaussian" 2203 | std: 0.01 2204 | } 2205 | bias_filler { 2206 | type: "constant" 2207 | value: 0 2208 | } 2209 | } 2210 | } 2211 | 2212 | layer { 2213 | name: "fc1_2x_c2" 2214 | type: "Deconvolution" 2215 | bottom: "concat_stem_1" 2216 | top: "deconv2_2x_c2" 2217 | param { 2218 | name: "fc1_2x_c2_w" 2219 | lr_mult: 1 2220 | decay_mult: 1 2221 | } 2222 | param { 2223 | name: "fc1_2x_c2_b" 2224 | lr_mult: 2 2225 | decay_mult: 0 2226 | } 2227 | convolution_param { 2228 | num_output: 2 2229 | kernel_size: 4 2230 | stride: 2 2231 | pad: 25 2232 | dilation: 17 2233 | weight_filler { 2234 | type: "gaussian" 2235 | std: 0.01 2236 | } 2237 | bias_filler { 2238 | type: "constant" 2239 | value: 0 2240 | } 2241 | } 2242 | } 2243 | 2244 | layer { 2245 | name: "fc1_2x_c3" 2246 | type: "Deconvolution" 2247 | bottom: "concat_stem_1" 2248 | top: "deconv2_2x_c3" 2249 | param { 2250 | name: "fc1_2x_c3_w" 2251 | lr_mult: 1 2252 | decay_mult: 1 2253 | } 2254 | param { 2255 | name: "fc1_2x_c3_b" 2256 | lr_mult: 2 2257 | decay_mult: 0 2258 | } 2259 | convolution_param { 2260 | num_output: 2 2261 | kernel_size: 4 2262 | stride: 2 2263 | pad: 34 2264 | dilation: 23 2265 | weight_filler { 2266 | type: "gaussian" 2267 | std: 0.01 2268 | } 2269 | bias_filler { 2270 | type: "constant" 2271 | value: 0 2272 | } 2273 | } 2274 | } 2275 | 2276 | ### SUM the all branches 2277 | 2278 | layer { 2279 | bottom: "deconv2_2x_d1" 2280 | bottom: "deconv2_2x_c0" 2281 | bottom: "deconv2_2x_c1" 2282 | bottom: "deconv2_2x_c2" 2283 | bottom: "deconv2_2x_c3" 2284 | top: "deconv2_2x" 2285 | name: "fc1_2x" 2286 | type: "Eltwise" 2287 | eltwise_param { 2288 | operation: SUM 2289 | } 2290 | } 2291 | 2292 | 2293 | layer { 2294 | name: "fc1_4x_c0" 2295 | type: "Deconvolution" 2296 | bottom: "conv3_sum" 2297 | top: "deconv3_4x_c0" 2298 | param { 2299 | name: "fc1_4x_c0_w" 2300 | lr_mult: 1 2301 | decay_mult: 1 2302 | } 2303 | param { 2304 | name: "fc1_4x_c0_b" 2305 | lr_mult: 2 2306 | decay_mult: 0 2307 | } 2308 | convolution_param { 2309 | num_output: 2 2310 | kernel_size: 8 2311 | stride: 4 2312 | pad: 16 2313 | dilation:5 2314 | weight_filler { 2315 | type: "gaussian" 2316 | std: 0.01 2317 | } 2318 | bias_filler { 2319 | type: "constant" 2320 | value: 0 2321 | } 2322 | } 2323 | } 2324 | #--------------------4x 
upsampling & classfier--------------------- 2325 | layer{ 2326 | name: "deconv3_4x_d1" 2327 | type: "Deconvolution" 2328 | bottom: "conv3_sum" 2329 | top: "deconv3_4x_d1" 2330 | param {lr_mult: 1} param {lr_mult: 2} 2331 | convolution_param { 2332 | num_output: 2 kernel_size: 8 stride: 4 pad:2 2333 | weight_filler {type: "bilinear"} 2334 | } 2335 | } 2336 | 2337 | layer { 2338 | name: "fc1_4x_c1" 2339 | type: "Deconvolution" 2340 | bottom: "conv3_sum" 2341 | top: "deconv3_4x_c1" 2342 | param { 2343 | name: "fc1_4x_c1_w" 2344 | lr_mult: 1 2345 | decay_mult: 1 2346 | } 2347 | param { 2348 | name: "fc1_4x_c1_b" 2349 | lr_mult: 2 2350 | decay_mult: 0 2351 | } 2352 | convolution_param { 2353 | num_output: 2 2354 | kernel_size: 8 2355 | stride: 4 2356 | pad: 37 2357 | dilation: 11 2358 | weight_filler { 2359 | type: "gaussian" 2360 | std: 0.01 2361 | } 2362 | bias_filler { 2363 | type: "constant" 2364 | value: 0 2365 | } 2366 | } 2367 | } 2368 | 2369 | layer { 2370 | name: "fc1_4x_c2" 2371 | type: "Deconvolution" 2372 | bottom: "conv3_sum" 2373 | top: "deconv3_4x_c2" 2374 | param { 2375 | name: "fc1_4x_c2_w" 2376 | lr_mult: 1 2377 | decay_mult: 1 2378 | } 2379 | param { 2380 | name: "fc1_4x_c2_b" 2381 | lr_mult: 2 2382 | decay_mult: 0 2383 | } 2384 | convolution_param { 2385 | num_output: 2 2386 | kernel_size: 8 2387 | stride: 4 2388 | pad: 58 2389 | dilation: 17 2390 | weight_filler { 2391 | type: "gaussian" 2392 | std: 0.01 2393 | } 2394 | bias_filler { 2395 | type: "constant" 2396 | value: 0 2397 | } 2398 | } 2399 | } 2400 | 2401 | layer { 2402 | name: "fc1_4x_c3" 2403 | type: "Deconvolution" 2404 | bottom: "conv3_sum" 2405 | top: "deconv3_4x_c3" 2406 | param { 2407 | name: "fc1_4x_c3_w" 2408 | lr_mult: 1 2409 | decay_mult: 1 2410 | } 2411 | param { 2412 | name: "fc1_4x_c3_b" 2413 | lr_mult: 2 2414 | decay_mult: 0 2415 | } 2416 | convolution_param { 2417 | num_output: 2 2418 | kernel_size: 8 2419 | stride: 4 2420 | pad: 79 2421 | dilation: 23 2422 | weight_filler { 2423 | type: "gaussian" 2424 | std: 0.01 2425 | } 2426 | bias_filler { 2427 | type: "constant" 2428 | value: 0 2429 | } 2430 | } 2431 | } 2432 | 2433 | 2434 | layer { 2435 | bottom: "deconv3_4x_d1" 2436 | bottom: "deconv3_4x_c0" 2437 | bottom: "deconv3_4x_c1" 2438 | bottom: "deconv3_4x_c2" 2439 | bottom: "deconv3_4x_c3" 2440 | top: "deconv3_4x" 2441 | name: "fc1_4x" 2442 | type: "Eltwise" 2443 | eltwise_param { 2444 | operation: SUM 2445 | } 2446 | } 2447 | 2448 | 2449 | 2450 | 2451 | 2452 | 2453 | 2454 | 2455 | 2456 | 2457 | 2458 | 2459 | #------------------------8x upsampling ----------------------- 2460 | layer{ 2461 | name: "deconv4_8x_d1" 2462 | type: "Deconvolution" 2463 | bottom: "conv4_sum" 2464 | top: "deconv4_8x_d1" 2465 | param {lr_mult: 1} param {lr_mult: 2} 2466 | convolution_param { 2467 | num_output: 2 kernel_size: 16 stride: 8 pad:4 2468 | weight_filler {type: "bilinear"} 2469 | } 2470 | } 2471 | layer { 2472 | name: "fc1_8x_c0" 2473 | type: "Deconvolution" 2474 | bottom: "conv4_sum" 2475 | top: "deconv4_8x_c0" 2476 | param { 2477 | name: "fc1_8x_c0_w" 2478 | lr_mult: 1 2479 | decay_mult: 1 2480 | } 2481 | param { 2482 | name: "fc1_8x_c0_b" 2483 | lr_mult: 2 2484 | decay_mult: 0 2485 | } 2486 | convolution_param { 2487 | num_output: 2 2488 | kernel_size: 16 2489 | stride: 8 2490 | pad: 34 2491 | dilation: 5 2492 | weight_filler { 2493 | type: "gaussian" 2494 | std: 0.01 2495 | } 2496 | bias_filler { 2497 | type: "constant" 2498 | value: 0 2499 | } 2500 | } 2501 | } 2502 | 2503 | 2504 | layer { 2505 | name: 
"fc1_8x_c1" 2506 | type: "Deconvolution" 2507 | bottom: "conv4_sum" 2508 | top: "deconv4_8x_c1" 2509 | param { 2510 | name: "fc1_8x_c1_w" 2511 | lr_mult: 1 2512 | decay_mult: 1 2513 | } 2514 | param { 2515 | name: "fc1_8x_c1_b" 2516 | lr_mult: 2 2517 | decay_mult: 0 2518 | } 2519 | convolution_param { 2520 | num_output: 2 2521 | kernel_size: 16 2522 | stride: 8 2523 | pad: 79 2524 | dilation: 11 2525 | weight_filler { 2526 | type: "gaussian" 2527 | std: 0.01 2528 | } 2529 | bias_filler { 2530 | type: "constant" 2531 | value: 0 2532 | } 2533 | } 2534 | } 2535 | 2536 | layer { 2537 | name: "fc1_8x_c2" 2538 | type: "Deconvolution" 2539 | bottom: "conv4_sum" 2540 | top: "deconv4_8x_c2" 2541 | param { 2542 | name: "fc1_8x_c2_w" 2543 | lr_mult: 1 2544 | decay_mult: 1 2545 | } 2546 | param { 2547 | name: "fc1_8x_c2_b" 2548 | lr_mult: 2 2549 | decay_mult: 0 2550 | } 2551 | convolution_param { 2552 | num_output: 2 2553 | kernel_size: 16 2554 | stride: 8 2555 | pad: 124 2556 | dilation: 17 2557 | weight_filler { 2558 | type: "gaussian" 2559 | std: 0.01 2560 | } 2561 | bias_filler { 2562 | type: "constant" 2563 | value: 0 2564 | } 2565 | } 2566 | } 2567 | 2568 | 2569 | layer { 2570 | name: "fc1_8x_c3" 2571 | type: "Deconvolution" 2572 | bottom: "conv4_sum" 2573 | top: "deconv4_8x_c3" 2574 | param { 2575 | name: "fc1_8x_c3_w" 2576 | lr_mult: 1 2577 | decay_mult: 1 2578 | } 2579 | param { 2580 | name: "fc1_8x_c3_b" 2581 | lr_mult: 2 2582 | decay_mult: 0 2583 | } 2584 | convolution_param { 2585 | num_output: 2 2586 | kernel_size: 16 2587 | stride: 8 2588 | pad: 169 2589 | dilation: 23 2590 | weight_filler { 2591 | type: "gaussian" 2592 | std: 0.01 2593 | } 2594 | bias_filler { 2595 | type: "constant" 2596 | value: 0 2597 | } 2598 | } 2599 | } 2600 | 2601 | 2602 | layer { 2603 | bottom: "deconv4_8x_d1" 2604 | bottom: "deconv4_8x_c0" 2605 | bottom: "deconv4_8x_c1" 2606 | bottom: "deconv4_8x_c2" 2607 | bottom: "deconv4_8x_c3" 2608 | top: "deconv4_8x" 2609 | name: "fc1_8x" 2610 | type: "Eltwise" 2611 | eltwise_param { 2612 | operation: SUM 2613 | } 2614 | } 2615 | 2616 | layer { 2617 | bottom: "deconv5_16x" top: "deconv5_16x" name: "bn_deconv5_16x" type: "BatchNorm" 2618 | } 2619 | 2620 | layer { 2621 | bottom: "deconv5_16x" top: "deconv5_16x" name: "scale_deconv5_16x" type: "Scale" 2622 | scale_param { bias_term: true} 2623 | } 2624 | layer { name: "relu_deconv5_16x" type: "ReLU" bottom: "deconv5_16x" top: "deconv5_16x"} 2625 | 2626 | layer { 2627 | bottom: "deconv4_8x" top: "deconv4_8x" name: "bn_deconv4_8x" type: "BatchNorm" 2628 | } 2629 | 2630 | layer { 2631 | bottom: "deconv4_8x" top: "deconv4_8x" name: "scale_deconv4_8x" type: "Scale" 2632 | scale_param { bias_term: true} 2633 | } 2634 | layer { name: "relu_deconv4_8x" type: "ReLU" bottom: "deconv4_8x" top: "deconv4_8x"} 2635 | 2636 | layer { 2637 | bottom: "deconv3_4x" top: "deconv3_4x" name: "bn_deconv3_4x" type: "BatchNorm" 2638 | } 2639 | 2640 | layer { 2641 | bottom: "deconv3_4x" top: "deconv3_4x" name: "scale_deconv3_4x" type: "Scale" 2642 | scale_param { bias_term: true} 2643 | } 2644 | layer { name: "relu_deconv3_4x" type: "ReLU" bottom: "deconv3_4x" top: "deconv3_4x"} 2645 | 2646 | layer { 2647 | bottom: "deconv2_2x" top: "deconv2_2x" name: "bn_deconv2_2x" type: "BatchNorm" 2648 | } 2649 | 2650 | layer { 2651 | bottom: "deconv2_2x" top: "deconv2_2x" name: "scale_deconv2_2x" type: "Scale" 2652 | scale_param { bias_term: true} 2653 | } 2654 | layer { name: "relu_deconv2_2x" type: "ReLU" bottom: "deconv2_2x" top: "deconv2_2x"} 2655 | 2656 | 
layer {
  name: "conv_deconv5_16x"
  type: "Convolution"
  bottom: "deconv5_16x"
  top: "conv_deconv5_16x"
  phase: PREDICT
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param {
    num_output: 2 kernel_size: 3 stride: 1 pad: 1
    weight_filler { type: "gaussian" std: 0.001 }
    bias_filler { type: "constant" value: 0 }
  }
}

layer {
  name: "conv_deconv4_8x"
  type: "Convolution"
  bottom: "deconv4_8x"
  top: "conv_deconv4_8x"
  phase: PREDICT
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param {
    num_output: 2 kernel_size: 3 stride: 1 pad: 1
    weight_filler { type: "gaussian" std: 0.001 }
    bias_filler { type: "constant" value: 0 }
  }
}

layer {
  name: "conv_deconv3_4x"
  type: "Convolution"
  bottom: "deconv3_4x"
  top: "conv_deconv3_4x"
  phase: PREDICT
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param {
    num_output: 2 kernel_size: 3 stride: 1 pad: 1
    weight_filler { type: "gaussian" std: 0.001 }
    bias_filler { type: "constant" value: 0 }
  }
}

layer {
  name: "conv_deconv2_2x"
  type: "Convolution"
  bottom: "deconv2_2x"
  top: "conv_deconv2_2x"
  phase: PREDICT
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param {
    num_output: 2 kernel_size: 3 stride: 1 pad: 1
    weight_filler { type: "gaussian" std: 0.001 }
    bias_filler { type: "constant" value: 0 }
  }
}

layer {
  bottom: "conv_deconv5_16x" top: "conv_deconv5_16x" name: "bn_conv_deconv5_16x" type: "BatchNorm"
}
layer {
  bottom: "conv_deconv5_16x" top: "conv_deconv5_16x" name: "scale_conv_deconv5_16x" type: "Scale"
  scale_param { bias_term: true }
}
layer { name: "relu_conv_deconv5_16x" type: "ReLU" bottom: "conv_deconv5_16x" top: "conv_deconv5_16x" }

layer {
  bottom: "conv_deconv4_8x" top: "conv_deconv4_8x" name: "bn_conv_deconv4_8x" type: "BatchNorm"
}
layer {
  bottom: "conv_deconv4_8x" top: "conv_deconv4_8x" name: "scale_conv_deconv4_8x" type: "Scale"
  scale_param { bias_term: true }
}
layer { name: "relu_conv_deconv4_8x" type: "ReLU" bottom: "conv_deconv4_8x" top: "conv_deconv4_8x" }

layer {
  bottom: "conv_deconv3_4x" top: "conv_deconv3_4x" name: "bn_conv_deconv3_4x" type: "BatchNorm"
}
layer {
  bottom: "conv_deconv3_4x" top: "conv_deconv3_4x" name: "scale_conv_deconv3_4x" type: "Scale"
  scale_param { bias_term: true }
}
layer { name: "relu_conv_deconv3_4x" type: "ReLU" bottom: "conv_deconv3_4x" top: "conv_deconv3_4x" }

layer {
  bottom: "conv_deconv2_2x" top: "conv_deconv2_2x" name: "bn_conv_deconv2_2x" type: "BatchNorm"
}
layer {
  bottom: "conv_deconv2_2x" top: "conv_deconv2_2x" name: "scale_conv_deconv2_2x" type: "Scale"
  scale_param { bias_term: true }
}
layer { name: "relu_conv_deconv2_2x" type: "ReLU" bottom: "conv_deconv2_2x" top: "conv_deconv2_2x" }

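# Sum the four refined score maps. Every branch outputs a 2-channel score
# map at full input resolution, so an elementwise SUM with unit coefficients
# is shape-consistent; the Softmax at the end converts the fused scores into
# per-pixel class probabilities.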
bottom: "de_deconv4_8x" 2768 | #bottom: "de_deconv3_4x" 2769 | #bottom: "de_deconv2_2x" 2770 | phase:PREDICT 2771 | top:"deconv_all_sum" 2772 | eltwise_param{ 2773 | operation: SUM 2774 | coeff: 1 2775 | coeff: 1 2776 | coeff: 1 2777 | coeff: 1 2778 | } 2779 | 2780 | } 2781 | 2782 | layer{ 2783 | name:"softmaxout" 2784 | type: "Softmax" 2785 | bottom:"deconv_all_sum" 2786 | top:"prob" 2787 | } 2788 | --------------------------------------------------------------------------------