├── README.md ├── amitpc-HP-Z620-Workstation_1454011145_1.dat ├── caffe-scripts ├── nuclei │ ├── cifar_on_nuclei.prototxt │ ├── cifar_on_nuclei_solver.prototxt │ ├── deploy.prototxt │ ├── multi_class_nuclei │ │ ├── multi_class_nuclei_1_iter_10000.caffemodel │ │ ├── multi_class_nuclei_1_iter_10000.solverstate │ │ ├── multi_class_nuclei_solver.prototxt │ │ ├── multi_class_nuclei_train_test.prototxt │ │ ├── train_multi_class_nuclei.sh │ │ └── use_multi_class │ │ │ ├── deploy.prototxt │ │ │ ├── train_use_multi_class.sh │ │ │ ├── use_multi_class_nuclei_big_1_iter_51647.caffemodel │ │ │ ├── use_multi_class_nuclei_big_1_iter_51647.solverstate │ │ │ ├── use_multi_class_solver.prototxt │ │ │ └── use_multi_class_test_train.prototxt │ ├── nuclei_quick_relative_iter_60000.caffemodel │ ├── nuclei_quick_relative_iter_60000.solverstate │ ├── nuclei_quick_solver.prototxt │ ├── nuclei_quick_train_test.prototxt │ ├── three_class │ │ ├── arch_1 │ │ │ ├── output_1.txt │ │ │ ├── three_class_nuclei_snap_iter_120000.caffemodel │ │ │ ├── three_class_nuclei_snap_iter_120000.solverstate │ │ │ ├── three_class_nuclei_snap_iter_30000.caffemodel │ │ │ ├── three_class_nuclei_snap_iter_30000.solverstate │ │ │ ├── three_class_nuclei_snap_iter_60000.caffemodel │ │ │ ├── three_class_nuclei_snap_iter_60000.solverstate │ │ │ ├── three_class_nuclei_snap_iter_90000.caffemodel │ │ │ ├── three_class_nuclei_snap_iter_90000.solverstate │ │ │ ├── three_class_nuclei_solver.prototxt │ │ │ ├── three_class_nuclei_train_test.prototxt │ │ │ └── train_three_class_nuclei.sh │ │ ├── arch_2 │ │ │ ├── output_1.txt │ │ │ ├── three_class_nuclei_snap_iter_120000.caffemodel │ │ │ ├── three_class_nuclei_snap_iter_120000.solverstate │ │ │ ├── three_class_nuclei_snap_iter_30000.caffemodel │ │ │ ├── three_class_nuclei_snap_iter_30000.solverstate │ │ │ ├── three_class_nuclei_snap_iter_60000.caffemodel │ │ │ ├── three_class_nuclei_snap_iter_60000.solverstate │ │ │ ├── three_class_nuclei_snap_iter_90000.caffemodel │ │ │ ├── three_class_nuclei_snap_iter_90000.solverstate │ │ │ ├── three_class_nuclei_solver.prototxt │ │ │ ├── three_class_nuclei_train_test.prototxt │ │ │ └── train_three_class_nuclei.sh │ │ ├── arch_3 │ │ │ ├── output_1.txt │ │ │ ├── solver.prototxt │ │ │ ├── train_test.prototxt │ │ │ └── train_test.sh │ │ └── arch_4 │ │ │ ├── output_1.txt │ │ │ ├── snap_iter_10000.caffemodel │ │ │ ├── snap_iter_10000.solverstate │ │ │ ├── snap_iter_20000.caffemodel │ │ │ ├── snap_iter_20000.solverstate │ │ │ ├── snap_iter_30000.caffemodel │ │ │ ├── snap_iter_30000.solverstate │ │ │ ├── solver.prototxt │ │ │ ├── train_test.prototxt │ │ │ └── train_test.sh │ ├── train_cifar │ │ ├── 92_accu_use_cifar_model │ │ │ ├── use_cifar_2_iter_10000.caffemodel │ │ │ ├── use_cifar_2_iter_10000.solverstate │ │ │ ├── use_cifar_2_iter_20000.caffemodel │ │ │ └── use_cifar_2_iter_20000.solverstate │ │ ├── cifar_nuclei_quick1_iter_40000.caffemodel │ │ ├── cifar_nuclei_quick1_iter_40000.solverstate │ │ ├── cifar_nuclei_solver.prototxt │ │ ├── cifar_nuclei_train_test.prototxt │ │ ├── deploy.prototxt │ │ ├── deploy1.prototxt │ │ ├── train_quick.sh │ │ ├── train_use_cifar.sh │ │ ├── use_cifar_solver.prototxt │ │ └── use_cifar_train_test.prototxt │ ├── train_cifar_on_nuclei.sh │ └── train_quick.sh └── predict.py ├── data ├── testing-data │ ├── 20x │ │ ├── PrognosisTMABlock1_A_3_1_H&E.jpg │ │ ├── tmp │ │ │ ├── 1.png │ │ │ ├── 2.png │ │ │ └── 3.png │ │ ├── tmp_20x │ │ │ ├── 1.png │ │ │ ├── 2.png │ │ │ └── 3.png │ │ ├── tmp_40x │ │ │ ├── 1.png │ │ │ ├── 2.png │ │ │ └── 3.png │ │ └── 
tmp_old │ │ │ ├── 1.png │ │ │ ├── 2.png │ │ │ └── 3.png │ └── 40x │ │ ├── 63_LLM_YR4_cropped.jpg │ │ ├── 81_LLM_YR4.jpg │ │ ├── 84_LLM_YR4.jpg │ │ ├── 84_LLM_YR4 │ │ ├── 1.png │ │ ├── 2.png │ │ ├── 3.png │ │ └── 84_LLM_YR4.jpg │ │ ├── 84_LLM_YR4_002.tif │ │ ├── 89_LLM_YR4.jpg │ │ ├── 92_LLM_YR4.jpg │ │ └── tmp │ │ ├── 1.png │ │ ├── 2.png │ │ ├── 3.png │ │ └── 84_LLM_YR4_002.png └── training-data │ ├── 20x │ ├── 2px │ │ ├── PrognosisTMABlock1_A_3_1_H&E.jpg │ │ ├── PrognosisTMABlock1_E_4_5_H&E.jpg │ │ ├── PrognosisTMABlock3_A_2_1_H&E.jpg │ │ ├── PrognosisTMABlock3_A_2_1_H&E_1.jpg │ │ ├── color-normalized │ │ │ ├── PrognosisTMABlock1_A_1_1_H&E.png │ │ │ ├── PrognosisTMABlock1_A_1_4_H&E.png │ │ │ ├── PrognosisTMABlock1_A_3_3_H&E.png │ │ │ ├── PrognosisTMABlock1_A_4_1_H&E.png │ │ │ ├── PrognosisTMABlock1_B_2_2_H&E.png │ │ │ ├── PrognosisTMABlock1_C_3_4_H&E.jpg │ │ │ ├── PrognosisTMABlock1_E_4_5_H&E.png │ │ │ ├── PrognosisTMABlock3_A_2_1_H&E.png │ │ │ ├── PrognosisTMABlock3_A_2_1_H&E_1.png │ │ │ ├── TM_PrognosisTMABlock1_A_1_1_H&E.png │ │ │ ├── TM_PrognosisTMABlock1_A_1_4_H&E.png │ │ │ ├── TM_PrognosisTMABlock1_A_3_3_H&E.png │ │ │ ├── TM_PrognosisTMABlock1_A_4_1_H&E.png │ │ │ ├── TM_PrognosisTMABlock1_B_2_2_H&E.png │ │ │ ├── TM_PrognosisTMABlock1_C_3_4_H&E.png │ │ │ ├── TM_PrognosisTMABlock1_E_4_5_H&E.png │ │ │ ├── TM_PrognosisTMABlock3_A_2_1_H&E.png │ │ │ └── TM_PrognosisTMABlock3_A_2_1_H&E_1.png │ │ ├── tm_PrognosisTMABlock1_A_3_1_H&E.png │ │ ├── tm_PrognosisTMABlock1_E_4_5_H&E.png │ │ ├── tm_PrognosisTMABlock3_A_2_1_H&E.png │ │ └── tm_PrognosisTMABlock3_A_2_1_H&E_1.png │ └── 4px │ │ ├── PrognosisTMABlock1_A_3_1_H&E.jpg │ │ ├── PrognosisTMABlock1_E_4_5_H&E.jpg │ │ ├── PrognosisTMABlock3_A_2_1_H&E.jpg │ │ ├── maskWithBoundaryPrognosisTMABlock1_A_3_1_H&E.png │ │ ├── tm_PrognosisTMABlock1_E_4_5_H&E.png │ │ └── tm_PrognosisTMABlock3_A_2_1_H&E.png │ ├── 40x │ ├── 63_LLM_YR4.jpg │ ├── 78_RLM_YR4.jpg │ ├── bm_63_LLM_YR4.png │ ├── bm_78_RLM_YR4.png │ ├── tm_63_LLM_YR4.png │ └── tm_78_RLM_YR4.png │ ├── caffe_data_prep.py │ ├── mat_to_torch.lua │ ├── theano_data_prep.py │ └── torch_data_prep.lua ├── predict_full_mask.lua └── torch-scripts ├── cnn_train_test_valid.lua ├── cnn_train_test_valid_old.lua ├── layer_architecture.txt ├── predict.lua ├── predict_full.lua ├── predict_full_mask.lua └── three_class └── arch_1 └── three_class_nuclei_train_test.lua /README.md: -------------------------------------------------------------------------------- 1 | # Nuclei Segmentation 2 | Following are the instructions to test our state-of-the-art deep-learning-based nuclei segmentation software using an AWS EC2 instance: 3 | 4 | Step 1 5 | --- 6 | Create an instance and configure it for using CUDA-enabled Torch (refer to https://drive.google.com/file/d/0ByERBiBsEbuTUS0wdWQ2NUZGTm8/view) 7 | 8 | Step 2 9 | --- 10 | Get the software from GitHub 11 | 12 | git clone https://github.com/neerajkumarvaid/NucleiSegmentation 13 | --- 14 | Step 3 15 | --- 16 | Test our state-of-the-art nuclei segmentation model 17 | 18 | cd NucleiSegmentation 19 | --- 20 | th predict_full_mask.lua 21 | --- 22 | Results will be saved in the /data/testing-data/40x/results folder 23 | 24 | Step 4 25 | --- 26 | Zip the results folder and download it to your laptop's "Downloads" folder 27 | 28 | zip -r results.zip results 29 | --- 30 | scp -i key.pem user@ip:~/NucleiSegmentation/data/testing-data/40x/results.zip ~/Downloads/ 31 | --- 32 | This will give you three CNN output images: 1.png, 2.png and 3.png.
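For a quick look at the raw outputs before any post-processing, here is a minimal Python sketch for inspecting the downloaded result images; it assumes the PNGs written by predict_full_mask.lua are single-channel, 8-bit per-pixel nuclei probability maps and that NumPy and Pillow are installed, so treat it as an illustration rather than the repository's own tooling.

    # Hedged sanity check of the CNN output maps downloaded in Step 4.
    # The probability-map interpretation and the 0.5 threshold are assumptions.
    import numpy as np
    from PIL import Image

    for name in ["1.png", "2.png", "3.png"]:
        prob = np.asarray(Image.open(name).convert("L"), dtype=np.float32) / 255.0
        mask = prob > 0.5  # crude foreground/background split before real post-processing
        print(name, prob.shape, "candidate nuclei pixels:", int(mask.sum()))
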
Please refer to nucleisegmentationbenchmark.weebly.com for post-processing and model details. 33 | 34 | Support 35 | --- 36 | If you found this useful, please consider starring(★) the repo so that it can reach a broader audience. 37 | -------------------------------------------------------------------------------- /amitpc-HP-Z620-Workstation_1454011145_1.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/amitpc-HP-Z620-Workstation_1454011145_1.dat -------------------------------------------------------------------------------- /caffe-scripts/nuclei/cifar_on_nuclei.prototxt: -------------------------------------------------------------------------------- 1 | name: "CIFAR_ON_NULCEI" 2 | layer { 3 | name: "nuclei" 4 | type: "ImageData" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | image_data_param { 11 | source: "/home/sanuj/temp_63_LLM_YR4/train.txt" 12 | batch_size: 100 13 | } 14 | } 15 | layer { 16 | name: "nuclei" 17 | type: "ImageData" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | image_data_param { 24 | source: "/home/sanuj/temp_63_LLM_YR4/63_LLM_YR4_6/meta.txt" 25 | batch_size: 100 26 | } 27 | } 28 | layer { 29 | name: "conv1" 30 | type: "Convolution" 31 | bottom: "data" 32 | top: "conv1" 33 | param { 34 | lr_mult: 1 35 | } 36 | param { 37 | lr_mult: 2 38 | } 39 | convolution_param { 40 | num_output: 32 41 | pad: 2 42 | kernel_size: 5 43 | stride: 1 44 | weight_filler { 45 | type: "gaussian" 46 | std: 0.0001 47 | } 48 | bias_filler { 49 | type: "constant" 50 | } 51 | } 52 | } 53 | layer { 54 | name: "pool1" 55 | type: "Pooling" 56 | bottom: "conv1" 57 | top: "pool1" 58 | pooling_param { 59 | pool: MAX 60 | kernel_size: 3 61 | stride: 2 62 | } 63 | } 64 | layer { 65 | name: "relu1" 66 | type: "ReLU" 67 | bottom: "pool1" 68 | top: "pool1" 69 | } 70 | layer { 71 | name: "conv2" 72 | type: "Convolution" 73 | bottom: "pool1" 74 | top: "conv2" 75 | param { 76 | lr_mult: 1 77 | } 78 | param { 79 | lr_mult: 2 80 | } 81 | convolution_param { 82 | num_output: 32 83 | pad: 2 84 | kernel_size: 5 85 | stride: 1 86 | weight_filler { 87 | type: "gaussian" 88 | std: 0.01 89 | } 90 | bias_filler { 91 | type: "constant" 92 | } 93 | } 94 | } 95 | layer { 96 | name: "relu2" 97 | type: "ReLU" 98 | bottom: "conv2" 99 | top: "conv2" 100 | } 101 | layer { 102 | name: "pool2" 103 | type: "Pooling" 104 | bottom: "conv2" 105 | top: "pool2" 106 | pooling_param { 107 | pool: AVE 108 | kernel_size: 3 109 | stride: 2 110 | } 111 | } 112 | layer { 113 | name: "conv3" 114 | type: "Convolution" 115 | bottom: "pool2" 116 | top: "conv3" 117 | param { 118 | lr_mult: 1 119 | } 120 | param { 121 | lr_mult: 2 122 | } 123 | convolution_param { 124 | num_output: 64 125 | pad: 2 126 | kernel_size: 5 127 | stride: 1 128 | weight_filler { 129 | type: "gaussian" 130 | std: 0.01 131 | } 132 | bias_filler { 133 | type: "constant" 134 | } 135 | } 136 | } 137 | layer { 138 | name: "relu3" 139 | type: "ReLU" 140 | bottom: "conv3" 141 | top: "conv3" 142 | } 143 | layer { 144 | name: "pool3" 145 | type: "Pooling" 146 | bottom: "conv3" 147 | top: "pool3" 148 | pooling_param { 149 | pool: AVE 150 | kernel_size: 3 151 | stride: 2 152 | } 153 | } 154 | layer { 155 | name: "ip1" 156 | type: "InnerProduct" 157 | bottom: "pool3" 158 | top: "ip1" 159 | param { 160 | lr_mult: 1 161 | } 162 | param { 163 | lr_mult: 2 164 | } 165 | 
inner_product_param { 166 | num_output: 64 167 | weight_filler { 168 | type: "gaussian" 169 | std: 0.1 170 | } 171 | bias_filler { 172 | type: "constant" 173 | } 174 | } 175 | } 176 | layer { 177 | name: "ip2" 178 | type: "InnerProduct" 179 | bottom: "ip1" 180 | top: "ip2" 181 | param { 182 | lr_mult: 1 183 | } 184 | param { 185 | lr_mult: 2 186 | } 187 | inner_product_param { 188 | num_output: 2 189 | weight_filler { 190 | type: "gaussian" 191 | std: 0.1 192 | } 193 | bias_filler { 194 | type: "constant" 195 | } 196 | } 197 | } 198 | layer { 199 | name: "accuracy" 200 | type: "Accuracy" 201 | bottom: "ip2" 202 | bottom: "label" 203 | top: "accuracy" 204 | include { 205 | phase: TEST 206 | } 207 | } 208 | layer { 209 | name: "loss" 210 | type: "SoftmaxWithLoss" 211 | bottom: "ip2" 212 | bottom: "label" 213 | top: "loss" 214 | } 215 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/cifar_on_nuclei_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/nuclei/cifar_on_nuclei.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 40000 21 | # snapshot intermediate results 22 | snapshot: 10000 23 | snapshot_format: HDF5 24 | snapshot_prefix: "examples/cifar10/cifar_on_nuclei" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/deploy.prototxt: -------------------------------------------------------------------------------- 1 | name: "NULCEI_quick" 2 | input: "data" 3 | input_shape { 4 | dim: 1 5 | dim: 3 6 | dim: 33 7 | dim: 33 8 | } 9 | layer { 10 | name: "conv1" 11 | type: "Convolution" 12 | bottom: "data" 13 | top: "conv1" 14 | convolution_param { 15 | num_output: 48 16 | pad: 0 17 | kernel_size: 6 18 | stride: 1 19 | } 20 | } 21 | layer { 22 | name: "pool1" 23 | type: "Pooling" 24 | bottom: "conv1" 25 | top: "pool1" 26 | pooling_param { 27 | pool: MAX 28 | kernel_size: 2 29 | stride: 2 30 | } 31 | } 32 | layer { 33 | name: "relu1" 34 | type: "ReLU" 35 | bottom: "pool1" 36 | top: "pool1" 37 | } 38 | layer { 39 | name: "conv2" 40 | type: "Convolution" 41 | bottom: "pool1" 42 | top: "conv2" 43 | convolution_param { 44 | num_output: 48 45 | pad: 0 46 | kernel_size: 4 47 | stride: 1 48 | } 49 | } 50 | layer { 51 | name: "relu2" 52 | type: "ReLU" 53 | bottom: "conv2" 54 | top: "conv2" 55 | } 56 | layer { 57 | name: "pool2" 58 | type: "Pooling" 59 | bottom: "conv2" 60 | top: "pool2" 61 | pooling_param { 62 | pool: MAX 63 | kernel_size: 2 64 | stride: 2 65 | } 66 | } 67 | layer { 68 | name: "ip1" 69 | type: "InnerProduct" 70 | bottom: "pool2" 71 | top: "ip1" 72 | inner_product_param { 73 | num_output: 48 74 | } 75 | } 76 | layer { 77 | name: "relu1" 78 | type: "ReLU" 79 | bottom: "ip1" 80 | 
top: "ip1" 81 | } 82 | layer { 83 | name: "ip2" 84 | type: "InnerProduct" 85 | bottom: "ip1" 86 | top: "ip2" 87 | inner_product_param { 88 | num_output: 2 89 | } 90 | } 91 | layer { 92 | name: "prob" 93 | type: "Softmax" 94 | bottom: "ip2" 95 | top: "prob" 96 | } 97 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/multi_class_nuclei_1_iter_10000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/multi_class_nuclei/multi_class_nuclei_1_iter_10000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/multi_class_nuclei_1_iter_10000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/multi_class_nuclei/multi_class_nuclei_1_iter_10000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/multi_class_nuclei_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 20 epochs (40000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/nuclei/multi_class_nuclei/multi_class_nuclei_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 70 9 | # Carry out testing every 1000 training iterations. 10 | test_interval: 1000 11 | # The base learning rate, momentum and the weight decay of the network. 
12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.0005 15 | # The learning rate policy 16 | lr_policy: "step" 17 | stepsize: 20000 18 | gamma: 0.1 19 | # Display every 100 iterations 20 | display: 100 21 | # The maximum number of iterations 22 | max_iter: 40000 23 | # snapshot intermediate results 24 | snapshot: 10000 25 | snapshot_prefix: "examples/nuclei/multi_class_nuclei/multi_class_nuclei_1" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/multi_class_nuclei_train_test.prototxt: -------------------------------------------------------------------------------- 1 | name: "NULCEI_quick" 2 | layer { 3 | name: "nuclei" 4 | type: "ImageData" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | image_data_param { 11 | source: "/home/sanuj/Projects/BTP/data/10_class_images/train.txt" 12 | batch_size: 100 13 | } 14 | } 15 | layer { 16 | name: "nuclei" 17 | type: "ImageData" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | image_data_param { 24 | source: "/home/sanuj/Projects/BTP/data/10_class_images/test.txt" 25 | batch_size: 500 26 | } 27 | } 28 | layer { 29 | name: "conv1" 30 | type: "Convolution" 31 | bottom: "data" 32 | top: "conv1" 33 | param { 34 | lr_mult: 0.1 35 | } 36 | param { 37 | lr_mult: 0.2 38 | } 39 | convolution_param { 40 | num_output: 48 41 | pad: 0 42 | kernel_size: 4 43 | stride: 1 44 | weight_filler { 45 | type: "gaussian" 46 | std: 0.0001 47 | } 48 | bias_filler { 49 | type: "constant" 50 | } 51 | } 52 | } 53 | layer { 54 | name: "pool1" 55 | type: "Pooling" 56 | bottom: "conv1" 57 | top: "pool1" 58 | pooling_param { 59 | pool: MAX 60 | kernel_size: 2 61 | stride: 2 62 | } 63 | } 64 | layer { 65 | name: "relu1" 66 | type: "ReLU" 67 | bottom: "pool1" 68 | top: "pool1" 69 | } 70 | layer { 71 | name: "conv2" 72 | type: "Convolution" 73 | bottom: "pool1" 74 | top: "conv2" 75 | param { 76 | lr_mult: 0.1 77 | } 78 | param { 79 | lr_mult: 0.2 80 | } 81 | convolution_param { 82 | num_output: 48 83 | pad: 0 84 | kernel_size: 6 85 | stride: 1 86 | weight_filler { 87 | type: "gaussian" 88 | std: 0.01 89 | } 90 | bias_filler { 91 | type: "constant" 92 | } 93 | } 94 | } 95 | layer { 96 | name: "relu2" 97 | type: "ReLU" 98 | bottom: "conv2" 99 | top: "conv2" 100 | } 101 | layer { 102 | name: "pool2" 103 | type: "Pooling" 104 | bottom: "conv2" 105 | top: "pool2" 106 | pooling_param { 107 | pool: MAX 108 | kernel_size: 2 109 | stride: 2 110 | } 111 | } 112 | layer { 113 | name: "ip_1" 114 | type: "InnerProduct" 115 | bottom: "pool2" 116 | top: "ip1" 117 | param { 118 | lr_mult: 1 119 | } 120 | param { 121 | lr_mult: 2 122 | } 123 | inner_product_param { 124 | num_output: 48 125 | weight_filler { 126 | type: "gaussian" 127 | std: 0.1 128 | } 129 | bias_filler { 130 | type: "constant" 131 | } 132 | } 133 | } 134 | layer { 135 | name: "relu1" 136 | type: "ReLU" 137 | bottom: "ip1" 138 | top: "ip1" 139 | } 140 | layer { 141 | name: "ip_2" 142 | type: "InnerProduct" 143 | bottom: "ip1" 144 | top: "ip2" 145 | param { 146 | lr_mult: 1 147 | } 148 | param { 149 | lr_mult: 2 150 | } 151 | inner_product_param { 152 | num_output: 10 153 | weight_filler { 154 | type: "gaussian" 155 | std: 0.1 156 | } 157 | bias_filler { 158 | type: "constant" 159 | } 160 | } 161 | } 162 | layer { 163 | name: "accuracy" 164 | type: "Accuracy" 165 | bottom: "ip2" 166 | bottom: "label" 167 | top: "accuracy" 168 | include { 169 | 
phase: TEST 170 | } 171 | } 172 | layer { 173 | name: "loss" 174 | type: "SoftmaxWithLoss" 175 | bottom: "ip2" 176 | bottom: "label" 177 | top: "loss" 178 | } 179 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/train_multi_class_nuclei.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/nuclei/multi_class_nuclei/multi_class_nuclei_solver.prototxt \ 7 | --snapshot=examples/nuclei/multi_class_nuclei/multi_class_nuclei_1_iter_10000.solverstate 8 | 2>&1 | tee examples/nuclei/multi_class_nuclei/train_multi_class_nuclei_2_output.txt 9 | #--weights=examples/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.caffemodel 10 | 11 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/deploy.prototxt: -------------------------------------------------------------------------------- 1 | name: "NULCEI_quick" 2 | input: "data" 3 | input_shape { 4 | dim: 500 5 | dim: 3 6 | dim: 33 7 | dim: 33 8 | } 9 | layer { 10 | name: "conv1" 11 | type: "Convolution" 12 | bottom: "data" 13 | top: "conv1" 14 | convolution_param { 15 | num_output: 48 16 | pad: 0 17 | kernel_size: 4 18 | stride: 1 19 | } 20 | } 21 | layer { 22 | name: "pool1" 23 | type: "Pooling" 24 | bottom: "conv1" 25 | top: "pool1" 26 | pooling_param { 27 | pool: MAX 28 | kernel_size: 2 29 | stride: 2 30 | } 31 | } 32 | layer { 33 | name: "relu1" 34 | type: "ReLU" 35 | bottom: "pool1" 36 | top: "pool1" 37 | } 38 | layer { 39 | name: "conv2" 40 | type: "Convolution" 41 | bottom: "pool1" 42 | top: "conv2" 43 | convolution_param { 44 | num_output: 48 45 | pad: 0 46 | kernel_size: 6 47 | stride: 1 48 | } 49 | } 50 | layer { 51 | name: "relu2" 52 | type: "ReLU" 53 | bottom: "conv2" 54 | top: "conv2" 55 | } 56 | layer { 57 | name: "pool2" 58 | type: "Pooling" 59 | bottom: "conv2" 60 | top: "pool2" 61 | pooling_param { 62 | pool: MAX 63 | kernel_size: 2 64 | stride: 2 65 | } 66 | } 67 | layer { 68 | name: "ip1" 69 | type: "InnerProduct" 70 | bottom: "pool2" 71 | top: "ip1" 72 | inner_product_param { 73 | num_output: 48 74 | } 75 | } 76 | layer { 77 | name: "relu1" 78 | type: "ReLU" 79 | bottom: "ip1" 80 | top: "ip1" 81 | } 82 | layer { 83 | name: "ip2" 84 | type: "InnerProduct" 85 | bottom: "ip1" 86 | top: "ip2" 87 | inner_product_param { 88 | num_output: 2 89 | } 90 | } 91 | layer { 92 | name: "prob" 93 | type: "Softmax" 94 | bottom: "ip2" 95 | top: "prob" 96 | } 97 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/train_use_multi_class.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_solver.prototxt \ 7 | --snapshot=examples/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_nuclei_big_1_iter_51647.solverstate \ 8 | 2>&1 | tee examples/nuclei/multi_class_nuclei/use_multi_class/train_use_multi_class_2_output.txt 9 | #--snapshot=examples/nuclei/multi_class_nuclei/multi_class_nuclei_1_iter_10000.solverstate 10 | #--weights=examples/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.caffemodel 11 | 12 | -------------------------------------------------------------------------------- 
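For context on how a deploy network like the use_multi_class one above can be driven from Python, here is a hedged pycaffe sketch that loads that deploy.prototxt together with the iter_51647 caffemodel listed next and scores one 33x33 RGB patch. The relative paths, the single-patch reshape, the raw RGB preprocessing and the "class 1 = nucleus" reading are illustrative assumptions; the repository's own inference code lives in caffe-scripts/predict.py and the torch-scripts.

    # Hedged illustration only: paths and preprocessing are assumptions,
    # not something the repository confirms.
    import numpy as np
    import caffe

    caffe.set_mode_gpu()  # or caffe.set_mode_cpu()
    net = caffe.Net(
        "caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/deploy.prototxt",
        "caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/"
        "use_multi_class_nuclei_big_1_iter_51647.caffemodel",
        caffe.TEST)

    # deploy.prototxt declares batches of 500 patches of shape 3x33x33;
    # shrink the input blob to a single patch for this example.
    net.blobs["data"].reshape(1, 3, 33, 33)
    net.reshape()
    patch = np.random.rand(1, 3, 33, 33).astype(np.float32)  # stand-in for a real H&E patch
    net.blobs["data"].data[...] = patch
    prob = net.forward()["prob"]  # softmax output of the two-way classifier
    print("p(class 1):", float(prob[0, 1]))

Training, in contrast, goes through the caffe binary exactly as the shell scripts above show: caffe train --solver=... with --snapshot=... to resume from a solverstate, or --weights=... to fine-tune from a caffemodel.
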
/caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_nuclei_big_1_iter_51647.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_nuclei_big_1_iter_51647.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_nuclei_big_1_iter_51647.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_nuclei_big_1_iter_51647.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 20 epochs (40000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_test_train.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 700 9 | # Carry out testing every 1000 training iterations. 10 | test_interval: 6665 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.0005 15 | # The learning rate policy 16 | lr_policy: "step" 17 | stepsize: 166625 18 | gamma: 0.1 19 | # Display every 100 iterations 20 | display: 100 21 | # The maximum number of iterations 22 | max_iter: 333250 23 | # snapshot intermediate results 24 | snapshot: 66650 25 | snapshot_prefix: "examples/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_nuclei_big_1" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_test_train.prototxt: -------------------------------------------------------------------------------- 1 | name: "NULCEI_quick" 2 | layer { 3 | name: "nuclei" 4 | type: "ImageData" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | image_data_param { 11 | source: "/home/sanuj/temp_63_LLM_YR4_33/train_big.txt" 12 | batch_size: 100 13 | } 14 | } 15 | layer { 16 | name: "nuclei" 17 | type: "ImageData" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | image_data_param { 24 | source: "/home/sanuj/temp_63_LLM_YR4_33/test_big.txt" 25 | batch_size: 500 26 | } 27 | } 28 | layer { 29 | name: "conv1" 30 | type: "Convolution" 31 | bottom: "data" 32 | top: "conv1" 33 | param { 34 | lr_mult: 0.1 35 | } 36 | param { 37 | lr_mult: 0.2 38 | } 39 | convolution_param { 40 | num_output: 48 41 | pad: 0 42 | kernel_size: 4 43 | stride: 1 44 | weight_filler { 45 | type: "gaussian" 46 | std: 0.0001 47 | } 48 | bias_filler { 49 | type: "constant" 50 | } 51 | } 52 | } 53 | layer { 54 | name: "pool1" 55 | type: "Pooling" 56 | bottom: "conv1" 57 | top: "pool1" 58 | pooling_param { 59 | pool: MAX 60 
| kernel_size: 2 61 | stride: 2 62 | } 63 | } 64 | layer { 65 | name: "relu1" 66 | type: "ReLU" 67 | bottom: "pool1" 68 | top: "pool1" 69 | } 70 | layer { 71 | name: "conv2" 72 | type: "Convolution" 73 | bottom: "pool1" 74 | top: "conv2" 75 | param { 76 | lr_mult: 0.1 77 | } 78 | param { 79 | lr_mult: 0.2 80 | } 81 | convolution_param { 82 | num_output: 48 83 | pad: 0 84 | kernel_size: 6 85 | stride: 1 86 | weight_filler { 87 | type: "gaussian" 88 | std: 0.01 89 | } 90 | bias_filler { 91 | type: "constant" 92 | } 93 | } 94 | } 95 | layer { 96 | name: "relu2" 97 | type: "ReLU" 98 | bottom: "conv2" 99 | top: "conv2" 100 | } 101 | layer { 102 | name: "pool2" 103 | type: "Pooling" 104 | bottom: "conv2" 105 | top: "pool2" 106 | pooling_param { 107 | pool: MAX 108 | kernel_size: 2 109 | stride: 2 110 | } 111 | } 112 | layer { 113 | name: "ip1" 114 | type: "InnerProduct" 115 | bottom: "pool2" 116 | top: "ip1" 117 | param { 118 | lr_mult: 1 119 | } 120 | param { 121 | lr_mult: 2 122 | } 123 | inner_product_param { 124 | num_output: 48 125 | weight_filler { 126 | type: "gaussian" 127 | std: 0.1 128 | } 129 | bias_filler { 130 | type: "constant" 131 | } 132 | } 133 | } 134 | layer { 135 | name: "relu1" 136 | type: "ReLU" 137 | bottom: "ip1" 138 | top: "ip1" 139 | } 140 | layer { 141 | name: "ip2" 142 | type: "InnerProduct" 143 | bottom: "ip1" 144 | top: "ip2" 145 | param { 146 | lr_mult: 1 147 | } 148 | param { 149 | lr_mult: 2 150 | } 151 | inner_product_param { 152 | num_output: 2 153 | weight_filler { 154 | type: "gaussian" 155 | std: 0.1 156 | } 157 | bias_filler { 158 | type: "constant" 159 | } 160 | } 161 | } 162 | layer { 163 | name: "accuracy" 164 | type: "Accuracy" 165 | bottom: "ip2" 166 | bottom: "label" 167 | top: "accuracy" 168 | include { 169 | phase: TEST 170 | } 171 | } 172 | layer { 173 | name: "loss" 174 | type: "SoftmaxWithLoss" 175 | bottom: "ip2" 176 | bottom: "label" 177 | top: "loss" 178 | } 179 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/nuclei_quick_relative_iter_60000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/nuclei_quick_relative_iter_60000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/nuclei_quick_relative_iter_60000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/nuclei_quick_relative_iter_60000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/nuclei_quick_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 20 epochs (40000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/nuclei/nuclei_quick_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 200 9 | # Carry out testing every 1000 training iterations. 10 | test_interval: 1000 11 | # The base learning rate, momentum and the weight decay of the network. 
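# With the "step" policy used below, lr = base_lr * gamma ^ floor(iter / stepsize):
# the rate stays at 0.001 for iterations 0-59999 and drops to 0.0001 from
# iteration 60000 until max_iter (120000).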
12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.0005 15 | # The learning rate policy 16 | lr_policy: "step" 17 | stepsize: 60000 18 | gamma: 0.1 19 | # Display every 100 iterations 20 | display: 100 21 | # The maximum number of iterations 22 | max_iter: 120000 23 | # snapshot intermediate results 24 | snapshot: 10000 25 | snapshot_prefix: "examples/nuclei/nuclei_quick_relative" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/nuclei_quick_train_test.prototxt: -------------------------------------------------------------------------------- 1 | name: "NUCLEI_quick" 2 | layer { 3 | name: "nuclei" 4 | type: "ImageData" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | image_data_param { 11 | source: "/home/sanuj/temp_63_LLM_YR4_33/train.txt" 12 | batch_size: 200 13 | } 14 | } 15 | layer { 16 | name: "nuclei" 17 | type: "ImageData" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | image_data_param { 24 | source: "/home/sanuj/temp_63_LLM_YR4_33/test.txt" 25 | batch_size: 500 26 | } 27 | } 28 | layer { 29 | name: "conv1" 30 | type: "Convolution" 31 | bottom: "data" 32 | top: "conv1" 33 | param { 34 | lr_mult: 0.1 35 | } 36 | param { 37 | lr_mult: 0.2 38 | } 39 | convolution_param { 40 | num_output: 48 41 | pad: 0 42 | kernel_size: 6 43 | stride: 1 44 | weight_filler { 45 | type: "gaussian" 46 | std: 0.0001 47 | } 48 | bias_filler { 49 | type: "constant" 50 | } 51 | } 52 | } 53 | layer { 54 | name: "pool1" 55 | type: "Pooling" 56 | bottom: "conv1" 57 | top: "pool1" 58 | pooling_param { 59 | pool: MAX 60 | kernel_size: 2 61 | stride: 2 62 | } 63 | } 64 | layer { 65 | name: "relu1" 66 | type: "ReLU" 67 | bottom: "pool1" 68 | top: "pool1" 69 | } 70 | layer { 71 | name: "conv2" 72 | type: "Convolution" 73 | bottom: "pool1" 74 | top: "conv2" 75 | param { 76 | lr_mult: 0.1 77 | } 78 | param { 79 | lr_mult: 0.2 80 | } 81 | convolution_param { 82 | num_output: 48 83 | pad: 0 84 | kernel_size: 4 85 | stride: 1 86 | weight_filler { 87 | type: "gaussian" 88 | std: 0.01 89 | } 90 | bias_filler { 91 | type: "constant" 92 | } 93 | } 94 | } 95 | layer { 96 | name: "relu2" 97 | type: "ReLU" 98 | bottom: "conv2" 99 | top: "conv2" 100 | } 101 | layer { 102 | name: "pool2" 103 | type: "Pooling" 104 | bottom: "conv2" 105 | top: "pool2" 106 | pooling_param { 107 | pool: MAX 108 | kernel_size: 2 109 | stride: 2 110 | } 111 | } 112 | layer { 113 | name: "ip1" 114 | type: "InnerProduct" 115 | bottom: "pool2" 116 | top: "ip1" 117 | param { 118 | lr_mult: 1 119 | } 120 | param { 121 | lr_mult: 2 122 | } 123 | inner_product_param { 124 | num_output: 48 125 | weight_filler { 126 | type: "gaussian" 127 | std: 0.1 128 | } 129 | bias_filler { 130 | type: "constant" 131 | } 132 | } 133 | } 134 | layer { 135 | name: "relu1" 136 | type: "ReLU" 137 | bottom: "ip1" 138 | top: "ip1" 139 | } 140 | layer { 141 | name: "ip2" 142 | type: "InnerProduct" 143 | bottom: "ip1" 144 | top: "ip2" 145 | param { 146 | lr_mult: 1 147 | } 148 | param { 149 | lr_mult: 2 150 | } 151 | inner_product_param { 152 | num_output: 2 153 | weight_filler { 154 | type: "gaussian" 155 | std: 0.1 156 | } 157 | bias_filler { 158 | type: "constant" 159 | } 160 | } 161 | } 162 | layer { 163 | name: "accuracy" 164 | type: "Accuracy" 165 | bottom: "ip2" 166 | bottom: "label" 167 | top: "accuracy" 168 | include { 169 | phase: TEST 170 | } 171 | } 172 | layer { 173 | name: "loss" 174 | type: 
"SoftmaxWithLoss" 175 | bottom: "ip2" 176 | bottom: "label" 177 | top: "loss" 178 | } 179 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/output_1.txt: -------------------------------------------------------------------------------- 1 | I0116 21:12:13.839344 12881 caffe.cpp:184] Using GPUs 0 2 | I0116 21:12:14.020617 12881 solver.cpp:54] Initializing solver from parameters: 3 | test_iter: 600 4 | test_interval: 30000 5 | base_lr: 0.001 6 | display: 3000 7 | max_iter: 150000 8 | lr_policy: "fixed" 9 | momentum: 0.9 10 | weight_decay: 0.0005 11 | snapshot: 30000 12 | snapshot_prefix: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap" 13 | solver_mode: GPU 14 | device_id: 0 15 | net: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_train_test.prototxt" 16 | I0116 21:12:14.020787 12881 solver.cpp:97] Creating training net from net file: /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_train_test.prototxt 17 | I0116 21:12:14.021124 12881 net.cpp:339] The NetState phase (0) differed from the phase (1) specified by a rule in layer nuclei 18 | I0116 21:12:14.021152 12881 net.cpp:339] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy 19 | I0116 21:12:14.021246 12881 net.cpp:50] Initializing net from parameters: 20 | name: "NUCLEI_three_class" 21 | state { 22 | phase: TRAIN 23 | } 24 | layer { 25 | name: "nuclei" 26 | type: "ImageData" 27 | top: "data" 28 | top: "label" 29 | include { 30 | phase: TRAIN 31 | } 32 | image_data_param { 33 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class/train.txt" 34 | batch_size: 100 35 | } 36 | } 37 | layer { 38 | name: "conv1" 39 | type: "Convolution" 40 | bottom: "data" 41 | top: "conv1" 42 | param { 43 | lr_mult: 0.1 44 | } 45 | param { 46 | lr_mult: 0.2 47 | } 48 | convolution_param { 49 | num_output: 48 50 | pad: 0 51 | kernel_size: 4 52 | stride: 1 53 | weight_filler { 54 | type: "gaussian" 55 | std: 0.0001 56 | } 57 | bias_filler { 58 | type: "constant" 59 | } 60 | } 61 | } 62 | layer { 63 | name: "pool1" 64 | type: "Pooling" 65 | bottom: "conv1" 66 | top: "pool1" 67 | pooling_param { 68 | pool: MAX 69 | kernel_size: 2 70 | stride: 2 71 | } 72 | } 73 | layer { 74 | name: "relu1" 75 | type: "ReLU" 76 | bottom: "pool1" 77 | top: "pool1" 78 | } 79 | layer { 80 | name: "conv2" 81 | type: "Convolution" 82 | bottom: "pool1" 83 | top: "conv2" 84 | param { 85 | lr_mult: 0.1 86 | } 87 | param { 88 | lr_mult: 0.2 89 | } 90 | convolution_param { 91 | num_output: 48 92 | pad: 0 93 | kernel_size: 6 94 | stride: 1 95 | weight_filler { 96 | type: "gaussian" 97 | std: 0.01 98 | } 99 | bias_filler { 100 | type: "constant" 101 | } 102 | } 103 | } 104 | layer { 105 | name: "relu2" 106 | type: "ReLU" 107 | bottom: "conv2" 108 | top: "conv2" 109 | } 110 | layer { 111 | name: "pool2" 112 | type: "Pooling" 113 | bottom: "conv2" 114 | top: "pool2" 115 | pooling_param { 116 | pool: MAX 117 | kernel_size: 2 118 | stride: 2 119 | } 120 | } 121 | layer { 122 | name: "ip_1" 123 | type: "InnerProduct" 124 | bottom: "pool2" 125 | top: "ip1" 126 | param { 127 | lr_mult: 1 128 | } 129 | param { 130 | lr_mult: 2 131 | } 132 | inner_product_param { 133 | num_output: 30 134 | weight_filler { 135 | type: "gaussian" 136 | std: 0.1 137 | } 138 | bias_filler { 139 | type: "constant" 140 | } 141 | } 142 | } 143 | layer { 144 | name: "relu1" 145 | 
type: "ReLU" 146 | bottom: "ip1" 147 | top: "ip1" 148 | } 149 | layer { 150 | name: "ip_2" 151 | type: "InnerProduct" 152 | bottom: "ip1" 153 | top: "ip2" 154 | param { 155 | lr_mult: 1 156 | } 157 | param { 158 | lr_mult: 2 159 | } 160 | inner_product_param { 161 | num_output: 3 162 | weight_filler { 163 | type: "gaussian" 164 | std: 0.1 165 | } 166 | bias_filler { 167 | type: "constant" 168 | } 169 | } 170 | } 171 | layer { 172 | name: "loss" 173 | type: "SoftmaxWithLoss" 174 | bottom: "ip2" 175 | bottom: "label" 176 | top: "loss" 177 | } 178 | I0116 21:12:14.021323 12881 layer_factory.hpp:76] Creating layer nuclei 179 | I0116 21:12:14.021360 12881 net.cpp:110] Creating Layer nuclei 180 | I0116 21:12:14.021373 12881 net.cpp:433] nuclei -> data 181 | I0116 21:12:14.021399 12881 net.cpp:433] nuclei -> label 182 | I0116 21:12:14.021417 12881 image_data_layer.cpp:37] Opening file /home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class/train.txt 183 | I0116 21:12:14.167819 12881 image_data_layer.cpp:52] A total of 300000 images. 184 | I0116 21:12:14.174240 12881 image_data_layer.cpp:79] output data size: 100,3,51,51 185 | I0116 21:12:14.187820 12881 net.cpp:155] Setting up nuclei 186 | I0116 21:12:14.187861 12881 net.cpp:163] Top shape: 100 3 51 51 (780300) 187 | I0116 21:12:14.187871 12881 net.cpp:163] Top shape: 100 (100) 188 | I0116 21:12:14.187881 12881 layer_factory.hpp:76] Creating layer conv1 189 | I0116 21:12:14.187912 12881 net.cpp:110] Creating Layer conv1 190 | I0116 21:12:14.187922 12881 net.cpp:477] conv1 <- data 191 | I0116 21:12:14.187960 12881 net.cpp:433] conv1 -> conv1 192 | I0116 21:12:14.188781 12881 net.cpp:155] Setting up conv1 193 | I0116 21:12:14.188799 12881 net.cpp:163] Top shape: 100 48 48 48 (11059200) 194 | I0116 21:12:14.188822 12881 layer_factory.hpp:76] Creating layer pool1 195 | I0116 21:12:14.188838 12881 net.cpp:110] Creating Layer pool1 196 | I0116 21:12:14.188845 12881 net.cpp:477] pool1 <- conv1 197 | I0116 21:12:14.188856 12881 net.cpp:433] pool1 -> pool1 198 | I0116 21:12:14.188912 12881 net.cpp:155] Setting up pool1 199 | I0116 21:12:14.188923 12881 net.cpp:163] Top shape: 100 48 24 24 (2764800) 200 | I0116 21:12:14.188930 12881 layer_factory.hpp:76] Creating layer relu1 201 | I0116 21:12:14.188941 12881 net.cpp:110] Creating Layer relu1 202 | I0116 21:12:14.188947 12881 net.cpp:477] relu1 <- pool1 203 | I0116 21:12:14.188956 12881 net.cpp:419] relu1 -> pool1 (in-place) 204 | I0116 21:12:14.188964 12881 net.cpp:155] Setting up relu1 205 | I0116 21:12:14.188973 12881 net.cpp:163] Top shape: 100 48 24 24 (2764800) 206 | I0116 21:12:14.188979 12881 layer_factory.hpp:76] Creating layer conv2 207 | I0116 21:12:14.188997 12881 net.cpp:110] Creating Layer conv2 208 | I0116 21:12:14.189003 12881 net.cpp:477] conv2 <- pool1 209 | I0116 21:12:14.189013 12881 net.cpp:433] conv2 -> conv2 210 | I0116 21:12:14.193909 12881 net.cpp:155] Setting up conv2 211 | I0116 21:12:14.193930 12881 net.cpp:163] Top shape: 100 48 19 19 (1732800) 212 | I0116 21:12:14.193989 12881 layer_factory.hpp:76] Creating layer relu2 213 | I0116 21:12:14.194026 12881 net.cpp:110] Creating Layer relu2 214 | I0116 21:12:14.194048 12881 net.cpp:477] relu2 <- conv2 215 | I0116 21:12:14.194070 12881 net.cpp:419] relu2 -> conv2 (in-place) 216 | I0116 21:12:14.194097 12881 net.cpp:155] Setting up relu2 217 | I0116 21:12:14.194119 12881 net.cpp:163] Top shape: 100 48 19 19 (1732800) 218 | I0116 21:12:14.194140 12881 layer_factory.hpp:76] Creating layer pool2 219 | I0116 21:12:14.194172 
12881 net.cpp:110] Creating Layer pool2 220 | I0116 21:12:14.194193 12881 net.cpp:477] pool2 <- conv2 221 | I0116 21:12:14.194214 12881 net.cpp:433] pool2 -> pool2 222 | I0116 21:12:14.194288 12881 net.cpp:155] Setting up pool2 223 | I0116 21:12:14.194315 12881 net.cpp:163] Top shape: 100 48 10 10 (480000) 224 | I0116 21:12:14.194337 12881 layer_factory.hpp:76] Creating layer ip_1 225 | I0116 21:12:14.194365 12881 net.cpp:110] Creating Layer ip_1 226 | I0116 21:12:14.194387 12881 net.cpp:477] ip_1 <- pool2 227 | I0116 21:12:14.194413 12881 net.cpp:433] ip_1 -> ip1 228 | I0116 21:12:14.199700 12881 net.cpp:155] Setting up ip_1 229 | I0116 21:12:14.199728 12881 net.cpp:163] Top shape: 100 30 (3000) 230 | I0116 21:12:14.199750 12881 layer_factory.hpp:76] Creating layer relu1 231 | I0116 21:12:14.199774 12881 net.cpp:110] Creating Layer relu1 232 | I0116 21:12:14.199782 12881 net.cpp:477] relu1 <- ip1 233 | I0116 21:12:14.199792 12881 net.cpp:419] relu1 -> ip1 (in-place) 234 | I0116 21:12:14.199805 12881 net.cpp:155] Setting up relu1 235 | I0116 21:12:14.199816 12881 net.cpp:163] Top shape: 100 30 (3000) 236 | I0116 21:12:14.199823 12881 layer_factory.hpp:76] Creating layer ip_2 237 | I0116 21:12:14.199836 12881 net.cpp:110] Creating Layer ip_2 238 | I0116 21:12:14.199841 12881 net.cpp:477] ip_2 <- ip1 239 | I0116 21:12:14.199852 12881 net.cpp:433] ip_2 -> ip2 240 | I0116 21:12:14.200043 12881 net.cpp:155] Setting up ip_2 241 | I0116 21:12:14.200055 12881 net.cpp:163] Top shape: 100 3 (300) 242 | I0116 21:12:14.200067 12881 layer_factory.hpp:76] Creating layer loss 243 | I0116 21:12:14.200080 12881 net.cpp:110] Creating Layer loss 244 | I0116 21:12:14.200089 12881 net.cpp:477] loss <- ip2 245 | I0116 21:12:14.200096 12881 net.cpp:477] loss <- label 246 | I0116 21:12:14.200109 12881 net.cpp:433] loss -> loss 247 | I0116 21:12:14.200124 12881 layer_factory.hpp:76] Creating layer loss 248 | I0116 21:12:14.200256 12881 net.cpp:155] Setting up loss 249 | I0116 21:12:14.200268 12881 net.cpp:163] Top shape: (1) 250 | I0116 21:12:14.200273 12881 net.cpp:168] with loss weight 1 251 | I0116 21:12:14.200296 12881 net.cpp:236] loss needs backward computation. 252 | I0116 21:12:14.200304 12881 net.cpp:236] ip_2 needs backward computation. 253 | I0116 21:12:14.200310 12881 net.cpp:236] relu1 needs backward computation. 254 | I0116 21:12:14.200315 12881 net.cpp:236] ip_1 needs backward computation. 255 | I0116 21:12:14.200321 12881 net.cpp:236] pool2 needs backward computation. 256 | I0116 21:12:14.200350 12881 net.cpp:236] relu2 needs backward computation. 257 | I0116 21:12:14.200357 12881 net.cpp:236] conv2 needs backward computation. 258 | I0116 21:12:14.200363 12881 net.cpp:236] relu1 needs backward computation. 259 | I0116 21:12:14.200369 12881 net.cpp:236] pool1 needs backward computation. 260 | I0116 21:12:14.200376 12881 net.cpp:236] conv1 needs backward computation. 261 | I0116 21:12:14.200383 12881 net.cpp:240] nuclei does not need backward computation. 262 | I0116 21:12:14.200388 12881 net.cpp:283] This network produces output loss 263 | I0116 21:12:14.200403 12881 net.cpp:297] Network initialization done. 
264 | I0116 21:12:14.200410 12881 net.cpp:298] Memory required for data: 85284404 265 | I0116 21:12:14.200853 12881 solver.cpp:187] Creating test net (#0) specified by net file: /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_train_test.prototxt 266 | I0116 21:12:14.200896 12881 net.cpp:339] The NetState phase (1) differed from the phase (0) specified by a rule in layer nuclei 267 | I0116 21:12:14.201042 12881 net.cpp:50] Initializing net from parameters: 268 | name: "NUCLEI_three_class" 269 | state { 270 | phase: TEST 271 | } 272 | layer { 273 | name: "nuclei" 274 | type: "ImageData" 275 | top: "data" 276 | top: "label" 277 | include { 278 | phase: TEST 279 | } 280 | image_data_param { 281 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class/test.txt" 282 | batch_size: 200 283 | } 284 | } 285 | layer { 286 | name: "conv1" 287 | type: "Convolution" 288 | bottom: "data" 289 | top: "conv1" 290 | param { 291 | lr_mult: 0.1 292 | } 293 | param { 294 | lr_mult: 0.2 295 | } 296 | convolution_param { 297 | num_output: 48 298 | pad: 0 299 | kernel_size: 4 300 | stride: 1 301 | weight_filler { 302 | type: "gaussian" 303 | std: 0.0001 304 | } 305 | bias_filler { 306 | type: "constant" 307 | } 308 | } 309 | } 310 | layer { 311 | name: "pool1" 312 | type: "Pooling" 313 | bottom: "conv1" 314 | top: "pool1" 315 | pooling_param { 316 | pool: MAX 317 | kernel_size: 2 318 | stride: 2 319 | } 320 | } 321 | layer { 322 | name: "relu1" 323 | type: "ReLU" 324 | bottom: "pool1" 325 | top: "pool1" 326 | } 327 | layer { 328 | name: "conv2" 329 | type: "Convolution" 330 | bottom: "pool1" 331 | top: "conv2" 332 | param { 333 | lr_mult: 0.1 334 | } 335 | param { 336 | lr_mult: 0.2 337 | } 338 | convolution_param { 339 | num_output: 48 340 | pad: 0 341 | kernel_size: 6 342 | stride: 1 343 | weight_filler { 344 | type: "gaussian" 345 | std: 0.01 346 | } 347 | bias_filler { 348 | type: "constant" 349 | } 350 | } 351 | } 352 | layer { 353 | name: "relu2" 354 | type: "ReLU" 355 | bottom: "conv2" 356 | top: "conv2" 357 | } 358 | layer { 359 | name: "pool2" 360 | type: "Pooling" 361 | bottom: "conv2" 362 | top: "pool2" 363 | pooling_param { 364 | pool: MAX 365 | kernel_size: 2 366 | stride: 2 367 | } 368 | } 369 | layer { 370 | name: "ip_1" 371 | type: "InnerProduct" 372 | bottom: "pool2" 373 | top: "ip1" 374 | param { 375 | lr_mult: 1 376 | } 377 | param { 378 | lr_mult: 2 379 | } 380 | inner_product_param { 381 | num_output: 30 382 | weight_filler { 383 | type: "gaussian" 384 | std: 0.1 385 | } 386 | bias_filler { 387 | type: "constant" 388 | } 389 | } 390 | } 391 | layer { 392 | name: "relu1" 393 | type: "ReLU" 394 | bottom: "ip1" 395 | top: "ip1" 396 | } 397 | layer { 398 | name: "ip_2" 399 | type: "InnerProduct" 400 | bottom: "ip1" 401 | top: "ip2" 402 | param { 403 | lr_mult: 1 404 | } 405 | param { 406 | lr_mult: 2 407 | } 408 | inner_product_param { 409 | num_output: 3 410 | weight_filler { 411 | type: "gaussian" 412 | std: 0.1 413 | } 414 | bias_filler { 415 | type: "constant" 416 | } 417 | } 418 | } 419 | layer { 420 | name: "accuracy" 421 | type: "Accuracy" 422 | bottom: "ip2" 423 | bottom: "label" 424 | top: "accuracy" 425 | include { 426 | phase: TEST 427 | } 428 | } 429 | layer { 430 | name: "loss" 431 | type: "SoftmaxWithLoss" 432 | bottom: "ip2" 433 | bottom: "label" 434 | top: "loss" 435 | } 436 | I0116 21:12:14.201143 12881 layer_factory.hpp:76] Creating layer nuclei 437 | I0116 21:12:14.201164 12881 net.cpp:110] Creating Layer nuclei 438 | 
I0116 21:12:14.201171 12881 net.cpp:433] nuclei -> data 439 | I0116 21:12:14.201184 12881 net.cpp:433] nuclei -> label 440 | I0116 21:12:14.201197 12881 image_data_layer.cpp:37] Opening file /home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class/test.txt 441 | I0116 21:12:14.258472 12881 image_data_layer.cpp:52] A total of 120000 images. 442 | I0116 21:12:14.258759 12881 image_data_layer.cpp:79] output data size: 200,3,51,51 443 | I0116 21:12:14.283301 12881 net.cpp:155] Setting up nuclei 444 | I0116 21:12:14.283335 12881 net.cpp:163] Top shape: 200 3 51 51 (1560600) 445 | I0116 21:12:14.283370 12881 net.cpp:163] Top shape: 200 (200) 446 | I0116 21:12:14.283409 12881 layer_factory.hpp:76] Creating layer label_nuclei_1_split 447 | I0116 21:12:14.283432 12881 net.cpp:110] Creating Layer label_nuclei_1_split 448 | I0116 21:12:14.283440 12881 net.cpp:477] label_nuclei_1_split <- label 449 | I0116 21:12:14.283452 12881 net.cpp:433] label_nuclei_1_split -> label_nuclei_1_split_0 450 | I0116 21:12:14.283464 12881 net.cpp:433] label_nuclei_1_split -> label_nuclei_1_split_1 451 | I0116 21:12:14.283519 12881 net.cpp:155] Setting up label_nuclei_1_split 452 | I0116 21:12:14.283527 12881 net.cpp:163] Top shape: 200 (200) 453 | I0116 21:12:14.283535 12881 net.cpp:163] Top shape: 200 (200) 454 | I0116 21:12:14.283541 12881 layer_factory.hpp:76] Creating layer conv1 455 | I0116 21:12:14.283556 12881 net.cpp:110] Creating Layer conv1 456 | I0116 21:12:14.283566 12881 net.cpp:477] conv1 <- data 457 | I0116 21:12:14.283578 12881 net.cpp:433] conv1 -> conv1 458 | I0116 21:12:14.283821 12881 net.cpp:155] Setting up conv1 459 | I0116 21:12:14.283833 12881 net.cpp:163] Top shape: 200 48 48 48 (22118400) 460 | I0116 21:12:14.283850 12881 layer_factory.hpp:76] Creating layer pool1 461 | I0116 21:12:14.283861 12881 net.cpp:110] Creating Layer pool1 462 | I0116 21:12:14.283869 12881 net.cpp:477] pool1 <- conv1 463 | I0116 21:12:14.283879 12881 net.cpp:433] pool1 -> pool1 464 | I0116 21:12:14.283915 12881 net.cpp:155] Setting up pool1 465 | I0116 21:12:14.283922 12881 net.cpp:163] Top shape: 200 48 24 24 (5529600) 466 | I0116 21:12:14.283929 12881 layer_factory.hpp:76] Creating layer relu1 467 | I0116 21:12:14.283941 12881 net.cpp:110] Creating Layer relu1 468 | I0116 21:12:14.283947 12881 net.cpp:477] relu1 <- pool1 469 | I0116 21:12:14.283956 12881 net.cpp:419] relu1 -> pool1 (in-place) 470 | I0116 21:12:14.283964 12881 net.cpp:155] Setting up relu1 471 | I0116 21:12:14.283973 12881 net.cpp:163] Top shape: 200 48 24 24 (5529600) 472 | I0116 21:12:14.283979 12881 layer_factory.hpp:76] Creating layer conv2 473 | I0116 21:12:14.283992 12881 net.cpp:110] Creating Layer conv2 474 | I0116 21:12:14.283999 12881 net.cpp:477] conv2 <- pool1 475 | I0116 21:12:14.284010 12881 net.cpp:433] conv2 -> conv2 476 | I0116 21:12:14.292260 12881 net.cpp:155] Setting up conv2 477 | I0116 21:12:14.292289 12881 net.cpp:163] Top shape: 200 48 19 19 (3465600) 478 | I0116 21:12:14.292309 12881 layer_factory.hpp:76] Creating layer relu2 479 | I0116 21:12:14.292321 12881 net.cpp:110] Creating Layer relu2 480 | I0116 21:12:14.292327 12881 net.cpp:477] relu2 <- conv2 481 | I0116 21:12:14.292336 12881 net.cpp:419] relu2 -> conv2 (in-place) 482 | I0116 21:12:14.292347 12881 net.cpp:155] Setting up relu2 483 | I0116 21:12:14.292354 12881 net.cpp:163] Top shape: 200 48 19 19 (3465600) 484 | I0116 21:12:14.292361 12881 layer_factory.hpp:76] Creating layer pool2 485 | I0116 21:12:14.292371 12881 net.cpp:110] Creating Layer pool2 486 | 
I0116 21:12:14.292377 12881 net.cpp:477] pool2 <- conv2 487 | I0116 21:12:14.292385 12881 net.cpp:433] pool2 -> pool2 488 | I0116 21:12:14.292428 12881 net.cpp:155] Setting up pool2 489 | I0116 21:12:14.292440 12881 net.cpp:163] Top shape: 200 48 10 10 (960000) 490 | I0116 21:12:14.292445 12881 layer_factory.hpp:76] Creating layer ip_1 491 | I0116 21:12:14.292472 12881 net.cpp:110] Creating Layer ip_1 492 | I0116 21:12:14.292479 12881 net.cpp:477] ip_1 <- pool2 493 | I0116 21:12:14.292490 12881 net.cpp:433] ip_1 -> ip1 494 | I0116 21:12:14.296073 12881 net.cpp:155] Setting up ip_1 495 | I0116 21:12:14.296094 12881 net.cpp:163] Top shape: 200 30 (6000) 496 | I0116 21:12:14.296109 12881 layer_factory.hpp:76] Creating layer relu1 497 | I0116 21:12:14.296121 12881 net.cpp:110] Creating Layer relu1 498 | I0116 21:12:14.296131 12881 net.cpp:477] relu1 <- ip1 499 | I0116 21:12:14.296139 12881 net.cpp:419] relu1 -> ip1 (in-place) 500 | I0116 21:12:14.296149 12881 net.cpp:155] Setting up relu1 501 | I0116 21:12:14.296157 12881 net.cpp:163] Top shape: 200 30 (6000) 502 | I0116 21:12:14.296164 12881 layer_factory.hpp:76] Creating layer ip_2 503 | I0116 21:12:14.296172 12881 net.cpp:110] Creating Layer ip_2 504 | I0116 21:12:14.296178 12881 net.cpp:477] ip_2 <- ip1 505 | I0116 21:12:14.296191 12881 net.cpp:433] ip_2 -> ip2 506 | I0116 21:12:14.296290 12881 net.cpp:155] Setting up ip_2 507 | I0116 21:12:14.296299 12881 net.cpp:163] Top shape: 200 3 (600) 508 | I0116 21:12:14.296308 12881 layer_factory.hpp:76] Creating layer ip2_ip_2_0_split 509 | I0116 21:12:14.296341 12881 net.cpp:110] Creating Layer ip2_ip_2_0_split 510 | I0116 21:12:14.296352 12881 net.cpp:477] ip2_ip_2_0_split <- ip2 511 | I0116 21:12:14.296361 12881 net.cpp:433] ip2_ip_2_0_split -> ip2_ip_2_0_split_0 512 | I0116 21:12:14.296371 12881 net.cpp:433] ip2_ip_2_0_split -> ip2_ip_2_0_split_1 513 | I0116 21:12:14.296411 12881 net.cpp:155] Setting up ip2_ip_2_0_split 514 | I0116 21:12:14.296418 12881 net.cpp:163] Top shape: 200 3 (600) 515 | I0116 21:12:14.296425 12881 net.cpp:163] Top shape: 200 3 (600) 516 | I0116 21:12:14.296432 12881 layer_factory.hpp:76] Creating layer accuracy 517 | I0116 21:12:14.296440 12881 net.cpp:110] Creating Layer accuracy 518 | I0116 21:12:14.296448 12881 net.cpp:477] accuracy <- ip2_ip_2_0_split_0 519 | I0116 21:12:14.296455 12881 net.cpp:477] accuracy <- label_nuclei_1_split_0 520 | I0116 21:12:14.296464 12881 net.cpp:433] accuracy -> accuracy 521 | I0116 21:12:14.296478 12881 net.cpp:155] Setting up accuracy 522 | I0116 21:12:14.296488 12881 net.cpp:163] Top shape: (1) 523 | I0116 21:12:14.296494 12881 layer_factory.hpp:76] Creating layer loss 524 | I0116 21:12:14.296505 12881 net.cpp:110] Creating Layer loss 525 | I0116 21:12:14.296514 12881 net.cpp:477] loss <- ip2_ip_2_0_split_1 526 | I0116 21:12:14.296521 12881 net.cpp:477] loss <- label_nuclei_1_split_1 527 | I0116 21:12:14.296530 12881 net.cpp:433] loss -> loss 528 | I0116 21:12:14.296541 12881 layer_factory.hpp:76] Creating layer loss 529 | I0116 21:12:14.296620 12881 net.cpp:155] Setting up loss 530 | I0116 21:12:14.296640 12881 net.cpp:163] Top shape: (1) 531 | I0116 21:12:14.296646 12881 net.cpp:168] with loss weight 1 532 | I0116 21:12:14.296663 12881 net.cpp:236] loss needs backward computation. 533 | I0116 21:12:14.296670 12881 net.cpp:240] accuracy does not need backward computation. 534 | I0116 21:12:14.296677 12881 net.cpp:236] ip2_ip_2_0_split needs backward computation. 
535 | I0116 21:12:14.296684 12881 net.cpp:236] ip_2 needs backward computation. 536 | I0116 21:12:14.296689 12881 net.cpp:236] relu1 needs backward computation. 537 | I0116 21:12:14.296694 12881 net.cpp:236] ip_1 needs backward computation. 538 | I0116 21:12:14.296700 12881 net.cpp:236] pool2 needs backward computation. 539 | I0116 21:12:14.296706 12881 net.cpp:236] relu2 needs backward computation. 540 | I0116 21:12:14.296712 12881 net.cpp:236] conv2 needs backward computation. 541 | I0116 21:12:14.296718 12881 net.cpp:236] relu1 needs backward computation. 542 | I0116 21:12:14.296725 12881 net.cpp:236] pool1 needs backward computation. 543 | I0116 21:12:14.296730 12881 net.cpp:236] conv1 needs backward computation. 544 | I0116 21:12:14.296737 12881 net.cpp:240] label_nuclei_1_split does not need backward computation. 545 | I0116 21:12:14.296743 12881 net.cpp:240] nuclei does not need backward computation. 546 | I0116 21:12:14.296749 12881 net.cpp:283] This network produces output accuracy 547 | I0116 21:12:14.296756 12881 net.cpp:283] This network produces output loss 548 | I0116 21:12:14.296788 12881 net.cpp:297] Network initialization done. 549 | I0116 21:12:14.296794 12881 net.cpp:298] Memory required for data: 170575208 550 | I0116 21:12:14.296846 12881 solver.cpp:66] Solver scaffolding done. 551 | I0116 21:12:14.297072 12881 caffe.cpp:212] Starting Optimization 552 | I0116 21:12:14.297082 12881 solver.cpp:294] Solving NUCLEI_three_class 553 | I0116 21:12:14.297088 12881 solver.cpp:295] Learning Rate Policy: fixed 554 | I0116 21:12:14.297480 12881 solver.cpp:347] Iteration 0, Testing net (#0) 555 | I0116 21:12:14.299542 12881 blocking_queue.cpp:50] Data layer prefetch queue empty 556 | I0116 21:14:10.179394 12881 solver.cpp:415] Test net output #0: accuracy = 0.333333 557 | I0116 21:14:10.179466 12881 solver.cpp:415] Test net output #1: loss = 1.10398 (* 1 = 1.10398 loss) 558 | I0116 21:14:10.521515 12881 solver.cpp:243] Iteration 0, loss = 1.08765 559 | I0116 21:14:10.521556 12881 solver.cpp:259] Train net output #0: loss = 1.08765 (* 1 = 1.08765 loss) 560 | I0116 21:14:10.521572 12881 solver.cpp:590] Iteration 0, lr = 0.001 561 | I0116 21:30:00.158116 12881 solver.cpp:243] Iteration 3000, loss = 0.544146 562 | I0116 21:30:00.158193 12881 solver.cpp:259] Train net output #0: loss = 0.544146 (* 1 = 0.544146 loss) 563 | I0116 21:30:00.158203 12881 solver.cpp:590] Iteration 3000, lr = 0.001 564 | I0116 21:45:53.718721 12881 solver.cpp:243] Iteration 6000, loss = 0.520844 565 | I0116 21:45:53.718785 12881 solver.cpp:259] Train net output #0: loss = 0.520844 (* 1 = 0.520844 loss) 566 | I0116 21:45:53.718793 12881 solver.cpp:590] Iteration 6000, lr = 0.001 567 | I0116 22:01:37.995573 12881 solver.cpp:243] Iteration 9000, loss = 0.49004 568 | I0116 22:01:37.995735 12881 solver.cpp:259] Train net output #0: loss = 0.49004 (* 1 = 0.49004 loss) 569 | I0116 22:01:37.995749 12881 solver.cpp:590] Iteration 9000, lr = 0.001 570 | I0116 22:17:31.574439 12881 solver.cpp:243] Iteration 12000, loss = 0.4899 571 | I0116 22:17:31.574499 12881 solver.cpp:259] Train net output #0: loss = 0.4899 (* 1 = 0.4899 loss) 572 | I0116 22:17:31.574512 12881 solver.cpp:590] Iteration 12000, lr = 0.001 573 | I0116 22:33:26.304510 12881 solver.cpp:243] Iteration 15000, loss = 0.473584 574 | I0116 22:33:26.304594 12881 solver.cpp:259] Train net output #0: loss = 0.473584 (* 1 = 0.473584 loss) 575 | I0116 22:33:26.304608 12881 solver.cpp:590] Iteration 15000, lr = 0.001 576 | I0116 22:49:14.933800 12881 solver.cpp:243] 
Iteration 18000, loss = 0.480293 577 | I0116 22:49:14.933861 12881 solver.cpp:259] Train net output #0: loss = 0.480293 (* 1 = 0.480293 loss) 578 | I0116 22:49:14.933873 12881 solver.cpp:590] Iteration 18000, lr = 0.001 579 | I0116 23:04:57.942595 12881 solver.cpp:243] Iteration 21000, loss = 0.468498 580 | I0116 23:04:57.942657 12881 solver.cpp:259] Train net output #0: loss = 0.468498 (* 1 = 0.468498 loss) 581 | I0116 23:04:57.942670 12881 solver.cpp:590] Iteration 21000, lr = 0.001 582 | I0116 23:20:50.280272 12881 solver.cpp:243] Iteration 24000, loss = 0.468188 583 | I0116 23:20:50.280333 12881 solver.cpp:259] Train net output #0: loss = 0.468188 (* 1 = 0.468188 loss) 584 | I0116 23:20:50.280341 12881 solver.cpp:590] Iteration 24000, lr = 0.001 585 | I0116 23:36:32.541563 12881 solver.cpp:243] Iteration 27000, loss = 0.467932 586 | I0116 23:36:32.541724 12881 solver.cpp:259] Train net output #0: loss = 0.467932 (* 1 = 0.467932 loss) 587 | I0116 23:36:32.541738 12881 solver.cpp:590] Iteration 27000, lr = 0.001 588 | I0116 23:52:14.555645 12881 solver.cpp:468] Snapshotting to binary proto file /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap_iter_30000.caffemodel 589 | I0116 23:52:14.618702 12881 solver.cpp:753] Snapshotting solver state to binary proto file /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap_iter_30000.solverstate 590 | I0116 23:52:14.620237 12881 solver.cpp:347] Iteration 30000, Testing net (#0) 591 | I0116 23:54:09.987608 12881 solver.cpp:415] Test net output #0: accuracy = 0.560525 592 | I0116 23:54:09.987684 12881 solver.cpp:415] Test net output #1: loss = 1.28692 (* 1 = 1.28692 loss) 593 | I0116 23:54:10.313585 12881 solver.cpp:243] Iteration 30000, loss = 0.476391 594 | I0116 23:54:10.313626 12881 solver.cpp:259] Train net output #0: loss = 0.476391 (* 1 = 0.476391 loss) 595 | I0116 23:54:10.313637 12881 solver.cpp:590] Iteration 30000, lr = 0.001 596 | I0117 00:10:02.085438 12881 solver.cpp:243] Iteration 33000, loss = 0.435501 597 | I0117 00:10:02.085585 12881 solver.cpp:259] Train net output #0: loss = 0.435501 (* 1 = 0.435501 loss) 598 | I0117 00:10:02.085598 12881 solver.cpp:590] Iteration 33000, lr = 0.001 599 | I0117 00:25:35.217404 12881 solver.cpp:243] Iteration 36000, loss = 0.436308 600 | I0117 00:25:35.217464 12881 solver.cpp:259] Train net output #0: loss = 0.436308 (* 1 = 0.436308 loss) 601 | I0117 00:25:35.217473 12881 solver.cpp:590] Iteration 36000, lr = 0.001 602 | I0117 00:41:13.466948 12881 solver.cpp:243] Iteration 39000, loss = 0.438134 603 | I0117 00:41:13.467103 12881 solver.cpp:259] Train net output #0: loss = 0.438134 (* 1 = 0.438134 loss) 604 | I0117 00:41:13.467120 12881 solver.cpp:590] Iteration 39000, lr = 0.001 605 | I0117 00:56:48.379218 12881 solver.cpp:243] Iteration 42000, loss = 0.418718 606 | I0117 00:56:48.379371 12881 solver.cpp:259] Train net output #0: loss = 0.418718 (* 1 = 0.418718 loss) 607 | I0117 00:56:48.379389 12881 solver.cpp:590] Iteration 42000, lr = 0.001 608 | I0117 01:12:44.831900 12881 solver.cpp:243] Iteration 45000, loss = 0.408234 609 | I0117 01:12:44.831965 12881 solver.cpp:259] Train net output #0: loss = 0.408234 (* 1 = 0.408234 loss) 610 | I0117 01:12:44.831977 12881 solver.cpp:590] Iteration 45000, lr = 0.001 611 | I0117 01:28:42.064648 12881 solver.cpp:243] Iteration 48000, loss = 0.402349 612 | I0117 01:28:42.065037 12881 solver.cpp:259] Train net output #0: loss = 0.402349 (* 1 = 0.402349 loss) 613 | I0117 01:28:42.065052 
12881 solver.cpp:590] Iteration 48000, lr = 0.001 614 | I0117 01:46:24.795483 12881 solver.cpp:243] Iteration 51000, loss = 0.390444 615 | I0117 01:46:24.795572 12881 solver.cpp:259] Train net output #0: loss = 0.390444 (* 1 = 0.390444 loss) 616 | I0117 01:46:24.795580 12881 solver.cpp:590] Iteration 51000, lr = 0.001 617 | I0117 02:04:00.188930 12881 solver.cpp:243] Iteration 54000, loss = 0.390611 618 | I0117 02:04:00.188984 12881 solver.cpp:259] Train net output #0: loss = 0.390611 (* 1 = 0.390611 loss) 619 | I0117 02:04:00.188992 12881 solver.cpp:590] Iteration 54000, lr = 0.001 620 | I0117 02:22:04.905829 12881 solver.cpp:243] Iteration 57000, loss = 0.395128 621 | I0117 02:22:04.906045 12881 solver.cpp:259] Train net output #0: loss = 0.395128 (* 1 = 0.395128 loss) 622 | I0117 02:22:04.906060 12881 solver.cpp:590] Iteration 57000, lr = 0.001 623 | I0117 02:38:49.715819 12881 solver.cpp:468] Snapshotting to binary proto file /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap_iter_60000.caffemodel 624 | I0117 02:38:49.721066 12881 solver.cpp:753] Snapshotting solver state to binary proto file /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap_iter_60000.solverstate 625 | I0117 02:38:49.723857 12881 solver.cpp:347] Iteration 60000, Testing net (#0) 626 | I0117 02:40:45.185480 12881 solver.cpp:415] Test net output #0: accuracy = 0.550633 627 | I0117 02:40:45.185556 12881 solver.cpp:415] Test net output #1: loss = 1.61927 (* 1 = 1.61927 loss) 628 | I0117 02:40:45.505532 12881 solver.cpp:243] Iteration 60000, loss = 0.368442 629 | I0117 02:40:45.505578 12881 solver.cpp:259] Train net output #0: loss = 0.368442 (* 1 = 0.368442 loss) 630 | I0117 02:40:45.505589 12881 solver.cpp:590] Iteration 60000, lr = 0.001 631 | I0117 02:57:25.799528 12881 solver.cpp:243] Iteration 63000, loss = 0.344531 632 | I0117 02:57:25.799624 12881 solver.cpp:259] Train net output #0: loss = 0.344531 (* 1 = 0.344531 loss) 633 | I0117 02:57:25.799634 12881 solver.cpp:590] Iteration 63000, lr = 0.001 634 | I0117 03:15:02.022908 12881 solver.cpp:243] Iteration 66000, loss = 0.36179 635 | I0117 03:15:02.022976 12881 solver.cpp:259] Train net output #0: loss = 0.36179 (* 1 = 0.36179 loss) 636 | I0117 03:15:02.022989 12881 solver.cpp:590] Iteration 66000, lr = 0.001 637 | I0117 03:33:22.108813 12881 solver.cpp:243] Iteration 69000, loss = 0.341526 638 | I0117 03:33:22.108891 12881 solver.cpp:259] Train net output #0: loss = 0.341526 (* 1 = 0.341526 loss) 639 | I0117 03:33:22.108899 12881 solver.cpp:590] Iteration 69000, lr = 0.001 640 | I0117 03:51:16.566756 12881 solver.cpp:243] Iteration 72000, loss = 0.320884 641 | I0117 03:51:16.566859 12881 solver.cpp:259] Train net output #0: loss = 0.320884 (* 1 = 0.320884 loss) 642 | I0117 03:51:16.566871 12881 solver.cpp:590] Iteration 72000, lr = 0.001 643 | I0117 03:58:51.936679 12881 blocking_queue.cpp:50] Data layer prefetch queue empty 644 | I0117 04:11:08.354879 12881 solver.cpp:243] Iteration 75000, loss = 0.316633 645 | I0117 04:11:08.354967 12881 solver.cpp:259] Train net output #0: loss = 0.316633 (* 1 = 0.316633 loss) 646 | I0117 04:11:08.354977 12881 solver.cpp:590] Iteration 75000, lr = 0.001 647 | I0117 04:28:49.701539 12881 solver.cpp:243] Iteration 78000, loss = 0.302039 648 | I0117 04:28:49.701701 12881 solver.cpp:259] Train net output #0: loss = 0.302039 (* 1 = 0.302039 loss) 649 | I0117 04:28:49.701715 12881 solver.cpp:590] Iteration 78000, lr = 0.001 650 | I0117 04:44:54.799566 12881 
solver.cpp:243] Iteration 81000, loss = 0.274276 651 | I0117 04:44:54.799727 12881 solver.cpp:259] Train net output #0: loss = 0.274277 (* 1 = 0.274277 loss) 652 | I0117 04:44:54.799742 12881 solver.cpp:590] Iteration 81000, lr = 0.001 653 | I0117 05:01:11.117290 12881 solver.cpp:243] Iteration 84000, loss = 0.286923 654 | I0117 05:01:11.117360 12881 solver.cpp:259] Train net output #0: loss = 0.286923 (* 1 = 0.286923 loss) 655 | I0117 05:01:11.117374 12881 solver.cpp:590] Iteration 84000, lr = 0.001 656 | I0117 05:16:51.046028 12881 solver.cpp:243] Iteration 87000, loss = 0.266643 657 | I0117 05:16:51.046098 12881 solver.cpp:259] Train net output #0: loss = 0.266643 (* 1 = 0.266643 loss) 658 | I0117 05:16:51.046113 12881 solver.cpp:590] Iteration 87000, lr = 0.001 659 | I0117 05:32:41.652672 12881 solver.cpp:468] Snapshotting to binary proto file /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap_iter_90000.caffemodel 660 | I0117 05:32:41.692188 12881 solver.cpp:753] Snapshotting solver state to binary proto file /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap_iter_90000.solverstate 661 | I0117 05:32:41.693541 12881 solver.cpp:347] Iteration 90000, Testing net (#0) 662 | I0117 05:40:16.309430 12881 solver.cpp:415] Test net output #0: accuracy = 0.561092 663 | I0117 05:40:16.309586 12881 solver.cpp:415] Test net output #1: loss = 2.08509 (* 1 = 2.08509 loss) 664 | I0117 05:40:16.627897 12881 solver.cpp:243] Iteration 90000, loss = 0.253357 665 | I0117 05:40:16.627925 12881 solver.cpp:259] Train net output #0: loss = 0.253358 (* 1 = 0.253358 loss) 666 | I0117 05:40:16.627934 12881 solver.cpp:590] Iteration 90000, lr = 0.001 667 | I0117 05:56:08.510033 12881 solver.cpp:243] Iteration 93000, loss = 0.241466 668 | I0117 05:56:08.510133 12881 solver.cpp:259] Train net output #0: loss = 0.241466 (* 1 = 0.241466 loss) 669 | I0117 05:56:08.510148 12881 solver.cpp:590] Iteration 93000, lr = 0.001 670 | I0117 06:12:49.917255 12881 solver.cpp:243] Iteration 96000, loss = 0.232458 671 | I0117 06:12:49.917424 12881 solver.cpp:259] Train net output #0: loss = 0.232458 (* 1 = 0.232458 loss) 672 | I0117 06:12:49.917436 12881 solver.cpp:590] Iteration 96000, lr = 0.001 673 | I0117 06:34:10.859277 12881 solver.cpp:243] Iteration 99000, loss = 0.210353 674 | I0117 06:34:10.859436 12881 solver.cpp:259] Train net output #0: loss = 0.210353 (* 1 = 0.210353 loss) 675 | I0117 06:34:10.859463 12881 solver.cpp:590] Iteration 99000, lr = 0.001 676 | I0117 06:54:22.603512 12881 solver.cpp:243] Iteration 102000, loss = 0.212466 677 | I0117 06:54:22.603663 12881 solver.cpp:259] Train net output #0: loss = 0.212467 (* 1 = 0.212467 loss) 678 | I0117 06:54:22.603677 12881 solver.cpp:590] Iteration 102000, lr = 0.001 679 | I0117 07:11:07.405751 12881 solver.cpp:243] Iteration 105000, loss = 0.211023 680 | I0117 07:11:07.405827 12881 solver.cpp:259] Train net output #0: loss = 0.211024 (* 1 = 0.211024 loss) 681 | I0117 07:11:07.405840 12881 solver.cpp:590] Iteration 105000, lr = 0.001 682 | I0117 07:26:13.518298 12881 solver.cpp:243] Iteration 108000, loss = 0.221168 683 | I0117 07:26:13.518371 12881 solver.cpp:259] Train net output #0: loss = 0.221168 (* 1 = 0.221168 loss) 684 | I0117 07:26:13.518380 12881 solver.cpp:590] Iteration 108000, lr = 0.001 685 | I0117 07:41:56.980661 12881 solver.cpp:243] Iteration 111000, loss = 0.221228 686 | I0117 07:41:56.980751 12881 solver.cpp:259] Train net output #0: loss = 0.221228 (* 1 = 0.221228 loss) 687 | 
I0117 07:41:56.980762 12881 solver.cpp:590] Iteration 111000, lr = 0.001 688 | I0117 07:58:24.708037 12881 solver.cpp:243] Iteration 114000, loss = 0.231869 689 | I0117 07:58:24.708183 12881 solver.cpp:259] Train net output #0: loss = 0.231869 (* 1 = 0.231869 loss) 690 | I0117 07:58:24.708199 12881 solver.cpp:590] Iteration 114000, lr = 0.001 691 | I0117 08:04:54.425634 12881 blocking_queue.cpp:50] Data layer prefetch queue empty 692 | I0117 08:20:30.154167 12881 solver.cpp:243] Iteration 117000, loss = 0.215031 693 | I0117 08:20:30.154268 12881 solver.cpp:259] Train net output #0: loss = 0.215031 (* 1 = 0.215031 loss) 694 | I0117 08:20:30.154284 12881 solver.cpp:590] Iteration 117000, lr = 0.001 695 | I0117 08:40:00.659829 12881 solver.cpp:468] Snapshotting to binary proto file /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap_iter_120000.caffemodel 696 | I0117 08:40:03.308351 12881 solver.cpp:753] Snapshotting solver state to binary proto file /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap_iter_120000.solverstate 697 | I0117 08:40:03.310046 12881 solver.cpp:347] Iteration 120000, Testing net (#0) 698 | I0117 08:43:44.633774 12881 solver.cpp:415] Test net output #0: accuracy = 0.5928 699 | I0117 08:43:44.633860 12881 solver.cpp:415] Test net output #1: loss = 2.76577 (* 1 = 2.76577 loss) 700 | I0117 08:43:44.967195 12881 solver.cpp:243] Iteration 120000, loss = 0.223773 701 | I0117 08:43:44.967238 12881 solver.cpp:259] Train net output #0: loss = 0.223773 (* 1 = 0.223773 loss) 702 | I0117 08:43:44.967245 12881 solver.cpp:590] Iteration 120000, lr = 0.001 703 | I0117 09:01:14.980782 12881 solver.cpp:243] Iteration 123000, loss = 0.217715 704 | I0117 09:01:14.980937 12881 solver.cpp:259] Train net output #0: loss = 0.217715 (* 1 = 0.217715 loss) 705 | I0117 09:01:14.980950 12881 solver.cpp:590] Iteration 123000, lr = 0.001 706 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_120000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_120000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_120000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_120000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_30000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_30000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_30000.solverstate: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_30000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_60000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_60000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_60000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_60000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_90000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_90000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_90000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_snap_iter_90000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_solver.prototxt: -------------------------------------------------------------------------------- 1 | # Fixed learning rate; the step policy below (drop by 10x every 20000 iters) is left commented out 2 | 3 | # The train/test net protocol buffer definition 4 | net: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # With the test batch size of 200 used here, 600 test iterations 7 | # cover 120,000 patches per test pass. 8 | test_iter: 600 9 | # Carry out testing every 30000 training iterations. 10 | test_interval: 30000 11 | # The base learning rate, momentum and the weight decay of the network.
12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.0005 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # lr_policy: "step" 18 | # stepsize: 20000 19 | # gamma: 0.1 20 | # Display every 100 iterations 21 | display: 3000 22 | # The maximum number of iterations 23 | max_iter: 150000 24 | # snapshot intermediate results 25 | snapshot: 30000 26 | snapshot_prefix: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_snap" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/three_class_nuclei_train_test.prototxt: -------------------------------------------------------------------------------- 1 | name: "NUCLEI_three_class" 2 | layer { 3 | name: "nuclei" 4 | type: "ImageData" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | image_data_param { 11 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class/train.txt" 12 | batch_size: 100 13 | } 14 | } 15 | layer { 16 | name: "nuclei" 17 | type: "ImageData" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | image_data_param { 24 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class/test.txt" 25 | batch_size: 200 26 | } 27 | } 28 | layer { 29 | name: "conv1" 30 | type: "Convolution" 31 | bottom: "data" 32 | top: "conv1" 33 | param { 34 | lr_mult: 0.1 35 | } 36 | param { 37 | lr_mult: 0.2 38 | } 39 | convolution_param { 40 | num_output: 48 41 | pad: 0 42 | kernel_size: 4 43 | stride: 1 44 | weight_filler { 45 | type: "gaussian" 46 | std: 0.0001 47 | } 48 | bias_filler { 49 | type: "constant" 50 | } 51 | } 52 | } 53 | layer { 54 | name: "pool1" 55 | type: "Pooling" 56 | bottom: "conv1" 57 | top: "pool1" 58 | pooling_param { 59 | pool: MAX 60 | kernel_size: 2 61 | stride: 2 62 | } 63 | } 64 | layer { 65 | name: "relu1" 66 | type: "ReLU" 67 | bottom: "pool1" 68 | top: "pool1" 69 | } 70 | layer { 71 | name: "conv2" 72 | type: "Convolution" 73 | bottom: "pool1" 74 | top: "conv2" 75 | param { 76 | lr_mult: 0.1 77 | } 78 | param { 79 | lr_mult: 0.2 80 | } 81 | convolution_param { 82 | num_output: 48 83 | pad: 0 84 | kernel_size: 6 85 | stride: 1 86 | weight_filler { 87 | type: "gaussian" 88 | std: 0.01 89 | } 90 | bias_filler { 91 | type: "constant" 92 | } 93 | } 94 | } 95 | layer { 96 | name: "relu2" 97 | type: "ReLU" 98 | bottom: "conv2" 99 | top: "conv2" 100 | } 101 | layer { 102 | name: "pool2" 103 | type: "Pooling" 104 | bottom: "conv2" 105 | top: "pool2" 106 | pooling_param { 107 | pool: MAX 108 | kernel_size: 2 109 | stride: 2 110 | } 111 | } 112 | layer { 113 | name: "ip_1" 114 | type: "InnerProduct" 115 | bottom: "pool2" 116 | top: "ip1" 117 | param { 118 | lr_mult: 1 119 | } 120 | param { 121 | lr_mult: 2 122 | } 123 | inner_product_param { 124 | num_output: 30 125 | weight_filler { 126 | type: "gaussian" 127 | std: 0.1 128 | } 129 | bias_filler { 130 | type: "constant" 131 | } 132 | } 133 | } 134 | layer { 135 | name: "relu1" 136 | type: "ReLU" 137 | bottom: "ip1" 138 | top: "ip1" 139 | } 140 | layer { 141 | name: "ip_2" 142 | type: "InnerProduct" 143 | bottom: "ip1" 144 | top: "ip2" 145 | param { 146 | lr_mult: 1 147 | } 148 | param { 149 | lr_mult: 2 150 | } 151 | inner_product_param { 152 | num_output: 3 153 | weight_filler { 154 | type: "gaussian" 155 | std: 0.1 156 | } 157 | bias_filler { 158 | type: "constant" 159 | } 160 | } 161 | } 162 | layer { 163 | name: 
"accuracy" 164 | type: "Accuracy" 165 | bottom: "ip2" 166 | bottom: "label" 167 | top: "accuracy" 168 | include { 169 | phase: TEST 170 | } 171 | } 172 | layer { 173 | name: "loss" 174 | type: "SoftmaxWithLoss" 175 | bottom: "ip2" 176 | bottom: "label" 177 | top: "loss" 178 | } 179 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_1/train_three_class_nuclei.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/three_class_nuclei_solver.prototxt \ 7 | 2>&1 | tee /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/output_1.txt 8 | #--weights=examples/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.caffemodel 9 | #--snapshot=~/Projects/nuclei-net/caffe-scripts/nuclei/three_class_nuclei_1_iter_10000.solverstate \ 10 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_120000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_120000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_120000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_120000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_30000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_30000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_30000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_30000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_60000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_60000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_60000.solverstate: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_60000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_90000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_90000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_90000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap_iter_90000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_solver.prototxt: -------------------------------------------------------------------------------- 1 | # Fixed learning rate; the step policy below (drop by 10x every 20000 iters) is left commented out 2 | 3 | # The train/test net protocol buffer definition 4 | net: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # With the test batch size of 200 used here, 600 test iterations 7 | # cover 120,000 patches per test pass. 8 | test_iter: 600 9 | # Carry out testing every 30000 training iterations. 10 | test_interval: 30000 11 | # The base learning rate, momentum and the weight decay of the network.
12 | base_lr: 0.01 13 | momentum: 0.9 14 | weight_decay: 0.0005 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # lr_policy: "step" 18 | # stepsize: 20000 19 | # gamma: 0.1 20 | # Display every 100 iterations 21 | display: 3000 22 | # The maximum number of iterations 23 | max_iter: 150000 24 | # snapshot intermediate results 25 | snapshot: 30000 26 | snapshot_prefix: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_snap" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_train_test.prototxt: -------------------------------------------------------------------------------- 1 | name: "NUCLEI_three_class" 2 | layer { 3 | name: "nuclei" 4 | type: "ImageData" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | image_data_param { 11 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class/train.txt" 12 | batch_size: 100 13 | } 14 | } 15 | layer { 16 | name: "nuclei" 17 | type: "ImageData" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | image_data_param { 24 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class/test.txt" 25 | batch_size: 200 26 | } 27 | } 28 | layer { 29 | name: "conv1" 30 | type: "Convolution" 31 | bottom: "data" 32 | top: "conv1" 33 | param { 34 | lr_mult: 0.1 35 | } 36 | param { 37 | lr_mult: 0.2 38 | } 39 | convolution_param { 40 | num_output: 24 41 | pad: 0 42 | kernel_size: 4 43 | stride: 1 44 | weight_filler { 45 | type: "gaussian" 46 | std: 0.0001 47 | } 48 | bias_filler { 49 | type: "constant" 50 | } 51 | } 52 | } 53 | layer { 54 | name: "pool1" 55 | type: "Pooling" 56 | bottom: "conv1" 57 | top: "pool1" 58 | pooling_param { 59 | pool: MAX 60 | kernel_size: 2 61 | stride: 2 62 | } 63 | } 64 | layer { 65 | name: "relu1" 66 | type: "ReLU" 67 | bottom: "pool1" 68 | top: "pool1" 69 | } 70 | layer { 71 | name: "conv2" 72 | type: "Convolution" 73 | bottom: "pool1" 74 | top: "conv2" 75 | param { 76 | lr_mult: 0.1 77 | } 78 | param { 79 | lr_mult: 0.2 80 | } 81 | convolution_param { 82 | num_output: 48 83 | pad: 0 84 | kernel_size: 6 85 | stride: 1 86 | weight_filler { 87 | type: "gaussian" 88 | std: 0.01 89 | } 90 | bias_filler { 91 | type: "constant" 92 | } 93 | } 94 | } 95 | layer { 96 | name: "relu2" 97 | type: "ReLU" 98 | bottom: "conv2" 99 | top: "conv2" 100 | } 101 | layer { 102 | name: "pool2" 103 | type: "Pooling" 104 | bottom: "conv2" 105 | top: "pool2" 106 | pooling_param { 107 | pool: MAX 108 | kernel_size: 2 109 | stride: 2 110 | } 111 | } 112 | layer { 113 | name: "ip_1" 114 | type: "InnerProduct" 115 | bottom: "pool2" 116 | top: "ip1" 117 | param { 118 | lr_mult: 1 119 | } 120 | param { 121 | lr_mult: 2 122 | } 123 | inner_product_param { 124 | num_output: 500 125 | weight_filler { 126 | type: "gaussian" 127 | std: 0.1 128 | } 129 | bias_filler { 130 | type: "constant" 131 | } 132 | } 133 | } 134 | layer { 135 | name: "relu1" 136 | type: "ReLU" 137 | bottom: "ip1" 138 | top: "ip1" 139 | } 140 | layer { 141 | name: "ip_2" 142 | type: "InnerProduct" 143 | bottom: "ip1" 144 | top: "ip2" 145 | param { 146 | lr_mult: 1 147 | } 148 | param { 149 | lr_mult: 2 150 | } 151 | inner_product_param { 152 | num_output: 3 153 | weight_filler { 154 | type: "gaussian" 155 | std: 0.1 156 | } 157 | bias_filler { 158 | type: "constant" 159 | } 160 | } 161 | } 162 | layer { 163 | 
name: "accuracy" 164 | type: "Accuracy" 165 | bottom: "ip2" 166 | bottom: "label" 167 | top: "accuracy" 168 | include { 169 | phase: TEST 170 | } 171 | } 172 | layer { 173 | name: "loss" 174 | type: "SoftmaxWithLoss" 175 | bottom: "ip2" 176 | bottom: "label" 177 | top: "loss" 178 | } 179 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_2/train_three_class_nuclei.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_2/three_class_nuclei_solver.prototxt \ 7 | 2>&1 | tee /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_2/output_1.txt 8 | #--weights=examples/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.caffemodel 9 | #--snapshot=~/Projects/nuclei-net/caffe-scripts/nuclei/three_class_nuclei_1_iter_10000.solverstate \ 10 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_3/output_1.txt: -------------------------------------------------------------------------------- 1 | I0126 12:09:09.265635 5887 caffe.cpp:184] Using GPUs 0 2 | I0126 12:09:09.536996 5887 solver.cpp:54] Initializing solver from parameters: 3 | test_iter: 150 4 | test_interval: 6000 5 | base_lr: 0.01 6 | display: 300 7 | max_iter: 30000 8 | lr_policy: "fixed" 9 | momentum: 0.9 10 | weight_decay: 0.0005 11 | snapshot: 10000 12 | snapshot_prefix: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_3/snap" 13 | solver_mode: GPU 14 | device_id: 0 15 | net: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_3/train_test.prototxt" 16 | I0126 12:09:09.537160 5887 solver.cpp:97] Creating training net from net file: /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_3/train_test.prototxt 17 | I0126 12:09:09.570590 5887 net.cpp:339] The NetState phase (0) differed from the phase (1) specified by a rule in layer nuclei 18 | I0126 12:09:09.570646 5887 net.cpp:339] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy 19 | I0126 12:09:09.570801 5887 net.cpp:50] Initializing net from parameters: 20 | name: "NUCLEI_three_class" 21 | state { 22 | phase: TRAIN 23 | } 24 | layer { 25 | name: "nuclei" 26 | type: "Data" 27 | top: "data" 28 | top: "label" 29 | include { 30 | phase: TRAIN 31 | } 32 | data_param { 33 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class_101/train_lmdb" 34 | batch_size: 100 35 | backend: LMDB 36 | } 37 | } 38 | layer { 39 | name: "conv1" 40 | type: "Convolution" 41 | bottom: "data" 42 | top: "conv1" 43 | param { 44 | lr_mult: 1 45 | } 46 | param { 47 | lr_mult: 2 48 | } 49 | convolution_param { 50 | num_output: 16 51 | pad: 0 52 | kernel_size: 4 53 | stride: 1 54 | weight_filler { 55 | type: "gaussian" 56 | std: 0.01 57 | } 58 | bias_filler { 59 | type: "constant" 60 | value: 0.7 61 | } 62 | } 63 | } 64 | layer { 65 | name: "relu1" 66 | type: "ReLU" 67 | bottom: "conv1" 68 | top: "conv1" 69 | } 70 | layer { 71 | name: "pool1" 72 | type: "Pooling" 73 | bottom: "conv1" 74 | top: "pool1" 75 | pooling_param { 76 | pool: MAX 77 | kernel_size: 2 78 | stride: 2 79 | } 80 | } 81 | layer { 82 | name: "conv2" 83 | type: "Convolution" 84 | bottom: "pool1" 85 | top: "conv2" 86 | param { 87 | lr_mult: 1 88 | } 89 | param { 90 | lr_mult: 2 91 | } 92 | 
convolution_param { 93 | num_output: 16 94 | pad: 0 95 | kernel_size: 4 96 | stride: 1 97 | weight_filler { 98 | type: "gaussian" 99 | std: 0.01 100 | } 101 | bias_filler { 102 | type: "constant" 103 | value: 0.7 104 | } 105 | } 106 | } 107 | layer { 108 | name: "relu2" 109 | type: "ReLU" 110 | bottom: "conv2" 111 | top: "conv2" 112 | } 113 | layer { 114 | name: "pool2" 115 | type: "Pooling" 116 | bottom: "conv2" 117 | top: "pool2" 118 | pooling_param { 119 | pool: MAX 120 | kernel_size: 2 121 | stride: 2 122 | } 123 | } 124 | layer { 125 | name: "conv3" 126 | type: "Convolution" 127 | bottom: "pool2" 128 | top: "conv3" 129 | param { 130 | lr_mult: 1 131 | } 132 | param { 133 | lr_mult: 2 134 | } 135 | convolution_param { 136 | num_output: 16 137 | pad: 0 138 | kernel_size: 4 139 | stride: 1 140 | weight_filler { 141 | type: "gaussian" 142 | std: 0.01 143 | } 144 | bias_filler { 145 | type: "constant" 146 | value: 0.7 147 | } 148 | } 149 | } 150 | layer { 151 | name: "relu3" 152 | type: "ReLU" 153 | bottom: "conv3" 154 | top: "conv3" 155 | } 156 | layer { 157 | name: "pool3" 158 | type: "Pooling" 159 | bottom: "conv3" 160 | top: "pool3" 161 | pooling_param { 162 | pool: MAX 163 | kernel_size: 2 164 | stride: 2 165 | } 166 | } 167 | layer { 168 | name: "conv4" 169 | type: "Convolution" 170 | bottom: "pool3" 171 | top: "conv4" 172 | param { 173 | lr_mult: 1 174 | } 175 | param { 176 | lr_mult: 2 177 | } 178 | convolution_param { 179 | num_output: 16 180 | pad: 0 181 | kernel_size: 3 182 | stride: 1 183 | weight_filler { 184 | type: "gaussian" 185 | std: 0.01 186 | } 187 | bias_filler { 188 | type: "constant" 189 | value: 0.7 190 | } 191 | } 192 | } 193 | layer { 194 | name: "relu4" 195 | type: "ReLU" 196 | bottom: "conv4" 197 | top: "conv4" 198 | } 199 | layer { 200 | name: "pool4" 201 | type: "Pooling" 202 | bottom: "conv4" 203 | top: "pool4" 204 | pooling_param { 205 | pool: MAX 206 | kernel_size: 2 207 | stride: 2 208 | } 209 | } 210 | layer { 211 | name: "ip1" 212 | type: "InnerProduct" 213 | bottom: "pool4" 214 | top: "ip1" 215 | param { 216 | lr_mult: 1 217 | } 218 | param { 219 | lr_mult: 2 220 | } 221 | inner_product_param { 222 | num_output: 100 223 | weight_filler { 224 | type: "gaussian" 225 | std: 0.01 226 | } 227 | bias_filler { 228 | type: "constant" 229 | value: 0.1 230 | } 231 | } 232 | } 233 | layer { 234 | name: "relu5" 235 | type: "ReLU" 236 | bottom: "ip1" 237 | top: "ip1" 238 | } 239 | layer { 240 | name: "ip2" 241 | type: "InnerProduct" 242 | bottom: "ip1" 243 | top: "ip2" 244 | param { 245 | lr_mult: 1 246 | } 247 | param { 248 | lr_mult: 2 249 | } 250 | inner_product_param { 251 | num_output: 3 252 | weight_filler { 253 | type: "gaussian" 254 | std: 0.1 255 | } 256 | bias_filler { 257 | type: "constant" 258 | } 259 | } 260 | } 261 | layer { 262 | name: "loss" 263 | type: "SoftmaxWithLoss" 264 | bottom: "ip2" 265 | bottom: "label" 266 | top: "loss" 267 | } 268 | I0126 12:09:09.570899 5887 layer_factory.hpp:76] Creating layer nuclei 269 | I0126 12:09:09.597102 5887 net.cpp:110] Creating Layer nuclei 270 | I0126 12:09:09.597134 5887 net.cpp:433] nuclei -> data 271 | I0126 12:09:09.597373 5887 net.cpp:433] nuclei -> label 272 | I0126 12:09:09.620983 5891 db_lmdb.cpp:23] Opened lmdb /home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class_101/train_lmdb 273 | I0126 12:09:10.218623 5887 data_layer.cpp:45] output data size: 100,3,101,101 274 | I0126 12:09:10.261265 5887 net.cpp:155] Setting up nuclei 275 | I0126 12:09:10.261310 5887 net.cpp:163] Top shape: 100 3 
101 101 (3060300) 276 | I0126 12:09:10.261323 5887 net.cpp:163] Top shape: 100 (100) 277 | I0126 12:09:10.261334 5887 layer_factory.hpp:76] Creating layer conv1 278 | I0126 12:09:10.261713 5887 net.cpp:110] Creating Layer conv1 279 | I0126 12:09:10.261725 5887 net.cpp:477] conv1 <- data 280 | I0126 12:09:10.261744 5887 net.cpp:433] conv1 -> conv1 281 | I0126 12:09:10.275872 5887 net.cpp:155] Setting up conv1 282 | I0126 12:09:10.275938 5887 net.cpp:163] Top shape: 100 16 98 98 (15366400) 283 | I0126 12:09:10.276000 5887 layer_factory.hpp:76] Creating layer relu1 284 | I0126 12:09:10.276021 5887 net.cpp:110] Creating Layer relu1 285 | I0126 12:09:10.276031 5887 net.cpp:477] relu1 <- conv1 286 | I0126 12:09:10.276042 5887 net.cpp:419] relu1 -> conv1 (in-place) 287 | I0126 12:09:10.276072 5887 net.cpp:155] Setting up relu1 288 | I0126 12:09:10.276082 5887 net.cpp:163] Top shape: 100 16 98 98 (15366400) 289 | I0126 12:09:10.276087 5887 layer_factory.hpp:76] Creating layer pool1 290 | I0126 12:09:10.276096 5887 net.cpp:110] Creating Layer pool1 291 | I0126 12:09:10.276103 5887 net.cpp:477] pool1 <- conv1 292 | I0126 12:09:10.276111 5887 net.cpp:433] pool1 -> pool1 293 | I0126 12:09:10.276532 5887 net.cpp:155] Setting up pool1 294 | I0126 12:09:10.276579 5887 net.cpp:163] Top shape: 100 16 49 49 (3841600) 295 | I0126 12:09:10.276614 5887 layer_factory.hpp:76] Creating layer conv2 296 | I0126 12:09:10.276659 5887 net.cpp:110] Creating Layer conv2 297 | I0126 12:09:10.276681 5887 net.cpp:477] conv2 <- pool1 298 | I0126 12:09:10.276713 5887 net.cpp:433] conv2 -> conv2 299 | I0126 12:09:10.278671 5887 net.cpp:155] Setting up conv2 300 | I0126 12:09:10.278877 5887 net.cpp:163] Top shape: 100 16 46 46 (3385600) 301 | I0126 12:09:10.278913 5887 layer_factory.hpp:76] Creating layer relu2 302 | I0126 12:09:10.278945 5887 net.cpp:110] Creating Layer relu2 303 | I0126 12:09:10.278970 5887 net.cpp:477] relu2 <- conv2 304 | I0126 12:09:10.278995 5887 net.cpp:419] relu2 -> conv2 (in-place) 305 | I0126 12:09:10.279124 5887 net.cpp:155] Setting up relu2 306 | I0126 12:09:10.279157 5887 net.cpp:163] Top shape: 100 16 46 46 (3385600) 307 | I0126 12:09:10.279181 5887 layer_factory.hpp:76] Creating layer pool2 308 | I0126 12:09:10.279204 5887 net.cpp:110] Creating Layer pool2 309 | I0126 12:09:10.279234 5887 net.cpp:477] pool2 <- conv2 310 | I0126 12:09:10.279260 5887 net.cpp:433] pool2 -> pool2 311 | I0126 12:09:10.279434 5887 net.cpp:155] Setting up pool2 312 | I0126 12:09:10.279466 5887 net.cpp:163] Top shape: 100 16 23 23 (846400) 313 | I0126 12:09:10.279489 5887 layer_factory.hpp:76] Creating layer conv3 314 | I0126 12:09:10.279525 5887 net.cpp:110] Creating Layer conv3 315 | I0126 12:09:10.279548 5887 net.cpp:477] conv3 <- pool2 316 | I0126 12:09:10.279574 5887 net.cpp:433] conv3 -> conv3 317 | I0126 12:09:10.280172 5887 net.cpp:155] Setting up conv3 318 | I0126 12:09:10.280194 5887 net.cpp:163] Top shape: 100 16 20 20 (640000) 319 | I0126 12:09:10.280210 5887 layer_factory.hpp:76] Creating layer relu3 320 | I0126 12:09:10.280225 5887 net.cpp:110] Creating Layer relu3 321 | I0126 12:09:10.280231 5887 net.cpp:477] relu3 <- conv3 322 | I0126 12:09:10.280239 5887 net.cpp:419] relu3 -> conv3 (in-place) 323 | I0126 12:09:10.280256 5887 net.cpp:155] Setting up relu3 324 | I0126 12:09:10.280264 5887 net.cpp:163] Top shape: 100 16 20 20 (640000) 325 | I0126 12:09:10.280271 5887 layer_factory.hpp:76] Creating layer pool3 326 | I0126 12:09:10.280279 5887 net.cpp:110] Creating Layer pool3 327 | I0126 12:09:10.280304 
5887 net.cpp:477] pool3 <- conv3 328 | I0126 12:09:10.280315 5887 net.cpp:433] pool3 -> pool3 329 | I0126 12:09:10.280371 5887 net.cpp:155] Setting up pool3 330 | I0126 12:09:10.280385 5887 net.cpp:163] Top shape: 100 16 10 10 (160000) 331 | I0126 12:09:10.280392 5887 layer_factory.hpp:76] Creating layer conv4 332 | I0126 12:09:10.280406 5887 net.cpp:110] Creating Layer conv4 333 | I0126 12:09:10.280414 5887 net.cpp:477] conv4 <- pool3 334 | I0126 12:09:10.280423 5887 net.cpp:433] conv4 -> conv4 335 | I0126 12:09:10.280715 5887 net.cpp:155] Setting up conv4 336 | I0126 12:09:10.280730 5887 net.cpp:163] Top shape: 100 16 8 8 (102400) 337 | I0126 12:09:10.280740 5887 layer_factory.hpp:76] Creating layer relu4 338 | I0126 12:09:10.280750 5887 net.cpp:110] Creating Layer relu4 339 | I0126 12:09:10.280756 5887 net.cpp:477] relu4 <- conv4 340 | I0126 12:09:10.280763 5887 net.cpp:419] relu4 -> conv4 (in-place) 341 | I0126 12:09:10.280772 5887 net.cpp:155] Setting up relu4 342 | I0126 12:09:10.280781 5887 net.cpp:163] Top shape: 100 16 8 8 (102400) 343 | I0126 12:09:10.280786 5887 layer_factory.hpp:76] Creating layer pool4 344 | I0126 12:09:10.280794 5887 net.cpp:110] Creating Layer pool4 345 | I0126 12:09:10.280804 5887 net.cpp:477] pool4 <- conv4 346 | I0126 12:09:10.280823 5887 net.cpp:433] pool4 -> pool4 347 | I0126 12:09:10.280869 5887 net.cpp:155] Setting up pool4 348 | I0126 12:09:10.280879 5887 net.cpp:163] Top shape: 100 16 4 4 (25600) 349 | I0126 12:09:10.280884 5887 layer_factory.hpp:76] Creating layer ip1 350 | I0126 12:09:10.280896 5887 net.cpp:110] Creating Layer ip1 351 | I0126 12:09:10.280902 5887 net.cpp:477] ip1 <- pool4 352 | I0126 12:09:10.280913 5887 net.cpp:433] ip1 -> ip1 353 | I0126 12:09:10.282361 5887 net.cpp:155] Setting up ip1 354 | I0126 12:09:10.282384 5887 net.cpp:163] Top shape: 100 100 (10000) 355 | I0126 12:09:10.282405 5887 layer_factory.hpp:76] Creating layer relu5 356 | I0126 12:09:10.282418 5887 net.cpp:110] Creating Layer relu5 357 | I0126 12:09:10.282423 5887 net.cpp:477] relu5 <- ip1 358 | I0126 12:09:10.282431 5887 net.cpp:419] relu5 -> ip1 (in-place) 359 | I0126 12:09:10.282443 5887 net.cpp:155] Setting up relu5 360 | I0126 12:09:10.282449 5887 net.cpp:163] Top shape: 100 100 (10000) 361 | I0126 12:09:10.282455 5887 layer_factory.hpp:76] Creating layer ip2 362 | I0126 12:09:10.282467 5887 net.cpp:110] Creating Layer ip2 363 | I0126 12:09:10.282474 5887 net.cpp:477] ip2 <- ip1 364 | I0126 12:09:10.282482 5887 net.cpp:433] ip2 -> ip2 365 | I0126 12:09:10.282596 5887 net.cpp:155] Setting up ip2 366 | I0126 12:09:10.282608 5887 net.cpp:163] Top shape: 100 3 (300) 367 | I0126 12:09:10.282618 5887 layer_factory.hpp:76] Creating layer loss 368 | I0126 12:09:10.282631 5887 net.cpp:110] Creating Layer loss 369 | I0126 12:09:10.282639 5887 net.cpp:477] loss <- ip2 370 | I0126 12:09:10.282644 5887 net.cpp:477] loss <- label 371 | I0126 12:09:10.282655 5887 net.cpp:433] loss -> loss 372 | I0126 12:09:10.282666 5887 layer_factory.hpp:76] Creating layer loss 373 | I0126 12:09:10.340960 5887 net.cpp:155] Setting up loss 374 | I0126 12:09:10.340992 5887 net.cpp:163] Top shape: (1) 375 | I0126 12:09:10.340998 5887 net.cpp:168] with loss weight 1 376 | I0126 12:09:10.341029 5887 net.cpp:236] loss needs backward computation. 377 | I0126 12:09:10.341042 5887 net.cpp:236] ip2 needs backward computation. 378 | I0126 12:09:10.341048 5887 net.cpp:236] relu5 needs backward computation. 379 | I0126 12:09:10.341053 5887 net.cpp:236] ip1 needs backward computation. 
380 | I0126 12:09:10.341058 5887 net.cpp:236] pool4 needs backward computation. 381 | I0126 12:09:10.341063 5887 net.cpp:236] relu4 needs backward computation. 382 | I0126 12:09:10.341068 5887 net.cpp:236] conv4 needs backward computation. 383 | I0126 12:09:10.341073 5887 net.cpp:236] pool3 needs backward computation. 384 | I0126 12:09:10.341164 5887 net.cpp:236] relu3 needs backward computation. 385 | I0126 12:09:10.341172 5887 net.cpp:236] conv3 needs backward computation. 386 | I0126 12:09:10.341178 5887 net.cpp:236] pool2 needs backward computation. 387 | I0126 12:09:10.341184 5887 net.cpp:236] relu2 needs backward computation. 388 | I0126 12:09:10.341223 5887 net.cpp:236] conv2 needs backward computation. 389 | I0126 12:09:10.341233 5887 net.cpp:236] pool1 needs backward computation. 390 | I0126 12:09:10.341239 5887 net.cpp:236] relu1 needs backward computation. 391 | I0126 12:09:10.341291 5887 net.cpp:236] conv1 needs backward computation. 392 | I0126 12:09:10.341300 5887 net.cpp:240] nuclei does not need backward computation. 393 | I0126 12:09:10.341333 5887 net.cpp:283] This network produces output loss 394 | I0126 12:09:10.341377 5887 net.cpp:297] Network initialization done. 395 | I0126 12:09:10.341382 5887 net.cpp:298] Memory required for data: 187772404 396 | I0126 12:09:10.341821 5887 solver.cpp:187] Creating test net (#0) specified by net file: /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_3/train_test.prototxt 397 | I0126 12:09:10.341886 5887 net.cpp:339] The NetState phase (1) differed from the phase (0) specified by a rule in layer nuclei 398 | I0126 12:09:10.342036 5887 net.cpp:50] Initializing net from parameters: 399 | name: "NUCLEI_three_class" 400 | state { 401 | phase: TEST 402 | } 403 | layer { 404 | name: "nuclei" 405 | type: "Data" 406 | top: "data" 407 | top: "label" 408 | include { 409 | phase: TEST 410 | } 411 | data_param { 412 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class_101/test_lmdb" 413 | batch_size: 100 414 | backend: LMDB 415 | } 416 | } 417 | layer { 418 | name: "conv1" 419 | type: "Convolution" 420 | bottom: "data" 421 | top: "conv1" 422 | param { 423 | lr_mult: 1 424 | } 425 | param { 426 | lr_mult: 2 427 | } 428 | convolution_param { 429 | num_output: 16 430 | pad: 0 431 | kernel_size: 4 432 | stride: 1 433 | weight_filler { 434 | type: "gaussian" 435 | std: 0.01 436 | } 437 | bias_filler { 438 | type: "constant" 439 | value: 0.7 440 | } 441 | } 442 | } 443 | layer { 444 | name: "relu1" 445 | type: "ReLU" 446 | bottom: "conv1" 447 | top: "conv1" 448 | } 449 | layer { 450 | name: "pool1" 451 | type: "Pooling" 452 | bottom: "conv1" 453 | top: "pool1" 454 | pooling_param { 455 | pool: MAX 456 | kernel_size: 2 457 | stride: 2 458 | } 459 | } 460 | layer { 461 | name: "conv2" 462 | type: "Convolution" 463 | bottom: "pool1" 464 | top: "conv2" 465 | param { 466 | lr_mult: 1 467 | } 468 | param { 469 | lr_mult: 2 470 | } 471 | convolution_param { 472 | num_output: 16 473 | pad: 0 474 | kernel_size: 4 475 | stride: 1 476 | weight_filler { 477 | type: "gaussian" 478 | std: 0.01 479 | } 480 | bias_filler { 481 | type: "constant" 482 | value: 0.7 483 | } 484 | } 485 | } 486 | layer { 487 | name: "relu2" 488 | type: "ReLU" 489 | bottom: "conv2" 490 | top: "conv2" 491 | } 492 | layer { 493 | name: "pool2" 494 | type: "Pooling" 495 | bottom: "conv2" 496 | top: "pool2" 497 | pooling_param { 498 | pool: MAX 499 | kernel_size: 2 500 | stride: 2 501 | } 502 | } 503 | layer { 504 | name: "conv3" 505 | type: 
"Convolution" 506 | bottom: "pool2" 507 | top: "conv3" 508 | param { 509 | lr_mult: 1 510 | } 511 | param { 512 | lr_mult: 2 513 | } 514 | convolution_param { 515 | num_output: 16 516 | pad: 0 517 | kernel_size: 4 518 | stride: 1 519 | weight_filler { 520 | type: "gaussian" 521 | std: 0.01 522 | } 523 | bias_filler { 524 | type: "constant" 525 | value: 0.7 526 | } 527 | } 528 | } 529 | layer { 530 | name: "relu3" 531 | type: "ReLU" 532 | bottom: "conv3" 533 | top: "conv3" 534 | } 535 | layer { 536 | name: "pool3" 537 | type: "Pooling" 538 | bottom: "conv3" 539 | top: "pool3" 540 | pooling_param { 541 | pool: MAX 542 | kernel_size: 2 543 | stride: 2 544 | } 545 | } 546 | layer { 547 | name: "conv4" 548 | type: "Convolution" 549 | bottom: "pool3" 550 | top: "conv4" 551 | param { 552 | lr_mult: 1 553 | } 554 | param { 555 | lr_mult: 2 556 | } 557 | convolution_param { 558 | num_output: 16 559 | pad: 0 560 | kernel_size: 3 561 | stride: 1 562 | weight_filler { 563 | type: "gaussian" 564 | std: 0.01 565 | } 566 | bias_filler { 567 | type: "constant" 568 | value: 0.7 569 | } 570 | } 571 | } 572 | layer { 573 | name: "relu4" 574 | type: "ReLU" 575 | bottom: "conv4" 576 | top: "conv4" 577 | } 578 | layer { 579 | name: "pool4" 580 | type: "Pooling" 581 | bottom: "conv4" 582 | top: "pool4" 583 | pooling_param { 584 | pool: MAX 585 | kernel_size: 2 586 | stride: 2 587 | } 588 | } 589 | layer { 590 | name: "ip1" 591 | type: "InnerProduct" 592 | bottom: "pool4" 593 | top: "ip1" 594 | param { 595 | lr_mult: 1 596 | } 597 | param { 598 | lr_mult: 2 599 | } 600 | inner_product_param { 601 | num_output: 100 602 | weight_filler { 603 | type: "gaussian" 604 | std: 0.01 605 | } 606 | bias_filler { 607 | type: "constant" 608 | value: 0.1 609 | } 610 | } 611 | } 612 | layer { 613 | name: "relu5" 614 | type: "ReLU" 615 | bottom: "ip1" 616 | top: "ip1" 617 | } 618 | layer { 619 | name: "ip2" 620 | type: "InnerProduct" 621 | bottom: "ip1" 622 | top: "ip2" 623 | param { 624 | lr_mult: 1 625 | } 626 | param { 627 | lr_mult: 2 628 | } 629 | inner_product_param { 630 | num_output: 3 631 | weight_filler { 632 | type: "gaussian" 633 | std: 0.1 634 | } 635 | bias_filler { 636 | type: "constant" 637 | } 638 | } 639 | } 640 | layer { 641 | name: "accuracy" 642 | type: "Accuracy" 643 | bottom: "ip2" 644 | bottom: "label" 645 | top: "accuracy" 646 | include { 647 | phase: TEST 648 | } 649 | } 650 | layer { 651 | name: "loss" 652 | type: "SoftmaxWithLoss" 653 | bottom: "ip2" 654 | bottom: "label" 655 | top: "loss" 656 | } 657 | I0126 12:09:10.342167 5887 layer_factory.hpp:76] Creating layer nuclei 658 | I0126 12:09:10.342308 5887 net.cpp:110] Creating Layer nuclei 659 | I0126 12:09:10.342320 5887 net.cpp:433] nuclei -> data 660 | I0126 12:09:10.342332 5887 net.cpp:433] nuclei -> label 661 | I0126 12:09:10.395839 5894 db_lmdb.cpp:23] Opened lmdb /home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class_101/test_lmdb 662 | I0126 12:09:10.495404 5887 data_layer.cpp:45] output data size: 100,3,101,101 663 | I0126 12:09:10.543620 5887 net.cpp:155] Setting up nuclei 664 | I0126 12:09:10.543663 5887 net.cpp:163] Top shape: 100 3 101 101 (3060300) 665 | I0126 12:09:10.543674 5887 net.cpp:163] Top shape: 100 (100) 666 | I0126 12:09:10.543684 5887 layer_factory.hpp:76] Creating layer label_nuclei_1_split 667 | I0126 12:09:10.543709 5887 net.cpp:110] Creating Layer label_nuclei_1_split 668 | I0126 12:09:10.543716 5887 net.cpp:477] label_nuclei_1_split <- label 669 | I0126 12:09:10.543731 5887 net.cpp:433] 
label_nuclei_1_split -> label_nuclei_1_split_0 670 | I0126 12:09:10.543750 5887 net.cpp:433] label_nuclei_1_split -> label_nuclei_1_split_1 671 | I0126 12:09:10.543835 5887 net.cpp:155] Setting up label_nuclei_1_split 672 | I0126 12:09:10.543848 5887 net.cpp:163] Top shape: 100 (100) 673 | I0126 12:09:10.543854 5887 net.cpp:163] Top shape: 100 (100) 674 | I0126 12:09:10.543860 5887 layer_factory.hpp:76] Creating layer conv1 675 | I0126 12:09:10.543879 5887 net.cpp:110] Creating Layer conv1 676 | I0126 12:09:10.543892 5887 net.cpp:477] conv1 <- data 677 | I0126 12:09:10.543907 5887 net.cpp:433] conv1 -> conv1 678 | I0126 12:09:10.544143 5887 net.cpp:155] Setting up conv1 679 | I0126 12:09:10.544154 5887 net.cpp:163] Top shape: 100 16 98 98 (15366400) 680 | I0126 12:09:10.544173 5887 layer_factory.hpp:76] Creating layer relu1 681 | I0126 12:09:10.544191 5887 net.cpp:110] Creating Layer relu1 682 | I0126 12:09:10.544199 5887 net.cpp:477] relu1 <- conv1 683 | I0126 12:09:10.544208 5887 net.cpp:419] relu1 -> conv1 (in-place) 684 | I0126 12:09:10.544217 5887 net.cpp:155] Setting up relu1 685 | I0126 12:09:10.544226 5887 net.cpp:163] Top shape: 100 16 98 98 (15366400) 686 | I0126 12:09:10.544232 5887 layer_factory.hpp:76] Creating layer pool1 687 | I0126 12:09:10.544244 5887 net.cpp:110] Creating Layer pool1 688 | I0126 12:09:10.544250 5887 net.cpp:477] pool1 <- conv1 689 | I0126 12:09:10.544260 5887 net.cpp:433] pool1 -> pool1 690 | I0126 12:09:10.544301 5887 net.cpp:155] Setting up pool1 691 | I0126 12:09:10.544309 5887 net.cpp:163] Top shape: 100 16 49 49 (3841600) 692 | I0126 12:09:10.544316 5887 layer_factory.hpp:76] Creating layer conv2 693 | I0126 12:09:10.544330 5887 net.cpp:110] Creating Layer conv2 694 | I0126 12:09:10.544338 5887 net.cpp:477] conv2 <- pool1 695 | I0126 12:09:10.544348 5887 net.cpp:433] conv2 -> conv2 696 | I0126 12:09:10.544607 5887 net.cpp:155] Setting up conv2 697 | I0126 12:09:10.544617 5887 net.cpp:163] Top shape: 100 16 46 46 (3385600) 698 | I0126 12:09:10.544630 5887 layer_factory.hpp:76] Creating layer relu2 699 | I0126 12:09:10.544641 5887 net.cpp:110] Creating Layer relu2 700 | I0126 12:09:10.544648 5887 net.cpp:477] relu2 <- conv2 701 | I0126 12:09:10.544656 5887 net.cpp:419] relu2 -> conv2 (in-place) 702 | I0126 12:09:10.544666 5887 net.cpp:155] Setting up relu2 703 | I0126 12:09:10.544674 5887 net.cpp:163] Top shape: 100 16 46 46 (3385600) 704 | I0126 12:09:10.544680 5887 layer_factory.hpp:76] Creating layer pool2 705 | I0126 12:09:10.544688 5887 net.cpp:110] Creating Layer pool2 706 | I0126 12:09:10.544695 5887 net.cpp:477] pool2 <- conv2 707 | I0126 12:09:10.544703 5887 net.cpp:433] pool2 -> pool2 708 | I0126 12:09:10.544740 5887 net.cpp:155] Setting up pool2 709 | I0126 12:09:10.544750 5887 net.cpp:163] Top shape: 100 16 23 23 (846400) 710 | I0126 12:09:10.544756 5887 layer_factory.hpp:76] Creating layer conv3 711 | I0126 12:09:10.544770 5887 net.cpp:110] Creating Layer conv3 712 | I0126 12:09:10.544776 5887 net.cpp:477] conv3 <- pool2 713 | I0126 12:09:10.544788 5887 net.cpp:433] conv3 -> conv3 714 | I0126 12:09:10.545056 5887 net.cpp:155] Setting up conv3 715 | I0126 12:09:10.545078 5887 net.cpp:163] Top shape: 100 16 20 20 (640000) 716 | I0126 12:09:10.545092 5887 layer_factory.hpp:76] Creating layer relu3 717 | I0126 12:09:10.545104 5887 net.cpp:110] Creating Layer relu3 718 | I0126 12:09:10.545110 5887 net.cpp:477] relu3 <- conv3 719 | I0126 12:09:10.545119 5887 net.cpp:419] relu3 -> conv3 (in-place) 720 | I0126 12:09:10.545128 5887 net.cpp:155] 
Setting up relu3 721 | I0126 12:09:10.545137 5887 net.cpp:163] Top shape: 100 16 20 20 (640000) 722 | I0126 12:09:10.545145 5887 layer_factory.hpp:76] Creating layer pool3 723 | I0126 12:09:10.545153 5887 net.cpp:110] Creating Layer pool3 724 | I0126 12:09:10.545159 5887 net.cpp:477] pool3 <- conv3 725 | I0126 12:09:10.545167 5887 net.cpp:433] pool3 -> pool3 726 | I0126 12:09:10.545282 5887 net.cpp:155] Setting up pool3 727 | I0126 12:09:10.545301 5887 net.cpp:163] Top shape: 100 16 10 10 (160000) 728 | I0126 12:09:10.545308 5887 layer_factory.hpp:76] Creating layer conv4 729 | I0126 12:09:10.545321 5887 net.cpp:110] Creating Layer conv4 730 | I0126 12:09:10.545330 5887 net.cpp:477] conv4 <- pool3 731 | I0126 12:09:10.545341 5887 net.cpp:433] conv4 -> conv4 732 | I0126 12:09:10.556566 5895 blocking_queue.cpp:50] Waiting for data 733 | I0126 12:09:10.556598 5887 net.cpp:155] Setting up conv4 734 | I0126 12:09:10.556679 5887 net.cpp:163] Top shape: 100 16 8 8 (102400) 735 | I0126 12:09:10.556699 5887 layer_factory.hpp:76] Creating layer relu4 736 | I0126 12:09:10.556710 5887 net.cpp:110] Creating Layer relu4 737 | I0126 12:09:10.556715 5887 net.cpp:477] relu4 <- conv4 738 | I0126 12:09:10.556720 5887 net.cpp:419] relu4 -> conv4 (in-place) 739 | I0126 12:09:10.556727 5887 net.cpp:155] Setting up relu4 740 | I0126 12:09:10.556731 5887 net.cpp:163] Top shape: 100 16 8 8 (102400) 741 | I0126 12:09:10.556735 5887 layer_factory.hpp:76] Creating layer pool4 742 | I0126 12:09:10.556740 5887 net.cpp:110] Creating Layer pool4 743 | I0126 12:09:10.556745 5887 net.cpp:477] pool4 <- conv4 744 | I0126 12:09:10.556748 5887 net.cpp:433] pool4 -> pool4 745 | I0126 12:09:10.556792 5887 net.cpp:155] Setting up pool4 746 | I0126 12:09:10.556799 5887 net.cpp:163] Top shape: 100 16 4 4 (25600) 747 | I0126 12:09:10.556803 5887 layer_factory.hpp:76] Creating layer ip1 748 | I0126 12:09:10.556810 5887 net.cpp:110] Creating Layer ip1 749 | I0126 12:09:10.556813 5887 net.cpp:477] ip1 <- pool4 750 | I0126 12:09:10.556819 5887 net.cpp:433] ip1 -> ip1 751 | I0126 12:09:10.557612 5887 net.cpp:155] Setting up ip1 752 | I0126 12:09:10.557627 5887 net.cpp:163] Top shape: 100 100 (10000) 753 | I0126 12:09:10.557637 5887 layer_factory.hpp:76] Creating layer relu5 754 | I0126 12:09:10.557646 5887 net.cpp:110] Creating Layer relu5 755 | I0126 12:09:10.557651 5887 net.cpp:477] relu5 <- ip1 756 | I0126 12:09:10.557657 5887 net.cpp:419] relu5 -> ip1 (in-place) 757 | I0126 12:09:10.557663 5887 net.cpp:155] Setting up relu5 758 | I0126 12:09:10.557667 5887 net.cpp:163] Top shape: 100 100 (10000) 759 | I0126 12:09:10.557679 5887 layer_factory.hpp:76] Creating layer ip2 760 | I0126 12:09:10.557690 5887 net.cpp:110] Creating Layer ip2 761 | I0126 12:09:10.557694 5887 net.cpp:477] ip2 <- ip1 762 | I0126 12:09:10.557701 5887 net.cpp:433] ip2 -> ip2 763 | I0126 12:09:10.557792 5887 net.cpp:155] Setting up ip2 764 | I0126 12:09:10.557799 5887 net.cpp:163] Top shape: 100 3 (300) 765 | I0126 12:09:10.557804 5887 layer_factory.hpp:76] Creating layer ip2_ip2_0_split 766 | I0126 12:09:10.557811 5887 net.cpp:110] Creating Layer ip2_ip2_0_split 767 | I0126 12:09:10.557813 5887 net.cpp:477] ip2_ip2_0_split <- ip2 768 | I0126 12:09:10.557818 5887 net.cpp:433] ip2_ip2_0_split -> ip2_ip2_0_split_0 769 | I0126 12:09:10.557823 5887 net.cpp:433] ip2_ip2_0_split -> ip2_ip2_0_split_1 770 | I0126 12:09:10.557850 5887 net.cpp:155] Setting up ip2_ip2_0_split 771 | I0126 12:09:10.557857 5887 net.cpp:163] Top shape: 100 3 (300) 772 | I0126 12:09:10.557862 5887 
net.cpp:163] Top shape: 100 3 (300) 773 | I0126 12:09:10.557864 5887 layer_factory.hpp:76] Creating layer accuracy 774 | I0126 12:09:10.557871 5887 net.cpp:110] Creating Layer accuracy 775 | I0126 12:09:10.557874 5887 net.cpp:477] accuracy <- ip2_ip2_0_split_0 776 | I0126 12:09:10.557878 5887 net.cpp:477] accuracy <- label_nuclei_1_split_0 777 | I0126 12:09:10.557883 5887 net.cpp:433] accuracy -> accuracy 778 | I0126 12:09:10.557890 5887 net.cpp:155] Setting up accuracy 779 | I0126 12:09:10.557912 5887 net.cpp:163] Top shape: (1) 780 | I0126 12:09:10.557916 5887 layer_factory.hpp:76] Creating layer loss 781 | I0126 12:09:10.557922 5887 net.cpp:110] Creating Layer loss 782 | I0126 12:09:10.557926 5887 net.cpp:477] loss <- ip2_ip2_0_split_1 783 | I0126 12:09:10.557930 5887 net.cpp:477] loss <- label_nuclei_1_split_1 784 | I0126 12:09:10.557934 5887 net.cpp:433] loss -> loss 785 | I0126 12:09:10.557940 5887 layer_factory.hpp:76] Creating layer loss 786 | I0126 12:09:10.558006 5887 net.cpp:155] Setting up loss 787 | I0126 12:09:10.558012 5887 net.cpp:163] Top shape: (1) 788 | I0126 12:09:10.558015 5887 net.cpp:168] with loss weight 1 789 | I0126 12:09:10.558024 5887 net.cpp:236] loss needs backward computation. 790 | I0126 12:09:10.558028 5887 net.cpp:240] accuracy does not need backward computation. 791 | I0126 12:09:10.558032 5887 net.cpp:236] ip2_ip2_0_split needs backward computation. 792 | I0126 12:09:10.558035 5887 net.cpp:236] ip2 needs backward computation. 793 | I0126 12:09:10.558038 5887 net.cpp:236] relu5 needs backward computation. 794 | I0126 12:09:10.558042 5887 net.cpp:236] ip1 needs backward computation. 795 | I0126 12:09:10.558044 5887 net.cpp:236] pool4 needs backward computation. 796 | I0126 12:09:10.558048 5887 net.cpp:236] relu4 needs backward computation. 797 | I0126 12:09:10.558050 5887 net.cpp:236] conv4 needs backward computation. 798 | I0126 12:09:10.558053 5887 net.cpp:236] pool3 needs backward computation. 799 | I0126 12:09:10.558056 5887 net.cpp:236] relu3 needs backward computation. 800 | I0126 12:09:10.558059 5887 net.cpp:236] conv3 needs backward computation. 801 | I0126 12:09:10.558063 5887 net.cpp:236] pool2 needs backward computation. 802 | I0126 12:09:10.558065 5887 net.cpp:236] relu2 needs backward computation. 803 | I0126 12:09:10.558068 5887 net.cpp:236] conv2 needs backward computation. 804 | I0126 12:09:10.558071 5887 net.cpp:236] pool1 needs backward computation. 805 | I0126 12:09:10.558074 5887 net.cpp:236] relu1 needs backward computation. 806 | I0126 12:09:10.558078 5887 net.cpp:236] conv1 needs backward computation. 807 | I0126 12:09:10.558081 5887 net.cpp:240] label_nuclei_1_split does not need backward computation. 808 | I0126 12:09:10.558084 5887 net.cpp:240] nuclei does not need backward computation. 809 | I0126 12:09:10.558087 5887 net.cpp:283] This network produces output accuracy 810 | I0126 12:09:10.558090 5887 net.cpp:283] This network produces output loss 811 | I0126 12:09:10.558106 5887 net.cpp:297] Network initialization done. 812 | I0126 12:09:10.558109 5887 net.cpp:298] Memory required for data: 187775608 813 | I0126 12:09:10.558166 5887 solver.cpp:66] Solver scaffolding done. 
814 | I0126 12:09:10.558475 5887 caffe.cpp:212] Starting Optimization 815 | I0126 12:09:10.558485 5887 solver.cpp:294] Solving NUCLEI_three_class 816 | I0126 12:09:10.558487 5887 solver.cpp:295] Learning Rate Policy: fixed 817 | I0126 12:09:10.559027 5887 solver.cpp:347] Iteration 0, Testing net (#0) 818 | I0126 12:09:10.561179 5887 blocking_queue.cpp:50] Data layer prefetch queue empty 819 | I0126 12:09:37.120923 5887 solver.cpp:415] Test net output #0: accuracy = 0.333333 820 | I0126 12:09:37.120959 5887 solver.cpp:415] Test net output #1: loss = 1.10286 (* 1 = 1.10286 loss) 821 | I0126 12:09:37.607393 5887 solver.cpp:243] Iteration 0, loss = 1.10703 822 | I0126 12:09:37.607440 5887 solver.cpp:259] Train net output #0: loss = 1.10703 (* 1 = 1.10703 loss) 823 | I0126 12:09:37.607452 5887 solver.cpp:590] Iteration 0, lr = 0.01 824 | I0126 12:11:54.747372 5887 solver.cpp:243] Iteration 300, loss = 1.09862 825 | I0126 12:11:54.747493 5887 solver.cpp:259] Train net output #0: loss = 1.09862 (* 1 = 1.09862 loss) 826 | I0126 12:11:54.747508 5887 solver.cpp:590] Iteration 300, lr = 0.01 827 | I0126 12:14:15.634610 5887 solver.cpp:243] Iteration 600, loss = 1.09869 828 | I0126 12:14:15.634716 5887 solver.cpp:259] Train net output #0: loss = 1.09869 (* 1 = 1.09869 loss) 829 | I0126 12:14:15.634740 5887 solver.cpp:590] Iteration 600, lr = 0.01 830 | I0126 12:16:31.650321 5887 solver.cpp:243] Iteration 900, loss = 1.09861 831 | I0126 12:16:31.650425 5887 solver.cpp:259] Train net output #0: loss = 1.09861 (* 1 = 1.09861 loss) 832 | I0126 12:16:31.650439 5887 solver.cpp:590] Iteration 900, lr = 0.01 833 | I0126 12:18:51.270839 5887 solver.cpp:243] Iteration 1200, loss = 1.09869 834 | I0126 12:18:51.270907 5887 solver.cpp:259] Train net output #0: loss = 1.09869 (* 1 = 1.09869 loss) 835 | I0126 12:18:51.270920 5887 solver.cpp:590] Iteration 1200, lr = 0.01 836 | I0126 12:21:05.501422 5887 solver.cpp:243] Iteration 1500, loss = 1.09859 837 | I0126 12:21:05.501508 5887 solver.cpp:259] Train net output #0: loss = 1.09859 (* 1 = 1.09859 loss) 838 | I0126 12:21:05.501523 5887 solver.cpp:590] Iteration 1500, lr = 0.01 839 | I0126 12:23:19.424448 5887 solver.cpp:243] Iteration 1800, loss = 1.09869 840 | I0126 12:23:19.424600 5887 solver.cpp:259] Train net output #0: loss = 1.09869 (* 1 = 1.09869 loss) 841 | I0126 12:23:19.424612 5887 solver.cpp:590] Iteration 1800, lr = 0.01 842 | I0126 12:25:33.229750 5887 solver.cpp:243] Iteration 2100, loss = 1.09858 843 | I0126 12:25:33.229835 5887 solver.cpp:259] Train net output #0: loss = 1.09858 (* 1 = 1.09858 loss) 844 | I0126 12:25:33.229846 5887 solver.cpp:590] Iteration 2100, lr = 0.01 845 | I0126 12:27:46.870287 5887 solver.cpp:243] Iteration 2400, loss = 1.09869 846 | I0126 12:27:46.870383 5887 solver.cpp:259] Train net output #0: loss = 1.09869 (* 1 = 1.09869 loss) 847 | I0126 12:27:46.870393 5887 solver.cpp:590] Iteration 2400, lr = 0.01 848 | I0126 12:30:00.493221 5887 solver.cpp:243] Iteration 2700, loss = 1.09856 849 | I0126 12:30:00.493309 5887 solver.cpp:259] Train net output #0: loss = 1.09856 (* 1 = 1.09856 loss) 850 | I0126 12:30:00.493322 5887 solver.cpp:590] Iteration 2700, lr = 0.01 851 | I0126 12:32:13.248606 5887 solver.cpp:243] Iteration 3000, loss = 1.09869 852 | I0126 12:32:13.248683 5887 solver.cpp:259] Train net output #0: loss = 1.09869 (* 1 = 1.09869 loss) 853 | I0126 12:32:13.248698 5887 solver.cpp:590] Iteration 3000, lr = 0.01 854 | I0126 12:34:25.827349 5887 solver.cpp:243] Iteration 3300, loss = 1.09854 855 | I0126 12:34:25.827442 
5887 solver.cpp:259] Train net output #0: loss = 1.09854 (* 1 = 1.09854 loss) 856 | I0126 12:34:25.827451 5887 solver.cpp:590] Iteration 3300, lr = 0.01 857 | I0126 12:36:38.454501 5887 solver.cpp:243] Iteration 3600, loss = 1.09868 858 | I0126 12:36:38.454602 5887 solver.cpp:259] Train net output #0: loss = 1.09868 (* 1 = 1.09868 loss) 859 | I0126 12:36:38.454612 5887 solver.cpp:590] Iteration 3600, lr = 0.01 860 | I0126 12:38:51.150221 5887 solver.cpp:243] Iteration 3900, loss = 1.09853 861 | I0126 12:38:51.150302 5887 solver.cpp:259] Train net output #0: loss = 1.09853 (* 1 = 1.09853 loss) 862 | I0126 12:38:51.150311 5887 solver.cpp:590] Iteration 3900, lr = 0.01 863 | I0126 12:41:03.850486 5887 solver.cpp:243] Iteration 4200, loss = 1.09868 864 | I0126 12:41:03.850574 5887 solver.cpp:259] Train net output #0: loss = 1.09868 (* 1 = 1.09868 loss) 865 | I0126 12:41:03.850582 5887 solver.cpp:590] Iteration 4200, lr = 0.01 866 | I0126 12:43:16.505765 5887 solver.cpp:243] Iteration 4500, loss = 1.09851 867 | I0126 12:43:16.505924 5887 solver.cpp:259] Train net output #0: loss = 1.09851 (* 1 = 1.09851 loss) 868 | I0126 12:43:16.505939 5887 solver.cpp:590] Iteration 4500, lr = 0.01 869 | I0126 12:45:29.167649 5887 solver.cpp:243] Iteration 4800, loss = 1.09868 870 | I0126 12:45:29.167747 5887 solver.cpp:259] Train net output #0: loss = 1.09868 (* 1 = 1.09868 loss) 871 | I0126 12:45:29.167762 5887 solver.cpp:590] Iteration 4800, lr = 0.01 872 | I0126 12:47:41.812707 5887 solver.cpp:243] Iteration 5100, loss = 1.0985 873 | I0126 12:47:41.812786 5887 solver.cpp:259] Train net output #0: loss = 1.0985 (* 1 = 1.0985 loss) 874 | I0126 12:47:41.812795 5887 solver.cpp:590] Iteration 5100, lr = 0.01 875 | I0126 12:49:55.429116 5887 solver.cpp:243] Iteration 5400, loss = 1.09867 876 | I0126 12:49:55.429205 5887 solver.cpp:259] Train net output #0: loss = 1.09867 (* 1 = 1.09867 loss) 877 | I0126 12:49:55.429219 5887 solver.cpp:590] Iteration 5400, lr = 0.01 878 | I0126 12:52:17.405570 5887 solver.cpp:243] Iteration 5700, loss = 1.09849 879 | I0126 12:52:17.405657 5887 solver.cpp:259] Train net output #0: loss = 1.09849 (* 1 = 1.09849 loss) 880 | I0126 12:52:17.405665 5887 solver.cpp:590] Iteration 5700, lr = 0.01 881 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_3/solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 20 epochs (40000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_3/train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 150 9 | # Carry out testing every 1000 training iterations. 10 | test_interval: 6000 11 | # The base learning rate, momentum and the weight decay of the network. 
12 | base_lr: 0.01 13 | momentum: 0.9 14 | weight_decay: 0.0005 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # lr_policy: "step" 18 | # stepsize: 20000 19 | # gamma: 0.1 20 | # Display every 100 iterations 21 | display: 300 22 | # The maximum number of iterations 23 | max_iter: 30000 24 | # snapshot intermediate results 25 | snapshot: 10000 26 | snapshot_prefix: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_3/snap" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_3/train_test.prototxt: -------------------------------------------------------------------------------- 1 | name: "NUCLEI_three_class" 2 | layer { 3 | name: "nuclei" 4 | type: "Data" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | data_param { 11 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class_101/train_lmdb" 12 | batch_size: 100 13 | backend: LMDB 14 | } 15 | } 16 | layer { 17 | name: "nuclei" 18 | type: "Data" 19 | top: "data" 20 | top: "label" 21 | include { 22 | phase: TEST 23 | } 24 | data_param { 25 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class_101/test_lmdb" 26 | batch_size: 100 27 | backend: LMDB 28 | } 29 | } 30 | layer { 31 | name: "conv1" 32 | type: "Convolution" 33 | bottom: "data" 34 | top: "conv1" 35 | param { 36 | lr_mult: 1 37 | } 38 | param { 39 | lr_mult: 2 40 | } 41 | convolution_param { 42 | num_output: 16 43 | pad: 0 44 | kernel_size: 4 45 | stride: 1 46 | weight_filler { 47 | type: "gaussian" 48 | std: 0.01 49 | } 50 | bias_filler { 51 | type: "constant" 52 | value: 0.7 53 | } 54 | } 55 | } 56 | layer { 57 | name: "relu1" 58 | type: "ReLU" 59 | bottom: "conv1" 60 | top: "conv1" 61 | } 62 | layer { 63 | name: "pool1" 64 | type: "Pooling" 65 | bottom: "conv1" 66 | top: "pool1" 67 | pooling_param { 68 | pool: MAX 69 | kernel_size: 2 70 | stride: 2 71 | } 72 | } 73 | layer { 74 | name: "conv2" 75 | type: "Convolution" 76 | bottom: "pool1" 77 | top: "conv2" 78 | param { 79 | lr_mult: 1 80 | } 81 | param { 82 | lr_mult: 2 83 | } 84 | convolution_param { 85 | num_output: 16 86 | pad: 0 87 | kernel_size: 4 88 | stride: 1 89 | weight_filler { 90 | type: "gaussian" 91 | std: 0.01 92 | } 93 | bias_filler { 94 | type: "constant" 95 | value: 0.7 96 | } 97 | } 98 | } 99 | layer { 100 | name: "relu2" 101 | type: "ReLU" 102 | bottom: "conv2" 103 | top: "conv2" 104 | } 105 | layer { 106 | name: "pool2" 107 | type: "Pooling" 108 | bottom: "conv2" 109 | top: "pool2" 110 | pooling_param { 111 | pool: MAX 112 | kernel_size: 2 113 | stride: 2 114 | } 115 | } 116 | layer { 117 | name: "conv3" 118 | type: "Convolution" 119 | bottom: "pool2" 120 | top: "conv3" 121 | param { 122 | lr_mult: 1 123 | } 124 | param { 125 | lr_mult: 2 126 | } 127 | convolution_param { 128 | num_output: 16 129 | pad: 0 130 | kernel_size: 4 131 | stride: 1 132 | weight_filler { 133 | type: "gaussian" 134 | std: 0.01 135 | } 136 | bias_filler { 137 | type: "constant" 138 | value: 0.7 139 | } 140 | } 141 | } 142 | layer { 143 | name: "relu3" 144 | type: "ReLU" 145 | bottom: "conv3" 146 | top: "conv3" 147 | } 148 | layer { 149 | name: "pool3" 150 | type: "Pooling" 151 | bottom: "conv3" 152 | top: "pool3" 153 | pooling_param { 154 | pool: MAX 155 | kernel_size: 2 156 | stride: 2 157 | } 158 | } 159 | layer { 160 | name: "conv4" 161 | type: "Convolution" 162 | bottom: "pool3" 163 | top: "conv4" 164 | 
param { 165 | lr_mult: 1 166 | } 167 | param { 168 | lr_mult: 2 169 | } 170 | convolution_param { 171 | num_output: 16 172 | pad: 0 173 | kernel_size: 3 174 | stride: 1 175 | weight_filler { 176 | type: "gaussian" 177 | std: 0.01 178 | } 179 | bias_filler { 180 | type: "constant" 181 | value: 0.7 182 | } 183 | } 184 | } 185 | layer { 186 | name: "relu4" 187 | type: "ReLU" 188 | bottom: "conv4" 189 | top: "conv4" 190 | } 191 | layer { 192 | name: "pool4" 193 | type: "Pooling" 194 | bottom: "conv4" 195 | top: "pool4" 196 | pooling_param { 197 | pool: MAX 198 | kernel_size: 2 199 | stride: 2 200 | } 201 | } 202 | layer { 203 | name: "ip1" 204 | type: "InnerProduct" 205 | bottom: "pool4" 206 | top: "ip1" 207 | param { 208 | lr_mult: 1 209 | } 210 | param { 211 | lr_mult: 2 212 | } 213 | inner_product_param { 214 | num_output: 100 215 | weight_filler { 216 | type: "gaussian" 217 | std: 0.01 218 | } 219 | bias_filler { 220 | type: "constant" 221 | value: 0.1 222 | } 223 | } 224 | } 225 | layer { 226 | name: "relu5" 227 | type: "ReLU" 228 | bottom: "ip1" 229 | top: "ip1" 230 | } 231 | layer { 232 | name: "ip2" 233 | type: "InnerProduct" 234 | bottom: "ip1" 235 | top: "ip2" 236 | param { 237 | lr_mult: 1 238 | } 239 | param { 240 | lr_mult: 2 241 | } 242 | inner_product_param { 243 | num_output: 3 244 | weight_filler { 245 | type: "gaussian" 246 | std: 0.1 247 | } 248 | bias_filler { 249 | type: "constant" 250 | } 251 | } 252 | } 253 | layer { 254 | name: "accuracy" 255 | type: "Accuracy" 256 | bottom: "ip2" 257 | bottom: "label" 258 | top: "accuracy" 259 | include { 260 | phase: TEST 261 | } 262 | } 263 | layer { 264 | name: "loss" 265 | type: "SoftmaxWithLoss" 266 | bottom: "ip2" 267 | bottom: "label" 268 | top: "loss" 269 | } 270 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_3/train_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_3/solver.prototxt \ 7 | 2>&1 | tee /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_3/output_1.txt 8 | #--weights=examples/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.caffemodel 9 | #--snapshot=~/Projects/nuclei-net/caffe-scripts/nuclei/three_class_nuclei_1_iter_10000.solverstate \ 10 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_4/snap_iter_10000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_4/snap_iter_10000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_4/snap_iter_10000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_4/snap_iter_10000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_4/snap_iter_20000.caffemodel: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_4/snap_iter_20000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_4/snap_iter_20000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_4/snap_iter_20000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_4/snap_iter_30000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_4/snap_iter_30000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_4/snap_iter_30000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/arch_4/snap_iter_30000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_4/solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 20 epochs (40000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_4/train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 150 9 | # Carry out testing every 1000 training iterations. 10 | test_interval: 6000 11 | # The base learning rate, momentum and the weight decay of the network. 
12 | base_lr: 0.01 13 | momentum: 0.9 14 | weight_decay: 0.0005 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # lr_policy: "step" 18 | # stepsize: 20000 19 | # gamma: 0.1 20 | # Display every 100 iterations 21 | display: 300 22 | # The maximum number of iterations 23 | max_iter: 30000 24 | # snapshot intermediate results 25 | snapshot: 10000 26 | snapshot_prefix: "/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_4/snap" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_4/train_test.prototxt: -------------------------------------------------------------------------------- 1 | name: "NUCLEI_three_class" 2 | layer { 3 | name: "nuclei" 4 | type: "Data" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | data_param { 11 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class_31/train_lmdb" 12 | batch_size: 100 13 | backend: LMDB 14 | } 15 | } 16 | layer { 17 | name: "nuclei" 18 | type: "Data" 19 | top: "data" 20 | top: "label" 21 | include { 22 | phase: TEST 23 | } 24 | data_param { 25 | source: "/home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class_31/test_lmdb" 26 | batch_size: 100 27 | backend: LMDB 28 | } 29 | } 30 | layer { 31 | name: "conv1" 32 | type: "Convolution" 33 | bottom: "data" 34 | top: "conv1" 35 | param { 36 | lr_mult: 0.1 37 | } 38 | param { 39 | lr_mult: 0.2 40 | } 41 | convolution_param { 42 | num_output: 48 43 | pad: 0 44 | kernel_size: 6 45 | stride: 1 46 | weight_filler { 47 | type: "gaussian" 48 | std: 0.01 49 | } 50 | bias_filler { 51 | type: "constant" 52 | value: 0.7 53 | } 54 | } 55 | } 56 | layer { 57 | name: "relu1" 58 | type: "ReLU" 59 | bottom: "conv1" 60 | top: "conv1" 61 | } 62 | layer { 63 | name: "pool1" 64 | type: "Pooling" 65 | bottom: "conv1" 66 | top: "pool1" 67 | pooling_param { 68 | pool: MAX 69 | kernel_size: 2 70 | stride: 2 71 | } 72 | } 73 | layer { 74 | name: "conv2" 75 | type: "Convolution" 76 | bottom: "pool1" 77 | top: "conv2" 78 | param { 79 | lr_mult: 0.1 80 | } 81 | param { 82 | lr_mult: 0.2 83 | } 84 | convolution_param { 85 | num_output: 48 86 | pad: 0 87 | kernel_size: 4 88 | stride: 1 89 | weight_filler { 90 | type: "gaussian" 91 | std: 0.01 92 | } 93 | bias_filler { 94 | type: "constant" 95 | value: 0.7 96 | } 97 | } 98 | } 99 | layer { 100 | name: "relu2" 101 | type: "ReLU" 102 | bottom: "conv2" 103 | top: "conv2" 104 | } 105 | layer { 106 | name: "pool2" 107 | type: "Pooling" 108 | bottom: "conv2" 109 | top: "pool2" 110 | pooling_param { 111 | pool: MAX 112 | kernel_size: 2 113 | stride: 2 114 | } 115 | } 116 | layer { 117 | name: "ip1" 118 | type: "InnerProduct" 119 | bottom: "pool2" 120 | top: "ip1" 121 | param { 122 | lr_mult: 1 123 | } 124 | param { 125 | lr_mult: 2 126 | } 127 | inner_product_param { 128 | num_output: 30 129 | weight_filler { 130 | type: "gaussian" 131 | std: 0.01 132 | } 133 | bias_filler { 134 | type: "constant" 135 | value: 0.1 136 | } 137 | } 138 | } 139 | layer { 140 | name: "relu5" 141 | type: "ReLU" 142 | bottom: "ip1" 143 | top: "ip1" 144 | } 145 | layer { 146 | name: "ip2" 147 | type: "InnerProduct" 148 | bottom: "ip1" 149 | top: "ip2" 150 | param { 151 | lr_mult: 1 152 | } 153 | param { 154 | lr_mult: 2 155 | } 156 | inner_product_param { 157 | num_output: 3 158 | weight_filler { 159 | type: "gaussian" 160 | std: 0.1 161 | } 162 | bias_filler { 163 | type: "constant" 164 | } 165 | 
} 166 | } 167 | layer { 168 | name: "accuracy" 169 | type: "Accuracy" 170 | bottom: "ip2" 171 | bottom: "label" 172 | top: "accuracy" 173 | include { 174 | phase: TEST 175 | } 176 | } 177 | layer { 178 | name: "loss" 179 | type: "SoftmaxWithLoss" 180 | bottom: "ip2" 181 | bottom: "label" 182 | top: "loss" 183 | } 184 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/three_class/arch_4/train_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=/home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_4/solver.prototxt \ 7 | 2>&1 | tee /home/sanuj/Projects/nuclei-net/caffe-scripts/nuclei/three_class/arch_4/output_1.txt 8 | #--weights=examples/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.caffemodel 9 | #--snapshot=~/Projects/nuclei-net/caffe-scripts/nuclei/three_class_nuclei_1_iter_10000.solverstate \ 10 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/92_accu_use_cifar_model/use_cifar_2_iter_10000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/train_cifar/92_accu_use_cifar_model/use_cifar_2_iter_10000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/92_accu_use_cifar_model/use_cifar_2_iter_10000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/train_cifar/92_accu_use_cifar_model/use_cifar_2_iter_10000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/92_accu_use_cifar_model/use_cifar_2_iter_20000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/train_cifar/92_accu_use_cifar_model/use_cifar_2_iter_20000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/92_accu_use_cifar_model/use_cifar_2_iter_20000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/train_cifar/92_accu_use_cifar_model/use_cifar_2_iter_20000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.caffemodel -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.solverstate: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.solverstate -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/cifar_nuclei_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/nuclei/train_cifar/cifar_nuclei_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.00001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 80000 21 | # snapshot intermediate results 22 | snapshot: 10000 23 | snapshot_prefix: "examples/nuclei/train_cifar/cifar_nuclei_quick1" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/cifar_nuclei_train_test.prototxt: -------------------------------------------------------------------------------- 1 | name: "CIFAR10_full" 2 | layer { 3 | name: "cifar" 4 | type: "Data" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | transform_param { 11 | mean_file: "examples/cifar10/mean.binaryproto" 12 | } 13 | data_param { 14 | source: "examples/cifar10/cifar10_train_lmdb" 15 | batch_size: 100 16 | backend: LMDB 17 | } 18 | } 19 | layer { 20 | name: "cifar" 21 | type: "Data" 22 | top: "data" 23 | top: "label" 24 | include { 25 | phase: TEST 26 | } 27 | transform_param { 28 | mean_file: "examples/cifar10/mean.binaryproto" 29 | } 30 | data_param { 31 | source: "examples/cifar10/cifar10_test_lmdb" 32 | batch_size: 100 33 | backend: LMDB 34 | } 35 | } 36 | layer { 37 | name: "conv1" 38 | type: "Convolution" 39 | bottom: "data" 40 | top: "conv1" 41 | param { 42 | lr_mult: 1 43 | } 44 | param { 45 | lr_mult: 2 46 | } 47 | convolution_param { 48 | num_output: 48 49 | pad: 2 50 | kernel_size: 6 51 | stride: 1 52 | weight_filler { 53 | type: "gaussian" 54 | std: 0.0001 55 | } 56 | bias_filler { 57 | type: "constant" 58 | } 59 | } 60 | } 61 | layer { 62 | name: "pool1" 63 | type: "Pooling" 64 | bottom: "conv1" 65 | top: "pool1" 66 | pooling_param { 67 | pool: MAX 68 | pad: 1 69 | kernel_size: 2 70 | stride: 2 71 | } 72 | } 73 | layer { 74 | name: "relu1" 75 | type: "ReLU" 76 | bottom: "pool1" 77 | top: "pool1" 78 | } 79 | layer { 80 | name: "conv2" 81 | type: "Convolution" 82 | bottom: "pool1" 83 | top: "conv2" 84 | param { 85 | lr_mult: 1 86 | } 87 | param { 88 | lr_mult: 2 89 | } 90 | convolution_param { 91 | num_output: 48 92 | pad: 0 93 | kernel_size: 4 94 | stride: 1 95 | weight_filler { 96 | type: "gaussian" 97 | std: 0.01 98 | } 99 | bias_filler { 100 | type: "constant" 101 | } 102 | } 103 | } 104 | layer { 105 | name: "relu2" 106 | type: "ReLU" 107 | bottom: "conv2" 108 | top: "conv2" 109 | } 110 | layer { 111 | name: "pool2" 112 | type: "Pooling" 113 
| bottom: "conv2" 114 | top: "pool2" 115 | pooling_param { 116 | pool: MAX 117 | kernel_size: 2 118 | stride: 2 119 | } 120 | } 121 | layer { 122 | name: "ip1" 123 | type: "InnerProduct" 124 | bottom: "pool2" 125 | top: "ip1" 126 | param { 127 | lr_mult: 1 128 | } 129 | param { 130 | lr_mult: 2 131 | } 132 | inner_product_param { 133 | num_output: 48 134 | weight_filler { 135 | type: "gaussian" 136 | std: 0.1 137 | } 138 | bias_filler { 139 | type: "constant" 140 | } 141 | } 142 | } 143 | layer { 144 | name: "relu1" 145 | type: "ReLU" 146 | bottom: "ip1" 147 | top: "ip1" 148 | } 149 | layer { 150 | name: "ip2" 151 | type: "InnerProduct" 152 | bottom: "ip1" 153 | top: "ip2" 154 | param { 155 | lr_mult: 1 156 | } 157 | param { 158 | lr_mult: 2 159 | } 160 | inner_product_param { 161 | num_output: 10 162 | weight_filler { 163 | type: "gaussian" 164 | std: 0.1 165 | } 166 | bias_filler { 167 | type: "constant" 168 | } 169 | } 170 | } 171 | layer { 172 | name: "accuracy" 173 | type: "Accuracy" 174 | bottom: "ip2" 175 | bottom: "label" 176 | top: "accuracy" 177 | include { 178 | phase: TEST 179 | } 180 | } 181 | layer { 182 | name: "loss" 183 | type: "SoftmaxWithLoss" 184 | bottom: "ip2" 185 | bottom: "label" 186 | top: "loss" 187 | } 188 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/deploy.prototxt: -------------------------------------------------------------------------------- 1 | name: "Nucleinet" 2 | input: "data" 3 | input_shape { 4 | dim: 1 5 | dim: 3 6 | dim: 33 7 | dim: 33 8 | } 9 | layer { 10 | name: "conv1" 11 | type: "Convolution" 12 | bottom: "data" 13 | top: "conv1" 14 | convolution_param { 15 | num_output: 48 16 | pad: 0 17 | kernel_size: 6 18 | stride: 1 19 | } 20 | } 21 | layer { 22 | name: "pool1" 23 | type: "Pooling" 24 | bottom: "conv1" 25 | top: "pool1" 26 | pooling_param { 27 | pool: MAX 28 | kernel_size: 2 29 | stride: 2 30 | } 31 | } 32 | layer { 33 | name: "relu1" 34 | type: "ReLU" 35 | bottom: "pool1" 36 | top: "pool1" 37 | } 38 | layer { 39 | name: "conv2" 40 | type: "Convolution" 41 | bottom: "pool1" 42 | top: "conv2" 43 | convolution_param { 44 | num_output: 48 45 | pad: 0 46 | kernel_size: 4 47 | stride: 1 48 | } 49 | } 50 | layer { 51 | name: "relu2" 52 | type: "ReLU" 53 | bottom: "conv2" 54 | top: "conv2" 55 | } 56 | layer { 57 | name: "pool2" 58 | type: "Pooling" 59 | bottom: "conv2" 60 | top: "pool2" 61 | pooling_param { 62 | pool: MAX 63 | kernel_size: 2 64 | stride: 2 65 | } 66 | } 67 | layer { 68 | name: "ip_1" 69 | type: "InnerProduct" 70 | bottom: "pool2" 71 | top: "ip1" 72 | inner_product_param { 73 | num_output: 48 74 | } 75 | } 76 | layer { 77 | name: "relu1" 78 | type: "ReLU" 79 | bottom: "ip1" 80 | top: "ip1" 81 | } 82 | layer { 83 | name: "ip_2" 84 | type: "InnerProduct" 85 | bottom: "ip1" 86 | top: "ip2" 87 | inner_product_param { 88 | num_output: 48 89 | } 90 | } 91 | layer { 92 | name: "relu2" 93 | type: "ReLU" 94 | bottom: "ip2" 95 | top: "ip2" 96 | } 97 | layer { 98 | name: "ip_3" 99 | type: "InnerProduct" 100 | bottom: "ip2" 101 | top: "ip3" 102 | inner_product_param { 103 | num_output: 2 104 | } 105 | } 106 | layer { 107 | name: "prob" 108 | type: "Softmax" 109 | bottom: "ip3" 110 | top: "prob" 111 | } 112 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/deploy1.prototxt: -------------------------------------------------------------------------------- 1 | name: "Nucleinet" 2 | input: "data" 3 | input_shape { 
4 | dim: 1 5 | dim: 3 6 | dim: 33 7 | dim: 33 8 | } 9 | layer { 10 | name: "conv1" 11 | type: "Convolution" 12 | bottom: "data" 13 | top: "conv1" 14 | convolution_param { 15 | num_output: 48 16 | pad: 0 17 | kernel_size: 6 18 | stride: 1 19 | } 20 | } 21 | layer { 22 | name: "pool1" 23 | type: "Pooling" 24 | bottom: "conv1" 25 | top: "pool1" 26 | pooling_param { 27 | pool: MAX 28 | kernel_size: 2 29 | stride: 2 30 | } 31 | } 32 | layer { 33 | name: "relu1" 34 | type: "ReLU" 35 | bottom: "pool1" 36 | top: "pool1" 37 | } 38 | layer { 39 | name: "conv2" 40 | type: "Convolution" 41 | bottom: "pool1" 42 | top: "conv2" 43 | convolution_param { 44 | num_output: 48 45 | pad: 0 46 | kernel_size: 4 47 | stride: 1 48 | } 49 | } 50 | layer { 51 | name: "relu2" 52 | type: "ReLU" 53 | bottom: "conv2" 54 | top: "conv2" 55 | } 56 | layer { 57 | name: "pool2" 58 | type: "Pooling" 59 | bottom: "conv2" 60 | top: "pool2" 61 | pooling_param { 62 | pool: MAX 63 | kernel_size: 2 64 | stride: 2 65 | } 66 | } 67 | layer { 68 | name: "ip_1" 69 | type: "InnerProduct" 70 | bottom: "pool2" 71 | top: "ip1" 72 | inner_product_param { 73 | num_output: 48 74 | } 75 | } 76 | layer { 77 | name: "relu1" 78 | type: "ReLU" 79 | bottom: "ip1" 80 | top: "ip1" 81 | } 82 | layer { 83 | name: "ip_2" 84 | type: "InnerProduct" 85 | bottom: "ip1" 86 | top: "ip2" 87 | inner_product_param { 88 | num_output: 2 89 | } 90 | } 91 | layer { 92 | name: "prob" 93 | type: "Softmax" 94 | bottom: "ip2" 95 | top: "prob" 96 | } 97 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/train_quick.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/nuclei/train_cifar/cifar_nuclei_solver.prototxt \ 7 | --snapshot=examples/nuclei/train_cifar/cifar_nuclei_quick1_iter_60000.solverstate 2>&1 | tee examples/nuclei/train_run_3_output.txt 8 | 9 | # reduce learning rate by factor of 10 after 8 epochs 10 | # $TOOLS/caffe train \ 11 | # --solver=examples/cifar10/cifar10_quick_solver_lr1.prototxt \ 12 | # --snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate.h5 13 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/train_use_cifar.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/nuclei/train_cifar/use_cifar_solver.prototxt \ 7 | --weights=examples/nuclei/train_cifar/cifar_nuclei_quick1_iter_40000.caffemodel 2>&1 | tee examples/nuclei/train_use_cifar_7_output.txt 8 | #--snapshot=examples/nuclei/train_cifar/use_cifar_2_iter_10000.solverstate 9 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/use_cifar_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 20 epochs (40000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/nuclei/train_cifar/use_cifar_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 200 9 | # Carry out testing every 1000 training iterations. 
10 | test_interval: 1000 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.0005 15 | # The learning rate policy 16 | lr_policy: "step" 17 | stepsize: 20000 18 | gamma: 0.1 19 | # Display every 100 iterations 20 | display: 100 21 | # The maximum number of iterations 22 | max_iter: 40000 23 | # snapshot intermediate results 24 | snapshot: 10000 25 | snapshot_prefix: "examples/nuclei/train_cifar/use_cifar_7" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar/use_cifar_train_test.prototxt: -------------------------------------------------------------------------------- 1 | name: "NULCEI_quick" 2 | layer { 3 | name: "nuclei" 4 | type: "ImageData" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | image_data_param { 11 | source: "/home/sanuj/temp_63_LLM_YR4_33/train.txt" 12 | batch_size: 200 13 | } 14 | } 15 | layer { 16 | name: "nuclei" 17 | type: "ImageData" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | image_data_param { 24 | source: "/home/sanuj/temp_63_LLM_YR4_33/test.txt" 25 | batch_size: 500 26 | } 27 | } 28 | layer { 29 | name: "conv1" 30 | type: "Convolution" 31 | bottom: "data" 32 | top: "conv1" 33 | param { 34 | lr_mult: 0.1 35 | } 36 | param { 37 | lr_mult: 0.2 38 | } 39 | convolution_param { 40 | num_output: 48 41 | pad: 0 42 | kernel_size: 4 43 | stride: 1 44 | weight_filler { 45 | type: "gaussian" 46 | std: 0.0001 47 | } 48 | bias_filler { 49 | type: "constant" 50 | } 51 | } 52 | } 53 | layer { 54 | name: "pool1" 55 | type: "Pooling" 56 | bottom: "conv1" 57 | top: "pool1" 58 | pooling_param { 59 | pool: MAX 60 | kernel_size: 2 61 | stride: 2 62 | } 63 | } 64 | layer { 65 | name: "relu1" 66 | type: "ReLU" 67 | bottom: "pool1" 68 | top: "pool1" 69 | } 70 | layer { 71 | name: "conv2" 72 | type: "Convolution" 73 | bottom: "pool1" 74 | top: "conv2" 75 | param { 76 | lr_mult: 0.1 77 | } 78 | param { 79 | lr_mult: 0.2 80 | } 81 | convolution_param { 82 | num_output: 48 83 | pad: 0 84 | kernel_size: 6 85 | stride: 1 86 | weight_filler { 87 | type: "gaussian" 88 | std: 0.01 89 | } 90 | bias_filler { 91 | type: "constant" 92 | } 93 | } 94 | } 95 | layer { 96 | name: "relu2" 97 | type: "ReLU" 98 | bottom: "conv2" 99 | top: "conv2" 100 | } 101 | layer { 102 | name: "pool2" 103 | type: "Pooling" 104 | bottom: "conv2" 105 | top: "pool2" 106 | pooling_param { 107 | pool: MAX 108 | kernel_size: 2 109 | stride: 2 110 | } 111 | } 112 | layer { 113 | name: "ip_1" 114 | type: "InnerProduct" 115 | bottom: "pool2" 116 | top: "ip1" 117 | param { 118 | lr_mult: 1 119 | } 120 | param { 121 | lr_mult: 2 122 | } 123 | inner_product_param { 124 | num_output: 48 125 | weight_filler { 126 | type: "gaussian" 127 | std: 0.1 128 | } 129 | bias_filler { 130 | type: "constant" 131 | } 132 | } 133 | } 134 | layer { 135 | name: "relu1" 136 | type: "ReLU" 137 | bottom: "ip1" 138 | top: "ip1" 139 | } 140 | layer { 141 | name: "ip_2" 142 | type: "InnerProduct" 143 | bottom: "ip1" 144 | top: "ip2" 145 | param { 146 | lr_mult: 1 147 | } 148 | param { 149 | lr_mult: 2 150 | } 151 | inner_product_param { 152 | num_output: 2 153 | weight_filler { 154 | type: "gaussian" 155 | std: 0.1 156 | } 157 | bias_filler { 158 | type: "constant" 159 | } 160 | } 161 | } 162 | layer { 163 | name: "accuracy" 164 | type: "Accuracy" 165 | bottom: "ip2" 166 | bottom: "label" 167 | 
top: "accuracy" 168 | include { 169 | phase: TEST 170 | } 171 | } 172 | layer { 173 | name: "loss" 174 | type: "SoftmaxWithLoss" 175 | bottom: "ip2" 176 | bottom: "label" 177 | top: "loss" 178 | } 179 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_cifar_on_nuclei.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/nuclei/cifar_on_nuclei_solver.prototxt 7 | #2>&1 | tee examples/nuclei_segment_1/nuclei_1_run_output.txt 8 | 9 | # reduce learning rate by factor of 10 after 8 epochs 10 | # $TOOLS/caffe train \ 11 | # --solver=examples/cifar10/cifar10_quick_solver_lr1.prototxt \ 12 | # --snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate.h5 13 | -------------------------------------------------------------------------------- /caffe-scripts/nuclei/train_quick.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/nuclei/nuclei_quick_solver.prototxt \ 7 | --snapshot=examples/nuclei/nuclei_quick_relative_iter_60000.solverstate 2>&1 | tee nuclei_8_run_output.txt 8 | 9 | # reduce learning rate by factor of 10 after 8 epochs 10 | # $TOOLS/caffe train \ 11 | # --solver=examples/cifar10/cifar10_quick_solver_lr1.prototxt \ 12 | # --snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate.h5 13 | -------------------------------------------------------------------------------- /caffe-scripts/predict.py: -------------------------------------------------------------------------------- 1 | import sys 2 | caffe_root = '/home/sanuj/Projects/BTP/caffe' 3 | sys.path.insert(0, caffe_root + '/python') 4 | import caffe 5 | import matplotlib 6 | import matplotlib.image as mpimg 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | import scipy 10 | 11 | w = 33 12 | p = (w-1)/2 13 | 14 | caffe.set_mode_gpu() 15 | 16 | # net = caffe.Net(caffe_root+'/examples/nuclei/train_cifar/deploy.prototxt', 17 | # caffe_root+'/examples/nuclei/train_cifar/92_accu_use_cifar_model/use_cifar_2_iter_20000.caffemodel', 18 | # caffe.TEST) 19 | 20 | # net = caffe.Net(caffe_root+'/examples/nuclei/train_cifar/deploy1.prototxt', 21 | # caffe_root+'/examples/nuclei/train_cifar/use_cifar_6_iter_40000.caffemodel', 22 | # caffe.TEST) 23 | 24 | net = caffe.Net(caffe_root+'/examples/nuclei/multi_class_nuclei/use_multi_class/deploy.prototxt', 25 | caffe_root+'/examples/nuclei/multi_class_nuclei/use_multi_class/use_multi_class_nuclei_big_1_iter_19186.caffemodel', 26 | caffe.TEST) 27 | 28 | # net = caffe.Net(caffe_root+'/examples/nuclei/deploy.prototxt', 29 | # caffe_root+'/examples/nuclei/nuclei_quick_relative_iter_100000.caffemodel', 30 | # caffe.TEST) 31 | # 63_LLM_YR4_cropped.jpg 32 | # 81_LLM_YR4.jpg 33 | im = mpimg.imread('/home/sanuj/Projects/BTP/data/63_LLM_YR4_cropped.jpg') 34 | height, width, channel = im.shape 35 | im = np.pad(im, ((p,p),(p,p),(0,0)), 'constant', constant_values=255) 36 | 37 | label_im = np.zeros((height, width, channel)) # label image 38 | prob_im = np.zeros((height, width, channel)) # probability image 39 | aux_im = np.zeros((height, width)) # auxiliary image 40 | # num_zeros = 0 41 | b = 0 42 | prob_x, prob_y = 0, 0 43 | for i in range(p, p + height): 44 | for j in range(p, p + width): 45 | if not (im[i,j] >= [220, 220, 220]).all(): 46 | aux_im[i-p, j-p] = 1 47 | for c in 
xrange(3): 48 | net.blobs['data'].data[b, c, :, :] = im[i-p:i+p+1, j-p:j+p+1, c] 49 | b += 1 50 | if b == 500: 51 | out = net.forward() 52 | print i, j 53 | 54 | for k in out['prob']: 55 | while not aux_im[prob_x, prob_y]: 56 | prob_y += 1 57 | if prob_y == width: 58 | prob_y = 0 59 | prob_x += 1 60 | 61 | if aux_im[prob_x, prob_y]: 62 | prob_im[prob_x, prob_y, :] = k[1] 63 | prob_y += 1 64 | if prob_y == width: 65 | prob_y = 0 66 | prob_x += 1 67 | 68 | b = 0 69 | # if out['prob'][0][1] > 0.5: 70 | # label_im[i-p,j-p,0]=label_im[i-p,j-p,1]=label_im[i-p,j-p,2] = 255* 71 | 72 | #plots probability mask 73 | prob_im = 255 * prob_im 74 | prob_im = prob_im.astype(int) 75 | scipy.misc.imsave('prob_im.jpg', prob_im) 76 | -------------------------------------------------------------------------------- /data/testing-data/20x/PrognosisTMABlock1_A_3_1_H&E.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/PrognosisTMABlock1_A_3_1_H&E.jpg -------------------------------------------------------------------------------- /data/testing-data/20x/tmp/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp/1.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp/2.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp/3.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp_20x/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp_20x/1.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp_20x/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp_20x/2.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp_20x/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp_20x/3.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp_40x/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp_40x/1.png 
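For reference, predict.py above builds its probability map by classifying a 33x33 patch centred on every non-background pixel (a pixel counts as background when all three channels are >= 220), forwarding the patches through the net in batches of 500. The sketch below restates that loop in Python 3 with the network call stubbed out so the indexing can be checked without Caffe; classify_batch, the demo image and the flush of the final partial batch are illustrative assumptions, not part of the original script.

import numpy as np

W = 33            # patch width used by the deploy nets (input_shape 1 x 3 x 33 x 33)
P = (W - 1) // 2  # padding so every pixel has a full patch around it
BG = 220          # pixels with all channels >= BG are skipped as background

def classify_batch(patches):
    """Stand-in for net.forward(); returns a placeholder nucleus probability per patch."""
    return np.full(len(patches), 0.5)

def probability_map(im):
    h, w, _ = im.shape
    padded = np.pad(im, ((P, P), (P, P), (0, 0)), mode="constant", constant_values=255)
    prob = np.zeros((h, w))
    coords, batch = [], []
    for i in range(h):
        for j in range(w):
            if (im[i, j] >= BG).all():               # background pixel: leave probability 0
                continue
            batch.append(padded[i:i + W, j:j + W])   # 33x33x3 patch centred on (i, j)
            coords.append((i, j))
            if len(batch) == 500:                    # same batch size as predict.py
                for (x, y), p in zip(coords, classify_batch(batch)):
                    prob[x, y] = p
                coords, batch = [], []
    if batch:                                        # flush the last partial batch
        for (x, y), p in zip(coords, classify_batch(batch)):
            prob[x, y] = p
    return prob

if __name__ == "__main__":
    demo = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
    print(probability_map(demo).shape)               # (64, 64)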
-------------------------------------------------------------------------------- /data/testing-data/20x/tmp_40x/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp_40x/2.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp_40x/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp_40x/3.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp_old/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp_old/1.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp_old/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp_old/2.png -------------------------------------------------------------------------------- /data/testing-data/20x/tmp_old/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/20x/tmp_old/3.png -------------------------------------------------------------------------------- /data/testing-data/40x/63_LLM_YR4_cropped.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/63_LLM_YR4_cropped.jpg -------------------------------------------------------------------------------- /data/testing-data/40x/81_LLM_YR4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/81_LLM_YR4.jpg -------------------------------------------------------------------------------- /data/testing-data/40x/84_LLM_YR4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/84_LLM_YR4.jpg -------------------------------------------------------------------------------- /data/testing-data/40x/84_LLM_YR4/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/84_LLM_YR4/1.png -------------------------------------------------------------------------------- /data/testing-data/40x/84_LLM_YR4/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/84_LLM_YR4/2.png 
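The .caffemodel and .solverstate snapshots, like the image files in this dump, appear only as raw.githubusercontent.com links rather than inline content. If the binaries are needed locally, a small helper such as the one below can fetch them; the file list and the downloads/ output directory are illustrative, and any of the raw links listed in this repository can be substituted.

import os
import urllib.request

FILES = [
    # one of the arch_4 snapshots listed above; add further raw links as needed
    "https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/"
    "e575a0107433758ab4fd49734d93c98a2b7db8e0/caffe-scripts/nuclei/three_class/"
    "arch_4/snap_iter_30000.caffemodel",
]

def fetch(urls, out_dir="downloads"):
    os.makedirs(out_dir, exist_ok=True)
    for url in urls:
        dest = os.path.join(out_dir, os.path.basename(url))
        if not os.path.exists(dest):      # skip files that were already downloaded
            urllib.request.urlretrieve(url, dest)
        print("ok", dest)

if __name__ == "__main__":
    fetch(FILES)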
-------------------------------------------------------------------------------- /data/testing-data/40x/84_LLM_YR4/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/84_LLM_YR4/3.png -------------------------------------------------------------------------------- /data/testing-data/40x/84_LLM_YR4/84_LLM_YR4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/84_LLM_YR4/84_LLM_YR4.jpg -------------------------------------------------------------------------------- /data/testing-data/40x/84_LLM_YR4_002.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/84_LLM_YR4_002.tif -------------------------------------------------------------------------------- /data/testing-data/40x/89_LLM_YR4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/89_LLM_YR4.jpg -------------------------------------------------------------------------------- /data/testing-data/40x/92_LLM_YR4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/92_LLM_YR4.jpg -------------------------------------------------------------------------------- /data/testing-data/40x/tmp/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/tmp/1.png -------------------------------------------------------------------------------- /data/testing-data/40x/tmp/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/tmp/2.png -------------------------------------------------------------------------------- /data/testing-data/40x/tmp/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/tmp/3.png -------------------------------------------------------------------------------- /data/testing-data/40x/tmp/84_LLM_YR4_002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/testing-data/40x/tmp/84_LLM_YR4_002.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/PrognosisTMABlock1_A_3_1_H&E.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/PrognosisTMABlock1_A_3_1_H&E.jpg -------------------------------------------------------------------------------- /data/training-data/20x/2px/PrognosisTMABlock1_E_4_5_H&E.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/PrognosisTMABlock1_E_4_5_H&E.jpg -------------------------------------------------------------------------------- /data/training-data/20x/2px/PrognosisTMABlock3_A_2_1_H&E.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/PrognosisTMABlock3_A_2_1_H&E.jpg -------------------------------------------------------------------------------- /data/training-data/20x/2px/PrognosisTMABlock3_A_2_1_H&E_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/PrognosisTMABlock3_A_2_1_H&E_1.jpg -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_A_1_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_A_1_1_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_A_1_4_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_A_1_4_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_A_3_3_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_A_3_3_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_A_4_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_A_4_1_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_B_2_2_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_B_2_2_H&E.png 
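The 20x training images above come in pairs: an H&E tile (PrognosisTMABlock*...) and a mask with a tm_ or TM_ prefix, presumably the ternary mask used by the three-class nets. The patch LMDBs consumed by the three_class train_test.prototxt files are presumably built from such pairs (a caffe_data_prep.py script exists under data/ but is not reproduced in this dump); the sketch below only illustrates the general idea of sampling centre-labelled patches and is an assumption about that preparation, not its actual code.

import numpy as np
import matplotlib.image as mpimg        # same reader predict.py uses

W = 101                  # patch width suggested by the ..._3_class_101 LMDB folder names
P = (W - 1) // 2

def sample_labelled_patches(image_path, mask_path, n=500, seed=0):
    """Return (patches, labels): W x W crops of the H&E tile, each labelled by the
    mask value at its centre pixel (remap that value to 0/1/2 according to however
    the tm_*/TM_* masks encode the three classes)."""
    rng = np.random.default_rng(seed)
    im = mpimg.imread(image_path)
    mask = mpimg.imread(mask_path)
    if mask.ndim == 3:                   # collapse RGB masks to a single channel
        mask = mask[..., 0]
    h, w = mask.shape
    patches, labels = [], []
    for _ in range(n):
        i = int(rng.integers(P, h - P))  # keep the whole patch inside the tile
        j = int(rng.integers(P, w - P))
        patches.append(im[i - P:i + P + 1, j - P:j + P + 1])
        labels.append(mask[i, j])        # raw centre value; remap per the mask encoding
    return np.asarray(patches), np.asarray(labels)

# e.g. sample_labelled_patches("PrognosisTMABlock1_A_3_1_H&E.jpg",
#                              "tm_PrognosisTMABlock1_A_3_1_H&E.png")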
-------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_C_3_4_H&E.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_C_3_4_H&E.jpg -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_E_4_5_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/PrognosisTMABlock1_E_4_5_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/PrognosisTMABlock3_A_2_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/PrognosisTMABlock3_A_2_1_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/PrognosisTMABlock3_A_2_1_H&E_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/PrognosisTMABlock3_A_2_1_H&E_1.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_A_1_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_A_1_1_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_A_1_4_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_A_1_4_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_A_3_3_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_A_3_3_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_A_4_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_A_4_1_H&E.png -------------------------------------------------------------------------------- 
/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_B_2_2_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_B_2_2_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_C_3_4_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_C_3_4_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_E_4_5_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock1_E_4_5_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock3_A_2_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock3_A_2_1_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock3_A_2_1_H&E_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/color-normalized/TM_PrognosisTMABlock3_A_2_1_H&E_1.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/tm_PrognosisTMABlock1_A_3_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/tm_PrognosisTMABlock1_A_3_1_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/tm_PrognosisTMABlock1_E_4_5_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/tm_PrognosisTMABlock1_E_4_5_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/tm_PrognosisTMABlock3_A_2_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/tm_PrognosisTMABlock3_A_2_1_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/2px/tm_PrognosisTMABlock3_A_2_1_H&E_1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/2px/tm_PrognosisTMABlock3_A_2_1_H&E_1.png -------------------------------------------------------------------------------- /data/training-data/20x/4px/PrognosisTMABlock1_A_3_1_H&E.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/4px/PrognosisTMABlock1_A_3_1_H&E.jpg -------------------------------------------------------------------------------- /data/training-data/20x/4px/PrognosisTMABlock1_E_4_5_H&E.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/4px/PrognosisTMABlock1_E_4_5_H&E.jpg -------------------------------------------------------------------------------- /data/training-data/20x/4px/PrognosisTMABlock3_A_2_1_H&E.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/4px/PrognosisTMABlock3_A_2_1_H&E.jpg -------------------------------------------------------------------------------- /data/training-data/20x/4px/maskWithBoundaryPrognosisTMABlock1_A_3_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/4px/maskWithBoundaryPrognosisTMABlock1_A_3_1_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/4px/tm_PrognosisTMABlock1_E_4_5_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/4px/tm_PrognosisTMABlock1_E_4_5_H&E.png -------------------------------------------------------------------------------- /data/training-data/20x/4px/tm_PrognosisTMABlock3_A_2_1_H&E.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/20x/4px/tm_PrognosisTMABlock3_A_2_1_H&E.png -------------------------------------------------------------------------------- /data/training-data/40x/63_LLM_YR4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/40x/63_LLM_YR4.jpg -------------------------------------------------------------------------------- /data/training-data/40x/78_RLM_YR4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/40x/78_RLM_YR4.jpg -------------------------------------------------------------------------------- /data/training-data/40x/bm_63_LLM_YR4.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/40x/bm_63_LLM_YR4.png -------------------------------------------------------------------------------- /data/training-data/40x/bm_78_RLM_YR4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/40x/bm_78_RLM_YR4.png -------------------------------------------------------------------------------- /data/training-data/40x/tm_63_LLM_YR4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/40x/tm_63_LLM_YR4.png -------------------------------------------------------------------------------- /data/training-data/40x/tm_78_RLM_YR4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neerajkumarvaid/Nuclei_Segmentation/e575a0107433758ab4fd49734d93c98a2b7db8e0/data/training-data/40x/tm_78_RLM_YR4.png -------------------------------------------------------------------------------- /data/training-data/caffe_data_prep.py: -------------------------------------------------------------------------------- 1 | # Prepares data by saving patches of size 'w' from the whole slide image. Also creates a 2 | # meta text file for Caffe which has the patch name and it's corresponding label. 3 | import numpy as np 4 | import matplotlib.image as mpimg 5 | import scipy.misc 6 | import os 7 | 8 | root = '/home/sanuj/Projects/' 9 | 10 | def saveIm(im, label, h, i, j, k): 11 | path = root + 'nuclei-net-data/20x/20-patients/PrognosisTMABlock5_1_5_1_H&E_001/' + str(k) + '/' 12 | # path = root + 'nuclei-net/data/training-data/63_LLM_YR4_3_class_31/' + str(k) + '/' 13 | name = str(h)+'_'+str(i)+'_'+str(j)+'_'+str(label)+'.jpg' 14 | if not os.path.exists(path): 15 | os.makedirs(path) 16 | text_file = open(path + 'meta.txt', "a") 17 | scipy.misc.imsave(path + name, im) 18 | text_file.write(path + name + ' ' + str(label) + '\n') 19 | print 'saved ' + name + ' in ' + path 20 | text_file.close() 21 | 22 | text_file = open(path + '../meta.txt', "a") 23 | scipy.misc.imsave(path + name, im) 24 | text_file.write(path + name + ' ' + str(label) + '\n') 25 | print 'saved ' + name + ' in ' + path 26 | text_file.close() 27 | 28 | file_name = root + 'nuclei-net-data/20x/20-patients/norm_PrognosisTMABlock5_1_5_1_H&E_001.tif' 29 | mask_name = root + 'nuclei-net-data/20x/20-patients/PrognosisTMABlock5_1_5_1_H&E_001.png' 30 | 31 | w = 51 #window size 32 | p = (w-1)/2 #padding 33 | 34 | im = mpimg.imread(file_name) 35 | # mask = mpimg.imread(mask_name).astype(int) # 2 classes 36 | mask = (mpimg.imread(mask_name)*2).astype(int) 37 | # mask = mask/np.amax(mask) 38 | height, width, channel = im.shape 39 | 40 | # Pad the image 41 | # image = np.pad(im, ((p,p),(p,p),(0,0)), 'constant', constant_values=255) 42 | # mask = np.pad(mask, p, 'constant') # default constant_values=0 43 | image = im 44 | 45 | h = 0; 46 | k = 0; 47 | # num_l = [0, 0] # 2 classes 48 | num_l = [0, 0, 0] # 3 classes 49 | for i in range(p, height-p, 2): 50 | for j in range(p, width-p, 2): 51 | if h >= 25000: 52 | k = k+1 53 | h = 0 54 | if not (image[i,j] >= [220, 220, 220]).all(): 55 | temp_x = image[i-p:i+p+1, j-p:j+p+1, :] 56 | saveIm(temp_x, mask[i, j], h, i, j, 
k) 57 | h = h+1 58 | num_l[mask[i, j]] = num_l[mask[i, j]]+1 59 | if mask[i,j] == 1 or mask[i,j] == 2: 60 | # if mask[i,j] == 1: 61 | # saveIm(np.fliplr(temp_x), mask[i, j], h, i, j, k) 62 | # h = h+1 63 | # num_l[mask[i, j]] = num_l[mask[i, j]]+1 64 | 65 | temp_x = np.rot90(temp_x) 66 | saveIm(temp_x, mask[i, j], h, i, j, k) 67 | h = h+1 68 | num_l[mask[i, j]] = num_l[mask[i, j]]+1 69 | 70 | # saveIm(np.fliplr(temp_x), mask[i, j], h, i, j, k) 71 | # h = h+1 72 | # num_l[mask[i, j]] = num_l[mask[i, j]]+1 73 | 74 | # temp_x = np.rot90(temp_x) 75 | # saveIm(temp_x, mask[i, j], h, i, j, k) 76 | # h = h+1 77 | # num_l[mask[i, j]] = num_l[mask[i, j]]+1 78 | 79 | # saveIm(np.fliplr(temp_x), mask[i, j], h, i, j, k) 80 | # h = h+1 81 | # num_l[mask[i, j]] = num_l[mask[i, j]]+1 82 | 83 | temp_x = np.rot90(temp_x) 84 | saveIm(temp_x, mask[i, j], h, i, j, k) 85 | h = h+1 86 | num_l[mask[i, j]] = num_l[mask[i, j]]+1 87 | 88 | # saveIm(np.fliplr(temp_x), mask[i, j], h, i, j, k) 89 | # h = h+1 90 | # num_l[mask[i, j]] = num_l[mask[i, j]]+1 91 | print num_l 92 | -------------------------------------------------------------------------------- /data/training-data/mat_to_torch.lua: -------------------------------------------------------------------------------- 1 | local matio = require 'matio' 2 | 3 | num = 120000 4 | c = 3 5 | w = 51 6 | 7 | ten = {} 8 | ten['data'] = torch.Tensor(num, c, w, w):byte() 9 | ten['label'] = torch.Tensor(num):byte() 10 | 11 | t = matio.load('PrognosisTMABlock1_E_4_5_H&E_51_2px_51000.mat') 12 | t['data'] = torch.reshape(t['data'], 51000, c, w, w) 13 | t['label'] = torch.reshape(t['label'], 51000) 14 | 15 | ten['data'][{ {1, 51000}, {}, {}, {} }] = t['data'] 16 | ten['label'][{ {1, 51000} }] = t['label'] 17 | 18 | t = matio.load('PrognosisTMABlock3_A_2_1_H&E_1_51_2px_51000.mat') 19 | t['data'] = torch.reshape(t['data'], 51000, c, w, w) 20 | t['label'] = torch.reshape(t['label'], 51000) 21 | 22 | ten['data'][{ {51001, 102000}, {}, {}, {} }] = t['data'] 23 | ten['label'][{ {51001, 102000} }] = t['label'] 24 | 25 | t = matio.load('PrognosisTMABlock3_A_2_1_H&E_51_2px_18000.mat') 26 | t['data'] = torch.reshape(t['data'], 18000, c, w, w) 27 | t['label'] = torch.reshape(t['label'], 18000) 28 | 29 | ten['data'][{ {102001, 120000}, {}, {}, {} }] = t['data'] 30 | ten['label'][{ {102001, 120000} }] = t['label'] 31 | 32 | torch.save('/home/sanuj/Projects/20x_2px_train_120000.t7', ten) -------------------------------------------------------------------------------- /data/training-data/theano_data_prep.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.image as mpimg 3 | import scipy.misc 4 | from sklearn.utils import shuffle 5 | import scipy.io as sio 6 | 7 | root = '/home/sanuj/Projects/' 8 | 9 | file_name = root + 'nuclei-net/data/training-data/20x/2px/PrognosisTMABlock3_A_2_1_H&E.jpg' 10 | mask_name = root + 'nuclei-net/data/training-data/20x/2px/tm_PrognosisTMABlock3_A_2_1_H&E.png' 11 | # file_name = root + 'nuclei-net/data/training-data/63_LLM_YR4.jpg' 12 | # mask_name = root + 'nuclei-net/data/training-data/tm_63_LLM_YR4.png' 13 | w = 51 14 | p = (w-1)/2 15 | num = 18000 16 | stride = 8 17 | 18 | im = mpimg.imread(file_name).astype('uint8') 19 | mask = (mpimg.imread(mask_name)*2).astype('uint8') 20 | # mask = mask/np.amax(mask) 21 | height, width, channel = im.shape 22 | image = [] 23 | for i in range(channel): 24 | image.append(im[:,:,i]) 25 | image = np.array(image) 26 | # image = np.pad(image, 
((0,0),(p,p),(p,p)), 'constant', constant_values=255) 27 | # mask = np.pad(mask, p, 'constant') # default constant_values=0 28 | 29 | x = [] 30 | 31 | for i in range(0, np.amax(mask)+1): 32 | x.append([]) 33 | 34 | h = 0; 35 | done = False 36 | # for i in range(p, p+height): 37 | for i in range(p, height-p): 38 | # for i in range(325, height-p): 39 | if done: 40 | break 41 | # for j in range(p, p+width): 42 | for j in range(p, width-p, stride): 43 | # for j in range(55, width-p): 44 | total_len = 0 45 | for k in x: 46 | total_len += len(k) 47 | if total_len == num: 48 | done = True 49 | break 50 | if not (image[0,i,j] >= 220 and image[1,i,j] >= 220 and image[2,i,j] >= 220): 51 | temp_x = image[:, i-p:i+p+1, j-p:j+p+1] 52 | if len(x[mask[i,j]]) < num/3: 53 | print 'Label: ' + str(mask[i,j]) + ' len: ' + str(total_len) + ' i: ' + str(i) + ' j: ' + str(j) 54 | x[mask[i,j]].append(temp_x.flatten()) 55 | if mask[i,j]: 56 | for k in range(0, 3): 57 | if len(x[mask[i,j]]) < num/3: 58 | temp_x = np.rot90(temp_x) 59 | print 'Label: ' + str(mask[i,j]) + ' len: ' + str(total_len) + ' i: ' + str(i) + ' j: ' + str(j) 60 | x[mask[i,j]].append(temp_x.flatten()) 61 | else: 62 | break 63 | 64 | data = np.concatenate(x) 65 | label = [] 66 | for i in range(num): 67 | label.append(int(i/(num/3))) 68 | 69 | label = np.array(label).astype('uint8') 70 | 71 | data, label = shuffle(data, label, random_state=0) 72 | 73 | dict = {'data': data, 'label': label} 74 | 75 | sio.savemat(root + 'PrognosisTMABlock3_A_2_1_H&E_' + str(w) + '_2px_18000.mat', dict) -------------------------------------------------------------------------------- /data/training-data/torch_data_prep.lua: -------------------------------------------------------------------------------- 1 | require 'io' 2 | require 'torch' 3 | require 'image' 4 | 5 | file_name = '/home/sanuj/Projects/nuclei-net-data/fine-tune/2/validate.txt' 6 | -- file_name = '/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class_31/train_small.txt' 7 | num_images = 10000*3 8 | num_channels = 3 9 | width = 51 10 | height = 51 11 | 12 | file = io.open(file_name, 'rb') 13 | data = torch.Tensor(num_images, num_channels, width, height):byte() 14 | label = torch.Tensor(num_images):byte() 15 | counter = 1 16 | 17 | for line in file:lines() do 18 | print(counter) 19 | image_name, image_label = line:split(' ')[1], line:split(' ')[2] 20 | data[counter] = image.load(image_name, num_channels, 'byte') 21 | label[counter] = image_label 22 | counter = counter + 1 23 | end 24 | 25 | torch.save('/home/sanuj/Projects/nuclei-net-data/fine-tune/2/validate.t7', {data = data, label = label}) 26 | -- torch.save('/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class_31/train_small.t7', {data = data, label = label}) 27 | -------------------------------------------------------------------------------- /predict_full_mask.lua: -------------------------------------------------------------------------------- 1 | require 'dp' 2 | require 'cutorch' 3 | require 'optim' 4 | require 'image' 5 | require 'cunn' 6 | require 'os' 7 | 8 | -- matio = require 'matio' 9 | 10 | ws = 51 11 | batch_size = 1500 --1600 12 | classes = 3 13 | 14 | -- image_dir = '/home/sanuj/Downloads/anmol_maps' 15 | -- image_name = 'K10_13332_8866.jpg' 16 | 17 | image_dir = './data/testing-data/40x' 18 | image_name = '63_LLM_YR4_cropped.jpg' 19 | 20 | input_image = image.load(image_dir .. '/' .. 
image_name, 3, 'byte') 21 | channels = (#input_image)[1]; w = (#input_image)[2]; h = (#input_image)[3] 22 | 23 | -- mirror pad 24 | -- p = ws-1 25 | -- im = torch.ByteTensor(channels, w+p, h+p):zero() 26 | -- im[{ {}, {p/2+1, w+p/2}, {p/2+1, h+p/2} }] = input_image 27 | -- h = h+p 28 | -- w = w+p 29 | ------------------------------------------------------------ 30 | p = ws-1 31 | module = nn.SpatialReflectionPadding(p/2, p/2, p/2, p/2) 32 | module:cuda() 33 | im = module:forward(input_image:cuda()) 34 | im = im:byte() 35 | h = h+p 36 | w = w+p 37 | ------------------------------------------------------------ 38 | 39 | -- map_path = '/home/sanuj/Downloads/anmol_maps/K10N_13332_8866_binary.jpg' 40 | -- map = image.load(map_path) 41 | -- map = map:byte() 42 | 43 | -- for x = 1, (#map)[2] do 44 | -- for y = 1, (#map)[3] do 45 | -- print('x: ' .. x .. 'y: ' .. y) 46 | -- if map[1][x][y] == 0 then 47 | -- im[1][x][y] = 1 48 | -- im[2][x][y] = 1 49 | -- im[3][x][y] = 1 50 | -- end 51 | -- end 52 | -- end 53 | 54 | -- print('Done superimposing.') 55 | 56 | -- file_num = 1 57 | -- p = (ws-1)/2 58 | os.execute("mkdir " .. image_dir .. '/' .. 'results') 59 | -- xp = torch.load('/home/sanuj/save/amitpc-HP-Z620-Workstation:1455425081:2.dat') 60 | -- xp = torch.load('/home/sanuj/Projects/models/amitpc-HP-Z620-Workstation:1457692046:1.dat') -- final 20x 61 | -- /home/sanuj/Projects/models/train_7259.dat 62 | --xp = torch.load('/home/sanuj/Projects/models/train_701_val_734.dat') -- latest 20x 63 | -- xp = torch.load('/home/sanuj/save/LYoga:1462988633:1.dat') -- new 20x 64 | xp = torch.load('amitpc-HP-Z620-Workstation_1454011145_1.dat') -- final 40x 65 | -- xp = torch.load('/home/sanuj/save/LYoga:1454304060:1.dat') 66 | model = xp:model() 67 | 68 | -- print((h-ws+1)*(w-ws+1)*channels*ws*ws) 69 | -- cropped = torch.Tensor((h-ws+1)*(w-ws+1), channels, ws, ws):byte() 70 | -- labels = torch.Tensor((h-ws+1)*(w-ws+1), classes) 71 | 72 | cropped = torch.Tensor(batch_size, channels, ws, ws):byte() 73 | labels = torch.Tensor((h-ws+1)*(w-ws+1), classes) 74 | 75 | -- batch_done = true 76 | counter = 0 77 | last_counter = 1 78 | 79 | for x = 0, h-ws do 80 | for y = 0, w-ws do 81 | print('Counter: ' .. counter .. ' cropped: ' .. (counter % batch_size)+1) 82 | cropped[{ {(counter % batch_size)+1}, {}, {}, {} }] = image.crop(im, x, y, x+ws, y+ws) 83 | if (counter+1) % batch_size == 0 then 84 | print('PREDICTING!!!') 85 | temp = model:forward(cropped[{ {1, batch_size}, {}, {}, {} }]):exp() 86 | labels[{ {(counter+1)-batch_size+1, counter+1}, {} }] = temp:double() 87 | last_counter = counter 88 | end 89 | counter = counter + 1 90 | end 91 | end 92 | 93 | if last_counter ~= (counter - 1) then 94 | temp = model:forward(cropped[{ {1, counter % batch_size}, {}, {}, {} }]):exp() 95 | labels[{ {last_counter+2, counter}, {} }] = temp:double() 96 | end 97 | 98 | for i = 1, channels do 99 | image.save(image_dir .. '/results/' .. i .. 
'.png', image.vflip(torch.reshape(labels[{ {}, {i} }], h-ws+1, w-ws+1))) 100 | end 101 | -------------------------------------------------------------------------------- /torch-scripts/cnn_train_test_valid.lua: -------------------------------------------------------------------------------- 1 | require 'dp' 2 | require 'optim' 3 | require 'cutorch' 4 | require 'cunn' 5 | 6 | --[[command line arguments]]-- 7 | cmd = torch.CmdLine() 8 | cmd:text() 9 | cmd:text('Image Classification using Convolution Neural Network Training/Optimization') 10 | cmd:text('Example:') 11 | cmd:text('$> th convolutionneuralnetwork.lua --batchSize 128 --momentum 0.5') 12 | cmd:text('Options:') 13 | cmd:option('--learningRate', 0.01, 'learning rate at t=0') 14 | cmd:option('--lrDecay', 'none', 'type of learning rate decay : adaptive | linear | schedule | none') 15 | cmd:option('--minLR', 0.00001, 'minimum learning rate') 16 | cmd:option('--saturateEpoch', 3000, 'epoch at which linear decayed LR will reach minLR') 17 | cmd:option('--schedule', '{}', 'learning rate schedule') 18 | cmd:option('--maxWait', 4, 'maximum number of epochs to wait for a new minima to be found. After that, the learning rate is decayed by decayFactor.') 19 | cmd:option('--decayFactor', 0.00004, 'factor by which learning rate is decayed for adaptive decay.') 20 | cmd:option('--maxOutNorm', 1, 'max norm each layers output neuron weights') 21 | cmd:option('--momentum', 0.6, 'momentum') 22 | cmd:option('--channelSize', '{25,50,80}', 'Number of output channels for each convolution layer.') 23 | cmd:option('--kernelSize', '{4,5,6}', 'kernel size of each convolution layer. Height = Width') 24 | cmd:option('--kernelStride', '{1,1,1}', 'kernel stride of each convolution layer. Height = Width') 25 | cmd:option('--poolSize', '{2,2,2}', 'size of the max pooling of each convolution layer. Height = Width') 26 | cmd:option('--poolStride', '{2,2,2}', 'stride of the max pooling of each convolution layer. 
Height = Width') 27 | cmd:option('--padding', false, 'add math.floor(kernelSize/2) padding to the input of each convolution') 28 | cmd:option('--batchSize', 256, 'number of examples per batch') 29 | cmd:option('--cuda', true, 'use CUDA') 30 | cmd:option('--useDevice', 1, 'sets the device (GPU) to use') 31 | cmd:option('--maxEpoch', 2000, 'maximum number of epochs to run') 32 | cmd:option('--maxTries', 5000, 'maximum number of epochs to try to find a better local minima for early-stopping') 33 | cmd:option('--dataset', 'None', 'Mnist | NotMnist | Cifar10 | Cifar100 | Svhn | ImageSource') 34 | cmd:option('--trainPath', '/home/sanuj/Projects/nuclei-net-data/20x/20-patients/dp-imagesource/train', 'Where to look for training images') 35 | cmd:option('--validPath', '/home/sanuj/Projects/nuclei-net-data/20x/20-patients/dp-imagesource/validate', 'Where to look for validation images') 36 | cmd:option('--metaPath', '/home/sanuj/Projects/nuclei-net-data/20x/20-patients/dp-imagesource', 'Where to cache meta data') 37 | cmd:option('--cacheMode', 'writeonce', 'cache mode of FaceDetection (see SmallImageSource constructor for details)') 38 | cmd:option('--loadSize', '3,51,51', 'Image size') 39 | cmd:option('--sampleSize', '.', 'The size to use for cropped images') 40 | cmd:option('--standardize', true, 'apply Standardize preprocessing') 41 | cmd:option('--zca', false, 'apply Zero-Component Analysis whitening') 42 | cmd:option('--lecunlcn', false, 'apply Yann LeCun Local Contrast Normalization (recommended)') 43 | cmd:option('--activation', 'ReLU', 'transfer function like ReLU, Tanh, Sigmoid') 44 | cmd:option('--hiddenSize', '{1024,1024}', 'size of the dense hidden layers after the convolution') 45 | cmd:option('--batchNorm', false, 'use batch normalization. dropout is mostly redundant with this') 46 | cmd:option('--dropout', true, 'use dropout') 47 | cmd:option('--dropoutProb', '{0.1,0.2,0.25,0.5,0.5}', 'dropout probabilities') 48 | cmd:option('--accUpdate', false, 'accumulate gradients inplace') 49 | cmd:option('--progress', true, 'print progress bar') 50 | cmd:option('--silent', false, 'dont print anything to stdout') 51 | cmd:option('--convertData', true, 'convert data into Data Source') 52 | cmd:option('--loadModel', false, 'resume a previous experiment. 
Specify below the path to the saved model.') 53 | cmd:option('--loadModelPath', '/home/sanuj/save/LYoga:1462988633:1.dat', 'path from where to load model.') 54 | cmd:text() 55 | opt = cmd:parse(arg or {}) 56 | if not opt.silent then 57 | table.print(opt) 58 | end 59 | 60 | opt.channelSize = table.fromString(opt.channelSize) 61 | opt.kernelSize = table.fromString(opt.kernelSize) 62 | opt.kernelStride = table.fromString(opt.kernelStride) 63 | opt.poolSize = table.fromString(opt.poolSize) 64 | opt.poolStride = table.fromString(opt.poolStride) 65 | opt.dropoutProb = table.fromString(opt.dropoutProb) 66 | opt.hiddenSize = table.fromString(opt.hiddenSize) 67 | opt.loadSize = opt.loadSize:split(',') 68 | for i = 1, #opt.loadSize do 69 | opt.loadSize[i] = tonumber(opt.loadSize[i]) 70 | end 71 | opt.sampleSize = opt.sampleSize:split(',') 72 | for i = 1, #opt.sampleSize do 73 | opt.sampleSize[i] = tonumber(opt.sampleSize[i]) 74 | end 75 | 76 | --[[preprocessing]]-- 77 | local input_preprocess = {} 78 | if opt.standardize then 79 | table.insert(input_preprocess, dp.Standardize()) 80 | end 81 | if opt.zca then 82 | table.insert(input_preprocess, dp.ZCA()) 83 | end 84 | if opt.lecunlcn then 85 | table.insert(input_preprocess, dp.GCN()) 86 | table.insert(input_preprocess, dp.LeCunLCN{progress=true}) 87 | end 88 | 89 | --[[data]]-- 90 | 91 | print(opt.loadSize) 92 | 93 | local ds 94 | if opt.convertData then 95 | nuclei_train = torch.load('/home/sanuj/Projects/nuclei-net-data/fine-tune/dummy/train.t7') 96 | -- nuclei_valid = torch.load('/home/sanuj/Projects/nuclei-net-data/20x/20-patients/validate.t7') 97 | nuclei_train.data = nuclei_train.data:double() 98 | -- nuclei_valid.data = nuclei_valid.data:double() 99 | -- local n_valid = (#nuclei_valid.label)[1] 100 | local n_train = (#nuclei_train.label)[1] 101 | 102 | local train_input = dp.ImageView('bchw', nuclei_train.data:narrow(1, 1, n_train)) 103 | local train_target = dp.ClassView('b', nuclei_train.label:narrow(1, 1, n_train)) 104 | -- local valid_input = dp.ImageView('bchw', nuclei_valid.data:narrow(1, 1, n_valid)) 105 | -- local valid_target = dp.ClassView('b', nuclei_valid.label:narrow(1, 1, n_valid)) 106 | 107 | train_target:setClasses({0, 1, 2}) 108 | -- valid_target:setClasses({0, 1, 2}) 109 | 110 | -- 3. wrap views into datasets 111 | 112 | local train = dp.DataSet{inputs=train_input,targets=train_target,which_set='train'} 113 | -- local valid = dp.DataSet{inputs=valid_input,targets=valid_target,which_set='valid'} 114 | 115 | -- 4. 
wrap datasets into datasource 116 | 117 | -- ds = dp.DataSource{train_set=train,valid_set=valid} 118 | ds = dp.DataSource{train_set=train} 119 | ds:classes{0, 1, 2} 120 | elseif opt.dataset == 'ImageSource' then 121 | ds = dp.ImageSource{load_size = opt.loadSize, sample_size = opt.loadSize, train_path = opt.trainPath, valid_path = opt.validPath, meta_path = opt.metaPath, verbose = not opt.silent} 122 | else 123 | ds = torch.load('/home/sanuj/Projects/nuclei-net/data/training-data/dp_test_train.t7') 124 | end 125 | 126 | if not opt.loadModel then 127 | function dropout(depth) 128 | return opt.dropout and (opt.dropoutProb[depth] or 0) > 0 and nn.Dropout(opt.dropoutProb[depth]) 129 | end 130 | 131 | --[[Model]]-- 132 | 133 | cnn = nn.Sequential() 134 | 135 | -- convolutional and pooling layers 136 | depth = 1 137 | inputSize = ds:imageSize('c') or opt.loadSize[1] 138 | for i=1,#opt.channelSize do 139 | if opt.dropout and (opt.dropoutProb[depth] or 0) > 0 then 140 | -- dropout can be useful for regularization 141 | cnn:add(nn.SpatialDropout(opt.dropoutProb[depth])) 142 | end 143 | cnn:add(nn.SpatialConvolution( 144 | inputSize, opt.channelSize[i], 145 | opt.kernelSize[i], opt.kernelSize[i], 146 | opt.kernelStride[i], opt.kernelStride[i], 147 | opt.padding and math.floor(opt.kernelSize[i]/2) or 0 148 | )) 149 | if opt.batchNorm then 150 | -- batch normalization can be awesome 151 | cnn:add(nn.SpatialBatchNormalization(opt.channelSize[i])) 152 | end 153 | cnn:add(nn[opt.activation]()) 154 | if opt.poolSize[i] and opt.poolSize[i] > 0 then 155 | cnn:add(nn.SpatialMaxPooling( 156 | opt.poolSize[i], opt.poolSize[i], 157 | opt.poolStride[i] or opt.poolSize[i], 158 | opt.poolStride[i] or opt.poolSize[i] 159 | )) 160 | end 161 | inputSize = opt.channelSize[i] 162 | depth = depth + 1 163 | end 164 | -- get output size of convolutional layers 165 | outsize = cnn:outside{1,ds:imageSize('c'),ds:imageSize('h'),ds:imageSize('w')} 166 | inputSize = outsize[2]*outsize[3]*outsize[4] 167 | dp.vprint(not opt.silent, "input to dense layers has: "..inputSize.." 
neurons") 168 | 169 | cnn:insert(nn.Convert(ds:ioShapes(), 'bchw'), 1) 170 | 171 | -- dense hidden layers 172 | cnn:add(nn.Collapse(3)) 173 | for i,hiddenSize in ipairs(opt.hiddenSize) do 174 | if opt.dropout and (opt.dropoutProb[depth] or 0) > 0 then 175 | cnn:add(nn.Dropout(opt.dropoutProb[depth])) 176 | end 177 | cnn:add(nn.Linear(inputSize, hiddenSize)) 178 | if opt.batchNorm then 179 | cnn:add(nn.BatchNormalization(hiddenSize)) 180 | end 181 | cnn:add(nn['ReLU']()) 182 | inputSize = hiddenSize 183 | depth = depth + 1 184 | end 185 | 186 | -- output layer 187 | if opt.dropout and (opt.dropoutProb[depth] or 0) > 0 then 188 | cnn:add(nn.Dropout(opt.dropoutProb[depth])) 189 | end 190 | cnn:add(nn.Linear(inputSize, #(ds:classes()))) 191 | cnn:add(nn.LogSoftMax()) 192 | 193 | --[[Propagators]]-- 194 | if opt.lrDecay == 'adaptive' then 195 | ad = dp.AdaptiveDecay{max_wait = opt.maxWait, decay_factor=opt.decayFactor} 196 | elseif opt.lrDecay == 'linear' then 197 | opt.decayFactor = (opt.minLR - opt.learningRate)/opt.saturateEpoch 198 | end 199 | 200 | train = dp.Optimizer{ 201 | acc_update = opt.accUpdate, 202 | loss = nn.ModuleCriterion(nn.ClassNLLCriterion(), nil, nn.Convert()), 203 | epoch_callback = function(model, report) -- called every epoch 204 | if report.epoch > 0 then 205 | if opt.lrDecay == 'adaptive' then 206 | opt.learningRate = opt.learningRate*ad.decay 207 | ad.decay = 1 208 | elseif opt.lrDecay == 'schedule' and opt.schedule[report.epoch] then 209 | opt.learningRate = opt.schedule[report.epoch] 210 | elseif opt.lrDecay == 'linear' then 211 | opt.learningRate = opt.learningRate + opt.decayFactor 212 | end 213 | opt.learningRate = math.max(opt.minLR, opt.learningRate) 214 | if not opt.silent then 215 | print("learningRate", opt.learningRate) 216 | end 217 | end 218 | end, 219 | callback = function(model, report) -- called every batch 220 | -- the ordering here is important 221 | if opt.accUpdate then 222 | model:accUpdateGradParameters(model.dpnn_input, model.output, opt.learningRate) 223 | else 224 | model:updateGradParameters(opt.momentum) -- affects gradParams 225 | model:updateParameters(opt.learningRate) -- affects params 226 | end 227 | model:maxParamNorm(opt.maxOutNorm) -- affects params 228 | model:zeroGradParameters() -- affects gradParams 229 | end, 230 | feedback = dp.Confusion(), 231 | sampler = dp.ShuffleSampler{batch_size = opt.batchSize}, 232 | progress = opt.progress 233 | } 234 | valid = ds:validSet() and dp.Evaluator{ 235 | feedback = dp.Confusion(), 236 | sampler = dp.Sampler{batch_size = opt.batchSize} 237 | } 238 | test = ds:testSet() and dp.Evaluator{ 239 | feedback = dp.Confusion(), 240 | sampler = dp.Sampler{batch_size = opt.batchSize} 241 | } 242 | 243 | --[[Experiment]]-- 244 | xp = dp.Experiment{ 245 | model = cnn, 246 | optimizer = train, 247 | validator = ds:validSet() and valid, 248 | tester = ds:testSet() and test, 249 | observer = { 250 | dp.FileLogger(), 251 | dp.EarlyStopper{ 252 | error_report = {'optimizer','feedback','confusion','accuracy'}, 253 | maximize = true, 254 | max_epochs = opt.maxTries 255 | }, 256 | ad 257 | }, 258 | random_seed = os.time(), 259 | max_epoch = opt.maxEpoch 260 | } 261 | xp:verbose(not opt.silent) 262 | else 263 | train = dp.Optimizer{ 264 | acc_update = opt.accUpdate, 265 | loss = nn.ModuleCriterion(nn.ClassNLLCriterion(), nil, nn.Convert()), 266 | epoch_callback = function(model, report) -- called every epoch 267 | if report.epoch > 0 then 268 | if opt.lrDecay == 'adaptive' then 269 | opt.learningRate = 
opt.learningRate*ad.decay 270 | ad.decay = 1 271 | elseif opt.lrDecay == 'schedule' and opt.schedule[report.epoch] then 272 | opt.learningRate = opt.schedule[report.epoch] 273 | elseif opt.lrDecay == 'linear' then 274 | opt.learningRate = opt.learningRate + opt.decayFactor 275 | end 276 | opt.learningRate = math.max(opt.minLR, opt.learningRate) 277 | if not opt.silent then 278 | print("learningRate", opt.learningRate) 279 | end 280 | end 281 | end, 282 | callback = function(model, report) -- called every batch 283 | -- the ordering here is important 284 | if opt.accUpdate then 285 | model:accUpdateGradParameters(model.dpnn_input, model.output, opt.learningRate) 286 | else 287 | model:updateGradParameters(opt.momentum) -- affects gradParams 288 | model:updateParameters(opt.learningRate) -- affects params 289 | end 290 | model:maxParamNorm(opt.maxOutNorm) -- affects params 291 | model:zeroGradParameters() -- affects gradParams 292 | end, 293 | feedback = dp.Confusion(), 294 | sampler = dp.ShuffleSampler{batch_size = opt.batchSize}, 295 | progress = opt.progress 296 | } 297 | loaded_xp = torch.load(opt.loadModelPath) 298 | xp = dp.Experiment{ 299 | model = loaded_xp:model(), 300 | optimizer = train, 301 | validator = ds:validSet() and valid, 302 | tester = ds:testSet() and test, 303 | observer = { 304 | dp.FileLogger(), 305 | dp.EarlyStopper{ 306 | error_report = {'optimizer','feedback','confusion','accuracy'}, 307 | maximize = true, 308 | max_epochs = opt.maxTries 309 | }, 310 | ad 311 | }, 312 | random_seed = loaded_xp:randomSeed(), 313 | max_epoch = opt.maxEpoch 314 | } 315 | xp:verbose(not opt.silent) 316 | end 317 | 318 | --[[GPU or CPU]]-- 319 | if opt.cuda then 320 | require 'cutorch' 321 | require 'cunn' 322 | cutorch.setDevice(opt.useDevice) 323 | xp:cuda() 324 | end 325 | 326 | if not opt.silent then 327 | print"Model:" 328 | print(xp:model()) 329 | end 330 | 331 | xp:run(ds) 332 | -------------------------------------------------------------------------------- /torch-scripts/cnn_train_test_valid_old.lua: -------------------------------------------------------------------------------- 1 | require 'dp' 2 | -- require 'os' 3 | 4 | --[[command line arguments]]-- 5 | cmd = torch.CmdLine() 6 | cmd:text() 7 | cmd:text('Image Classification using Convolution Neural Network Training/Optimization') 8 | cmd:text('Example:') 9 | cmd:text('$> th convolutionneuralnetwork.lua --batchSize 128 --momentum 0.5') 10 | cmd:text('Options:') 11 | cmd:option('--learningRate', 0.2, 'learning rate at t=0') 12 | cmd:option('--lrDecay', 'linear', 'type of learning rate decay : adaptive | linear | schedule | none') 13 | cmd:option('--minLR', 0.00001, 'minimum learning rate') 14 | cmd:option('--saturateEpoch', 300, 'epoch at which linear decayed LR will reach minLR') 15 | cmd:option('--schedule', '{}', 'learning rate schedule') 16 | cmd:option('--maxWait', 4, 'maximum number of epochs to wait for a new minima to be found. After that, the learning rate is decayed by decayFactor.') 17 | cmd:option('--decayFactor', 0.001, 'factor by which learning rate is decayed for adaptive decay.') 18 | cmd:option('--maxOutNorm', 1, 'max norm each layers output neuron weights') 19 | cmd:option('--momentum', 0.6, 'momentum') 20 | cmd:option('--channelSize', '{30,60,80}', 'Number of output channels for each convolution layer.') 21 | cmd:option('--kernelSize', '{4,6,5}', 'kernel size of each convolution layer. Height = Width') 22 | cmd:option('--kernelStride', '{1,1,1}', 'kernel stride of each convolution layer. 
Height = Width') 23 | cmd:option('--poolSize', '{2,2,2}', 'size of the max pooling of each convolution layer. Height = Width') 24 | cmd:option('--poolStride', '{2,2,2}', 'stride of the max pooling of each convolution layer. Height = Width') 25 | cmd:option('--padding', false, 'add math.floor(kernelSize/2) padding to the input of each convolution') 26 | cmd:option('--batchSize', 128, 'number of examples per batch') 27 | cmd:option('--cuda', true, 'use CUDA') 28 | cmd:option('--useDevice', 1, 'sets the device (GPU) to use') 29 | cmd:option('--maxEpoch', 200, 'maximum number of epochs to run') 30 | cmd:option('--maxTries', 500, 'maximum number of epochs to try to find a better local minima for early-stopping') 31 | cmd:option('--dataset', 'Mnist', 'which dataset to use : Mnist | NotMnist | Cifar10 | Cifar100 | Svhn | ImageSource') 32 | cmd:option('--trainPath', '.', 'Where to look for training images') 33 | cmd:option('--validPath', '.', 'Where to look for validation images') 34 | cmd:option('--metaPath', '.', 'Where to cache meta data') 35 | cmd:option('--cacheMode', 'writeonce', 'cache mode of FaceDetection (see SmallImageSource constructor for details)') 36 | cmd:option('--loadSize', '', 'Image size') 37 | cmd:option('--sampleSize', '.', 'The size to use for cropped images') 38 | cmd:option('--standardize', false, 'apply Standardize preprocessing') 39 | cmd:option('--zca', false, 'apply Zero-Component Analysis whitening') 40 | cmd:option('--lecunlcn', false, 'apply Yann LeCun Local Contrast Normalization (recommended)') 41 | cmd:option('--activation', 'ReLU', 'transfer function like ReLU, Tanh, Sigmoid') 42 | cmd:option('--hiddenSize', '{1024,1024}', 'size of the dense hidden layers after the convolution') 43 | cmd:option('--batchNorm', false, 'use batch normalization. 
dropout is mostly redundant with this') 44 | cmd:option('--dropout', true, 'use dropout') 45 | cmd:option('--dropoutProb', '{0.1,0.2,0.25,0.5,0.5}', 'dropout probabilities') 46 | cmd:option('--accUpdate', false, 'accumulate gradients inplace') 47 | cmd:option('--progress', false, 'print progress bar') 48 | cmd:option('--silent', false, 'dont print anything to stdout') 49 | cmd:option('--convertData', true, 'convert data into Data Source') 50 | cmd:text() 51 | opt = cmd:parse(arg or {}) 52 | if not opt.silent then 53 | table.print(opt) 54 | end 55 | 56 | opt.channelSize = table.fromString(opt.channelSize) 57 | opt.kernelSize = table.fromString(opt.kernelSize) 58 | opt.kernelStride = table.fromString(opt.kernelStride) 59 | opt.poolSize = table.fromString(opt.poolSize) 60 | opt.poolStride = table.fromString(opt.poolStride) 61 | opt.dropoutProb = table.fromString(opt.dropoutProb) 62 | opt.hiddenSize = table.fromString(opt.hiddenSize) 63 | opt.loadSize = opt.loadSize:split(',') 64 | for i = 1, #opt.loadSize do 65 | opt.loadSize[i] = tonumber(opt.loadSize[i]) 66 | end 67 | opt.sampleSize = opt.sampleSize:split(',') 68 | for i = 1, #opt.sampleSize do 69 | opt.sampleSize[i] = tonumber(opt.sampleSize[i]) 70 | end 71 | 72 | --[[preprocessing]]-- 73 | local input_preprocess = {} 74 | if opt.standardize then 75 | table.insert(input_preprocess, dp.Standardize()) 76 | end 77 | if opt.zca then 78 | table.insert(input_preprocess, dp.ZCA()) 79 | end 80 | if opt.lecunlcn then 81 | table.insert(input_preprocess, dp.GCN()) 82 | table.insert(input_preprocess, dp.LeCunLCN{progress=true}) 83 | end 84 | 85 | --[[data]]-- 86 | 87 | local ds 88 | if opt.convertData then 89 | nuclei_train = torch.load('/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class/train_smaller.t7') 90 | nuclei_valid = torch.load('/home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class/test_smaller.t7') 91 | nuclei_train.data = nuclei_train.data:double() 92 | nuclei_valid.data = nuclei_valid.data:double() 93 | local n_valid = (#nuclei_valid.label)[1] 94 | local n_train = (#nuclei_train.label)[1] 95 | 96 | local train_input = dp.ImageView('bchw', nuclei_train.data:narrow(1, 1, n_train)) 97 | local train_target = dp.ClassView('b', nuclei_train.label:narrow(1, 1, n_train)) 98 | local valid_input = dp.ImageView('bchw', nuclei_valid.data:narrow(1, 1, n_valid)) 99 | local valid_target = dp.ClassView('b', nuclei_valid.label:narrow(1, 1, n_valid)) 100 | 101 | train_target:setClasses({0, 1, 2}) 102 | valid_target:setClasses({0, 1, 2}) 103 | 104 | -- 3. wrap views into datasets 105 | 106 | local train = dp.DataSet{inputs=train_input,targets=train_target,which_set='train'} 107 | local valid = dp.DataSet{inputs=valid_input,targets=valid_target,which_set='valid'} 108 | 109 | -- 4. 
wrap datasets into datasource 110 | 111 | ds = dp.DataSource{train_set=train,valid_set=valid} 112 | ds:classes{0, 1, 2} 113 | else 114 | ds = torch.load('/home/sanuj/Projects/nuclei-net/data/training-data/dp_test_train.t7') 115 | end 116 | 117 | -- os.exit() 118 | 119 | function dropout(depth) 120 | return opt.dropout and (opt.dropoutProb[depth] or 0) > 0 and nn.Dropout(opt.dropoutProb[depth]) 121 | end 122 | 123 | --[[Model]]-- 124 | 125 | cnn = nn.Sequential() 126 | 127 | -- convolutional and pooling layers 128 | depth = 1 129 | inputSize = ds:imageSize('c') or opt.loadSize[1] 130 | for i=1,#opt.channelSize do 131 | if opt.dropout and (opt.dropoutProb[depth] or 0) > 0 then 132 | -- dropout can be useful for regularization 133 | cnn:add(nn.SpatialDropout(opt.dropoutProb[depth])) 134 | end 135 | cnn:add(nn.SpatialConvolution( 136 | inputSize, opt.channelSize[i], 137 | opt.kernelSize[i], opt.kernelSize[i], 138 | opt.kernelStride[i], opt.kernelStride[i], 139 | opt.padding and math.floor(opt.kernelSize[i]/2) or 0 140 | )) 141 | if opt.batchNorm then 142 | -- batch normalization can be awesome 143 | cnn:add(nn.SpatialBatchNormalization(opt.channelSize[i])) 144 | end 145 | cnn:add(nn[opt.activation]()) 146 | if opt.poolSize[i] and opt.poolSize[i] > 0 then 147 | cnn:add(nn.SpatialMaxPooling( 148 | opt.poolSize[i], opt.poolSize[i], 149 | opt.poolStride[i] or opt.poolSize[i], 150 | opt.poolStride[i] or opt.poolSize[i] 151 | )) 152 | end 153 | inputSize = opt.channelSize[i] 154 | depth = depth + 1 155 | end 156 | -- get output size of convolutional layers 157 | outsize = cnn:outside{1,ds:imageSize('c'),ds:imageSize('h'),ds:imageSize('w')} 158 | inputSize = outsize[2]*outsize[3]*outsize[4] 159 | dp.vprint(not opt.silent, "input to dense layers has: "..inputSize.." 
neurons") 160 | 161 | cnn:insert(nn.Convert(ds:ioShapes(), 'bchw'), 1) 162 | 163 | -- dense hidden layers 164 | cnn:add(nn.Collapse(3)) 165 | for i,hiddenSize in ipairs(opt.hiddenSize) do 166 | if opt.dropout and (opt.dropoutProb[depth] or 0) > 0 then 167 | cnn:add(nn.Dropout(opt.dropoutProb[depth])) 168 | end 169 | cnn:add(nn.Linear(inputSize, hiddenSize)) 170 | if opt.batchNorm then 171 | cnn:add(nn.BatchNormalization(hiddenSize)) 172 | end 173 | cnn:add(nn['Sigmoid']()) 174 | inputSize = hiddenSize 175 | depth = depth + 1 176 | end 177 | 178 | -- output layer 179 | if opt.dropout and (opt.dropoutProb[depth] or 0) > 0 then 180 | cnn:add(nn.Dropout(opt.dropoutProb[depth])) 181 | end 182 | cnn:add(nn.Linear(inputSize, #(ds:classes()))) 183 | cnn:add(nn.LogSoftMax()) 184 | 185 | --[[Propagators]]-- 186 | if opt.lrDecay == 'adaptive' then 187 | ad = dp.AdaptiveDecay{max_wait = opt.maxWait, decay_factor=opt.decayFactor} 188 | elseif opt.lrDecay == 'linear' then 189 | opt.decayFactor = (opt.minLR - opt.learningRate)/opt.saturateEpoch 190 | end 191 | 192 | train = dp.Optimizer{ 193 | acc_update = opt.accUpdate, 194 | loss = nn.ModuleCriterion(nn.ClassNLLCriterion(), nil, nn.Convert()), 195 | epoch_callback = function(model, report) -- called every epoch 196 | if report.epoch > 0 then 197 | if opt.lrDecay == 'adaptive' then 198 | opt.learningRate = opt.learningRate*ad.decay 199 | ad.decay = 1 200 | elseif opt.lrDecay == 'schedule' and opt.schedule[report.epoch] then 201 | opt.learningRate = opt.schedule[report.epoch] 202 | elseif opt.lrDecay == 'linear' then 203 | opt.learningRate = opt.learningRate + opt.decayFactor 204 | end 205 | opt.learningRate = math.max(opt.minLR, opt.learningRate) 206 | if not opt.silent then 207 | print("learningRate", opt.learningRate) 208 | end 209 | end 210 | end, 211 | callback = function(model, report) -- called every batch 212 | -- the ordering here is important 213 | if opt.accUpdate then 214 | model:accUpdateGradParameters(model.dpnn_input, model.output, opt.learningRate) 215 | else 216 | model:updateGradParameters(opt.momentum) -- affects gradParams 217 | model:updateParameters(opt.learningRate) -- affects params 218 | end 219 | model:maxParamNorm(opt.maxOutNorm) -- affects params 220 | model:zeroGradParameters() -- affects gradParams 221 | end, 222 | feedback = dp.Confusion(), 223 | sampler = dp.ShuffleSampler{batch_size = opt.batchSize}, 224 | progress = opt.progress 225 | } 226 | valid = ds:validSet() and dp.Evaluator{ 227 | feedback = dp.Confusion(), 228 | sampler = dp.Sampler{batch_size = opt.batchSize} 229 | } 230 | test = ds:testSet() and dp.Evaluator{ 231 | feedback = dp.Confusion(), 232 | sampler = dp.Sampler{batch_size = opt.batchSize} 233 | } 234 | 235 | --[[Experiment]]-- 236 | xp = dp.Experiment{ 237 | model = cnn, 238 | optimizer = train, 239 | validator = ds:validSet() and valid, 240 | tester = ds:testSet() and test, 241 | observer = { 242 | dp.FileLogger(), 243 | dp.EarlyStopper{ 244 | error_report = {'validator','feedback','confusion','accuracy'}, 245 | maximize = true, 246 | max_epochs = opt.maxTries 247 | }, 248 | ad 249 | }, 250 | random_seed = os.time(), 251 | max_epoch = opt.maxEpoch 252 | } 253 | 254 | --[[GPU or CPU]]-- 255 | if opt.cuda then 256 | require 'cutorch' 257 | require 'cunn' 258 | cutorch.setDevice(opt.useDevice) 259 | xp:cuda() 260 | end 261 | 262 | if not opt.silent then 263 | print"Model:" 264 | print(cnn) 265 | end 266 | xp:verbose(not opt.silent) 267 | 268 | xp:run(ds) 269 | 
-------------------------------------------------------------------------------- /torch-scripts/layer_architecture.txt: -------------------------------------------------------------------------------- 1 | layer 1: 9, 24, 0.1 2 | layer 2: 5, 50, 0.2 3 | layer 3: 5, 80, 0.25 4 | layer 4: 1024, 0.5 5 | layer 5: 1024, 0.5 6 | -------------------------------------------------------------------------------- /torch-scripts/predict.lua: -------------------------------------------------------------------------------- 1 | require 'dp' 2 | require 'cutorch' 3 | require 'optim' 4 | require 'image' 5 | require 'cunn' 6 | matio = require 'matio' 7 | -- input_image = image.load('/home/sanuj/Projects/nuclei-net/data/testing-data/84_LLM_YR4.jpg', 3, 'byte') 8 | -- s = 51 9 | -- c = (#im)[1]; h = (#im)[2]; w = (#im)[3] 10 | 11 | -- out = model:forward(im_tensor):exp() 12 | 13 | ws = 51 14 | channels = 3 15 | batch_size = 2000 16 | classes = 3 17 | for n = 1, 4 do 18 | -- n = 1 19 | file_name = 'd' .. n 20 | index = 'a' .. n 21 | 22 | input_mat = matio.load('/home/sanuj/Projects/nuclei-net/data/testing-data/d/'.. file_name ..'_02.mat') 23 | num_images = (#input_mat[index])[1] 24 | 25 | image_tensor = torch.Tensor(num_images, channels, ws, ws) 26 | for i = 1, num_images do 27 | image_tensor[{i, {}, {}, {}}] = torch.reshape(input_mat[index][i], 1, channels, ws, ws)[1] 28 | end 29 | 30 | xp = torch.load('/home/sanuj/save/amitpc-HP-Z620-Workstation_1454011145_1.dat') 31 | model = xp:model() 32 | 33 | labels = torch.Tensor(num_images, classes) 34 | for i = 1, num_images, batch_size do 35 | temp = model:forward(image_tensor[{ {i, i+batch_size-1}, {}, {}, {} }]):exp() 36 | labels[{ {i, i+batch_size-1}, {} }] = temp:double() 37 | end 38 | 39 | for i = 1, channels do 40 | matio.save('/home/sanuj/Projects/nuclei-net/data/testing-data/d/' .. file_name .. '_02_' .. i .. '.mat', labels[{ {}, {i} }]) 41 | end 42 | end -------------------------------------------------------------------------------- /torch-scripts/predict_full.lua: -------------------------------------------------------------------------------- 1 | require 'dp' 2 | require 'cutorch' 3 | require 'optim' 4 | require 'image' 5 | require 'cunn' 6 | require 'os' 7 | 8 | matio = require 'matio' 9 | 10 | ws = 51 11 | batch_size = 2000 12 | classes = 3 13 | image_dir = '/data/testing-data/40x' 14 | image_name = 'PrognosisTMABlock1_A_3_1_H&E.png' 15 | -- image_dir = '/home/sanuj/Projects/nuclei-net/data/testing-data' 16 | -- image_name = '84_LLM_YR4_002.png' 17 | 18 | input_image = image.load(image_dir .. '/' .. image_name, 3, 'byte') 19 | channels = (#input_image)[1]; w = (#input_image)[2]; h = (#input_image)[3] 20 | 21 | -- file_num = 1 22 | -- p = (ws-1)/2 23 | os.execute("mkdir " .. image_dir .. '/' .. 'tmp') 24 | xp = torch.load('/home/sanuj/save/amitpc-HP-Z620-Workstation:1455425081:2.dat') 25 | -- xp = torch.load('/home/sanuj/save/LYoga:1454304060:1.dat') 26 | model = xp:model() 27 | 28 | -- print((h-ws+1)*(w-ws+1)*channels*ws*ws) 29 | cropped = torch.Tensor((h-ws+1)*(w-ws+1), channels, ws, ws):byte() 30 | labels = torch.Tensor((h-ws+1)*(w-ws+1), classes) 31 | -- batch_done = true 32 | counter = 1 33 | last_counter = 1 34 | for x = 0, h-ws do 35 | for y = 0, w-ws do 36 | -- if temp ~= nil then 37 | -- print('Cropping at x: ' .. x .. ', y: ' .. y .. ', len: ' .. (#temp)[1]) 38 | -- end 39 | print('Counter: ' .. 
counter) 40 | cropped[{ {counter}, {}, {}, {} }] = image.crop(input_image, x, y, x+ws, y+ws) 41 | -- cropped = torch.reshape(cropped, 1, channels, ws, ws) 42 | -- if batch_done then 43 | -- temp = cropped 44 | -- batch_done = false 45 | -- else 46 | -- temp = torch.cat(temp, cropped, 1) 47 | -- end 48 | -- if (#temp)[1] == batch_size then 49 | -- -- print('SAVING File Number: ' .. file_num) 50 | -- -- matio.save(image_dir .. '/tmp/' .. file_num .. '.mat', temp) 51 | -- -- file_num = file_num + 1 52 | -- print('PREDICTING!!') 53 | -- temp_labels = model:forward(temp):exp() 54 | -- if labels == nil then 55 | -- labels = temp_labels 56 | -- else 57 | -- labels = torch.cat(labels, temp_labels) 58 | -- end 59 | -- -- temp = nil 60 | -- batch_done = true 61 | -- end 62 | if counter % batch_size == 0 then 63 | print('PREDICTING!!!') 64 | temp = model:forward(cropped[{ {counter-batch_size+1, counter}, {}, {}, {} }]):exp() 65 | labels[{ {counter-batch_size+1, counter}, {} }] = temp:double() 66 | last_counter = counter 67 | end 68 | counter = counter + 1 69 | end 70 | end 71 | 72 | if last_counter ~= (counter - 1) then 73 | temp = model:forward(cropped[{ {last_counter+1, counter-1}, {}, {}, {} }]):exp() 74 | labels[{ {last_counter+1, counter-1}, {} }] = temp:double() 75 | end 76 | -- if temp ~= nil then 77 | -- print('PREDICTING!!') 78 | -- temp_labels = model:forward(temp):exp() 79 | -- if labels == nil then 80 | -- labels = temp_labels 81 | -- else 82 | -- labels = torch.cat(labels, temp_labels) 83 | -- end 84 | -- temp = nil 85 | -- end 86 | 87 | for i = 1, channels do 88 | image.save(image_dir .. '/tmp/' .. i .. '.png', image.vflip(torch.reshape(labels[{ {}, {i} }], h-ws+1, w-ws+1))) 89 | -- image.save(image_dir .. '/tmp/' .. i .. '.png', image.hflip(image.rotate(torch.reshape(labels[{ {}, {i} }], h-ws+1, w-ws+1), 3*math.pi/2))) 90 | -- matio.save(image_dir .. '/tmp/' .. i .. '.mat', labels[{ {}, {i} }]) 91 | end 92 | 93 | -- for n = 1, 4 do 94 | -- -- n = 1 95 | -- file_name = 'd' .. n 96 | -- index = 'a' .. n 97 | 98 | -- input_mat = matio.load('/home/sanuj/Projects/nuclei-net/data/testing-data/d/'.. file_name ..'_02.mat') 99 | -- num_images = (#input_mat[index])[1] 100 | 101 | -- image_tensor = torch.Tensor(num_images, channels, ws, ws) 102 | -- for i = 1, num_images do 103 | -- image_tensor[{i, {}, {}, {}}] = torch.reshape(input_mat[index][i], 1, channels, ws, ws)[1] 104 | -- end 105 | 106 | -- xp = torch.load('/home/sanuj/save/amitpc-HP-Z620-Workstation_1454011145_1.dat') 107 | -- model = xp:model() 108 | 109 | -- labels = torch.Tensor(num_images, classes) 110 | -- for i = 1, num_images, batch_size do 111 | -- if i+batch_size-1 <= num_images then 112 | -- temp = model:forward(image_tensor[{ {i, i+batch_size-1}, {}, {}, {} }]):exp() 113 | -- labels[{ {i, i+batch_size-1}, {} }] = temp:double() 114 | -- else 115 | -- temp = model:forward(image_tensor[{ {i, num_images}, {}, {}, {} }]):exp() 116 | -- labels[{ {i, num_images}, {} }] = temp:double() 117 | -- end 118 | 119 | -- for i = 1, channels do 120 | -- matio.save('/home/sanuj/Projects/nuclei-net/data/testing-data/d/' .. file_name .. '_02_' .. i .. 
'.mat', labels[{ {}, {i} }]) 121 | -- end 122 | -- end 123 | -------------------------------------------------------------------------------- /torch-scripts/predict_full_mask.lua: -------------------------------------------------------------------------------- 1 | require 'dp' 2 | require 'cutorch' 3 | require 'optim' 4 | require 'image' 5 | require 'cunn' 6 | require 'os' 7 | 8 | -- matio = require 'matio' 9 | 10 | ws = 51 11 | batch_size = 1500 --1600 12 | classes = 3 13 | 14 | -- image_dir = '/home/sanuj/Downloads/anmol_maps' 15 | -- image_name = 'K10_13332_8866.jpg' 16 | 17 | image_dir = '/data/testing-data/40x' 18 | image_name = '63_LLM_YR4_cropped.jpg' 19 | 20 | input_image = image.load(image_dir .. '/' .. image_name, 3, 'byte') 21 | channels = (#input_image)[1]; w = (#input_image)[2]; h = (#input_image)[3] 22 | 23 | -- mirror pad 24 | -- p = ws-1 25 | -- im = torch.ByteTensor(channels, w+p, h+p):zero() 26 | -- im[{ {}, {p/2+1, w+p/2}, {p/2+1, h+p/2} }] = input_image 27 | -- h = h+p 28 | -- w = w+p 29 | ------------------------------------------------------------ 30 | p = ws-1 31 | module = nn.SpatialReflectionPadding(p/2, p/2, p/2, p/2) 32 | module:cuda() 33 | im = module:forward(input_image:cuda()) 34 | im = im:byte() 35 | h = h+p 36 | w = w+p 37 | ------------------------------------------------------------ 38 | 39 | -- map_path = '/home/sanuj/Downloads/anmol_maps/K10N_13332_8866_binary.jpg' 40 | -- map = image.load(map_path) 41 | -- map = map:byte() 42 | 43 | -- for x = 1, (#map)[2] do 44 | -- for y = 1, (#map)[3] do 45 | -- print('x: ' .. x .. 'y: ' .. y) 46 | -- if map[1][x][y] == 0 then 47 | -- im[1][x][y] = 1 48 | -- im[2][x][y] = 1 49 | -- im[3][x][y] = 1 50 | -- end 51 | -- end 52 | -- end 53 | 54 | -- print('Done superimposing.') 55 | 56 | -- file_num = 1 57 | -- p = (ws-1)/2 58 | os.execute("mkdir " .. image_dir .. '/' .. 'results') 59 | -- xp = torch.load('/home/sanuj/save/amitpc-HP-Z620-Workstation:1455425081:2.dat') 60 | -- xp = torch.load('/home/sanuj/Projects/models/amitpc-HP-Z620-Workstation:1457692046:1.dat') -- final 20x 61 | -- /home/sanuj/Projects/models/train_7259.dat 62 | xp = torch.load('/home/sanuj/Projects/models/train_701_val_734.dat') -- latest 20x 63 | -- xp = torch.load('/home/sanuj/save/LYoga:1462988633:1.dat') -- new 20x 64 | -- xp = torch.load('/home/sanuj/save/amitpc-HP-Z620-Workstation_1454011145_1.dat') -- final 40x 65 | -- xp = torch.load('/home/sanuj/save/LYoga:1454304060:1.dat') 66 | model = xp:model() 67 | 68 | -- print((h-ws+1)*(w-ws+1)*channels*ws*ws) 69 | -- cropped = torch.Tensor((h-ws+1)*(w-ws+1), channels, ws, ws):byte() 70 | -- labels = torch.Tensor((h-ws+1)*(w-ws+1), classes) 71 | 72 | cropped = torch.Tensor(batch_size, channels, ws, ws):byte() 73 | labels = torch.Tensor((h-ws+1)*(w-ws+1), classes) 74 | 75 | -- batch_done = true 76 | counter = 0 77 | last_counter = 1 78 | 79 | for x = 0, h-ws do 80 | for y = 0, w-ws do 81 | print('Counter: ' .. counter .. ' cropped: ' .. 
(counter % batch_size)+1) 82 | cropped[{ {(counter % batch_size)+1}, {}, {}, {} }] = image.crop(im, x, y, x+ws, y+ws) 83 | if (counter+1) % batch_size == 0 then 84 | print('PREDICTING!!!') 85 | temp = model:forward(cropped[{ {1, batch_size}, {}, {}, {} }]):exp() 86 | labels[{ {(counter+1)-batch_size+1, counter+1}, {} }] = temp:double() 87 | last_counter = counter 88 | end 89 | counter = counter + 1 90 | end 91 | end 92 | 93 | if last_counter ~= (counter - 1) then 94 | temp = model:forward(cropped[{ {1, counter % batch_size}, {}, {}, {} }]):exp() 95 | labels[{ {last_counter+2, counter}, {} }] = temp:double() 96 | end 97 | 98 | for i = 1, channels do 99 | image.save(image_dir .. '/results/' .. i .. '.png', image.vflip(torch.reshape(labels[{ {}, {i} }], h-ws+1, w-ws+1))) 100 | end 101 | -------------------------------------------------------------------------------- /torch-scripts/three_class/arch_1/three_class_nuclei_train_test.lua: -------------------------------------------------------------------------------- 1 | require 'torch'; 2 | require 'nn'; 3 | require 'itorch'; 4 | require 'image' 5 | 6 | trainset = torch.load('/home/sanuj/Projects/nuclei-net/data/training-data/78_RLM_YR4_3_class/train_small.t7') 7 | testset = torch.load('/home/sanuj/Projects/nuclei-net/data/training-data/63_LLM_YR4_3_class/test_small.t7') 8 | 9 | --image.display(trainset[1][100]) 10 | --print(trainset[2][100]) 11 | 12 | setmetatable(trainset, 13 | {__index = function(t, i) 14 | return {t[1][i], t[2][i]} 15 | end} 16 | ); 17 | 18 | function trainset:size() 19 | return self.data:size(1) 20 | end 21 | 22 | trainset[1] = trainset[1]:double() 23 | testset[1] = testset[1]:double() 24 | 25 | net = nn.Sequential() 26 | net:add(nn.SpatialConvolution(3, 48, 6, 6)) -- 3 input image channel, 48 output channels, 6x6 convolution kernel 27 | net:add(nn.SpatialMaxPooling(2,2,2,2)) -- A max-pooling operation that looks at 2x2 windows and finds the max. 28 | net:add(nn.SpatialConvolution(48, 48, 4, 4)) -- 48 input image channel, 48 output channels, 4x4 convolution kernel 29 | net:add(nn.SpatialMaxPooling(2,2,2,2)) 30 | net:add(nn.View(48*10*10)) -- reshapes from a 3D tensor of 16x5x5 into 1D tensor of 16*5*5 31 | net:add(nn.Linear(48*10*10, 1024)) -- fully connected layer (matrix multiplication between input and weights) 32 | net:add(nn.Linear(1024, 1024)) 33 | net:add(nn.Linear(1024, 3)) -- 3 is the number of outputs of the network 34 | net:add(nn.LogSoftMax()) -- converts the output to a log-probability. Useful for classification problems 35 | 36 | print('Nuclei-net\n' .. net:__tostring()); 37 | 38 | criterion = nn.ClassNLLCriterion() 39 | 40 | --require 'cunn' 41 | --net = net:cuda() 42 | --criterion = criterion:cuda() 43 | --trainset[1] = trainset[1]:cuda() 44 | 45 | trainer = nn.StochasticGradient(net, criterion) 46 | trainer.learningRate = 0.001 47 | trainer.maxIteration = 10 -- just do 10 epochs of training. 48 | 49 | class_performance = {0, 0, 0} 50 | for i=1,30000 do 51 | local groundtruth = testset[2][i] 52 | local prediction = net:forward(testset[1][i]) 53 | local confidences, indices = torch.sort(prediction, true) -- true means sort in descending order 54 | if groundtruth == indices[1] then 55 | class_performance[groundtruth] = class_performance[groundtruth] + 1 56 | end 57 | end 58 | 59 | for i=1,3 do 60 | print(i, 100*class_performance[i]/10000 .. ' %') 61 | end 62 | --------------------------------------------------------------------------------
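The data-preparation and prediction scripts above all follow the same patch-based scheme: every pixel of an H&E image is classified from the 51x51 (ws x ws) window centred on it, and the per-class probabilities are written back into full-resolution maps, which predict_full_mask.lua saves as 1.png, 2.png and 3.png. The sketch below restates that sliding-window assembly in plain NumPy, independent of Torch. It is only an illustration, not code from the repository: predict_patches is a hypothetical stand-in for the trained three-class CNN, and the reflection padding mimics the nn.SpatialReflectionPadding step in predict_full_mask.lua.

# Minimal sketch (not part of the repository): sliding-window prediction
# over a whole image. `predict_patches` is a hypothetical stand-in for the
# trained CNN; it must return an (N, classes) array of class probabilities
# for N patches of shape (ws, ws, 3).
import numpy as np

def predict_probability_maps(image, predict_patches, ws=51, batch_size=1500, classes=3):
    p = (ws - 1) // 2
    # Reflection-pad so every pixel has a complete ws x ws neighbourhood,
    # as predict_full_mask.lua does before cropping patches.
    padded = np.pad(image, ((p, p), (p, p), (0, 0)), mode='reflect')
    h, w = image.shape[:2]
    maps = np.zeros((h, w, classes), dtype=np.float64)

    coords, patches = [], []
    for i in range(h):
        for j in range(w):
            patches.append(padded[i:i + ws, j:j + ws, :])
            coords.append((i, j))
            if len(patches) == batch_size:
                for (y, x), pr in zip(coords, predict_patches(np.stack(patches))):
                    maps[y, x, :] = pr
                coords, patches = [], []
    if patches:  # flush the final, possibly smaller, batch
        for (y, x), pr in zip(coords, predict_patches(np.stack(patches))):
            maps[y, x, :] = pr
    return maps  # one probability map per class, e.g. saved as 1.png, 2.png, 3.png

A dummy classifier such as lambda batch: np.full((len(batch), 3), 1.0 / 3) is enough to exercise the loop end to end before plugging in a real model.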