├── Models
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── AlexNet.cpython-35.pyc
│   │   ├── ZFNet.cpython-35.pyc
│   │   ├── __init__.cpython-35.pyc
│   │   ├── alexnetvis.cpython-35.pyc
│   │   ├── new.cpython-35.pyc
│   │   └── old.cpython-35.pyc
│   └── new.py
├── Preprocess
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-35.pyc
│   │   ├── convert.cpython-35.pyc
│   │   ├── convert1.cpython-35.pyc
│   │   ├── convert2.cpython-35.pyc
│   │   ├── convertdata.cpython-35.pyc
│   │   ├── convertfurther.cpython-35.pyc
│   │   └── patientconvert.cpython-35.pyc
│   ├── convertfurther.py
│   └── patientconvert.py
├── README.md
├── config.json
├── config2.json
├── further_classification.json
├── preprocessdata.py
├── test.py
└── trainvis.py

/Models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Models/__init__.py
--------------------------------------------------------------------------------
/Models/__pycache__/AlexNet.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Models/__pycache__/AlexNet.cpython-35.pyc
--------------------------------------------------------------------------------
/Models/__pycache__/ZFNet.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Models/__pycache__/ZFNet.cpython-35.pyc
--------------------------------------------------------------------------------
/Models/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Models/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/Models/__pycache__/alexnetvis.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Models/__pycache__/alexnetvis.cpython-35.pyc
--------------------------------------------------------------------------------
/Models/__pycache__/new.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Models/__pycache__/new.cpython-35.pyc
--------------------------------------------------------------------------------
/Models/__pycache__/old.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Models/__pycache__/old.cpython-35.pyc
--------------------------------------------------------------------------------
/Models/new.py:
--------------------------------------------------------------------------------
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization

def load_model(nb_classes=1000, path_to_weights=None):
    model = Sequential()
    model.add(Convolution2D(32, 5, 5, border_mode="valid", subsample=(2, 2), input_shape=(227, 227, 1)))  # output = (227-5)/2 + 1 = 112
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))  # output = (112-2)/2 + 1 = 56

    model.add(Convolution2D(32, 5, 5, border_mode="same"))  # output = 56
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, border_mode="same"))  # output = 56
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))  # output = (56-2)/2 + 1 = 28

    model.add(Convolution2D(64, 3, 3, border_mode="same"))  # output = 28
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, border_mode="same"))  # output = 28
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))  # output = (28-2)/2 + 1 = 14

    model.add(Convolution2D(96, 3, 3, border_mode="same"))  # output = 14
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(96, 3, 3, border_mode="valid"))  # output = (14-3)/1 + 1 = 12
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))  # output = (12-2)/2 + 1 = 6

    model.add(Convolution2D(192, 3, 3, border_mode="same"))  # output = 6
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(192, 3, 3, border_mode="valid"))  # output = (6-3)/1 + 1 = 4
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))  # output = (4-2)/2 + 1 = 2

    model.add(Flatten())

    model.add(Dense(output_dim=4096, input_dim=2*2*192))
    model.add(Activation('relu'))
    #model.add(Dropout(0.4))  # for first-level classification
    model.add(Dropout(0.4))   # for second-level classification

    model.add(Dense(output_dim=4096, input_dim=4096))
    model.add(Activation('relu'))
    #model.add(Dropout(0.4))  # for first-level classification
    model.add(Dropout(0.4))   # for second-level classification

    model.add(Dense(output_dim=nb_classes, input_dim=4096))
    model.add(Activation('softmax'))

    if path_to_weights is not None:
        model.load_weights(path_to_weights)

    return model
--------------------------------------------------------------------------------
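As a quick sanity check of the network above, a minimal usage sketch (assuming Keras 1.x with a TensorFlow backend, the API generation used throughout this repository; the weights file name is hypothetical):

from Models import new

# Build the first-level (tumour vs. non-tumour) network from scratch.
model = new.load_model(nb_classes=2)
model.summary()  # the final Dense layer should report an output shape of (None, 2)

# The same call can reload a trained network for inference:
# model = new.load_model(nb_classes=2, path_to_weights='first_level_weights.h5')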
/Preprocess/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Preprocess/__init__.py
--------------------------------------------------------------------------------
/Preprocess/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Preprocess/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/Preprocess/__pycache__/convert.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Preprocess/__pycache__/convert.cpython-35.pyc
--------------------------------------------------------------------------------
/Preprocess/__pycache__/convert1.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Preprocess/__pycache__/convert1.cpython-35.pyc
--------------------------------------------------------------------------------
/Preprocess/__pycache__/convert2.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Preprocess/__pycache__/convert2.cpython-35.pyc
--------------------------------------------------------------------------------
/Preprocess/__pycache__/convertdata.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Preprocess/__pycache__/convertdata.cpython-35.pyc
--------------------------------------------------------------------------------
/Preprocess/__pycache__/convertfurther.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Preprocess/__pycache__/convertfurther.cpython-35.pyc
--------------------------------------------------------------------------------
/Preprocess/__pycache__/patientconvert.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/21Vipin/Medical-Image-Classification-using-deep-learning/ac5dbeba3f81ced97042517f4c161d5ad461d017/Preprocess/__pycache__/patientconvert.cpython-35.pyc
--------------------------------------------------------------------------------
/Preprocess/convertfurther.py:
--------------------------------------------------------------------------------
import os
import sys
import random
import pickle
import dicom
import math
import numpy as np
from scipy.misc import imresize
from scipy.ndimage import rotate
from scipy import ndimage
from sklearn.preprocessing import MultiLabelBinarizer

def run(name, source, destination, split):
    x_train = []
    x_test = []
    train_imgname = []
    y_train = []
    y_test = []
    test_imgname = []

    # class names are taken from the sub-directory names, sorted for a stable label order
    _class_names = os.listdir(source)
    _class_names.sort()
    print(_class_names)
    _n2l = {_class_names[i]: i for i in range(len(_class_names))}  # class name -> integer label
    print(_n2l)

    if os.path.exists(destination):
        for i in range(len(_class_names)):
            images = os.listdir(source + "/" + _class_names[i])
            count = 0
            for y in images:
                # read each DICOM file and resize its pixel array to the 227x227 network input
                src = source + "/" + _class_names[i] + "/" + y
                img = dicom.read_file(src)
                img = img.pixel_array
                img = imresize(img, (227, 227))
                x_train.append(img)
                y_train.append(_n2l[_class_names[i]])
                train_imgname.append(y)
                count += 1
            print(count)
            print(_class_names[i] + " included in training.")

        #print(y_ts)
        train = list(zip(x_train, y_train, train_imgname))

        #print(train)

        random.shuffle(train)

        x_train, y_train, train_imgname = zip(*train)

        x_train = np.array(x_train)
        y_tr = np.array(y_train)
        y_tr = MultiLabelBinarizer().fit_transform(y_tr.reshape(-1, 1))  # integer labels -> one-hot
        train_imgname = np.array(train_imgname)

        d_train = {}
        d_train['data'] = x_train
        d_train['labels'] = y_tr
        d_train['imgname'] = train_imgname
        #print(d_train['labels'])

        with open(destination + '/' + name + '.further', 'wb') as f:
            pickle.dump(d_train, f)
--------------------------------------------------------------------------------
/Preprocess/patientconvert.py:
--------------------------------------------------------------------------------
import os
import sys
import random
import pickle
import dicom
import math
import numpy as np
from scipy.misc import imresize
from scipy.ndimage import rotate
from scipy import ndimage
from sklearn.preprocessing import MultiLabelBinarizer

def run(name, source, destination, split):
    n = 10
    l = 256
    x_train = []
    x_test = []
    train_imgname = []
    y_train = []
    y_test = []
    test_imgname = []

    # class names are taken from the sub-directory names, sorted for a stable label order
    _class_names = os.listdir(source)
    _class_names.sort()
    print(len(_class_names))
    _n2l = {_class_names[i]: i for i in range(len(_class_names))}  # class name -> integer label
    print(_n2l)

    if os.path.exists(destination):
        for i in range(len(_class_names)):
            images = os.listdir(source + "/" + _class_names[i])
            ratio = math.floor(len(images) * split)  # computed but never used: every image goes into the training set
            count = 0
            for y in images:
                # read each DICOM file and resize its pixel array to the 227x227 network input
                src = source + "/" + _class_names[i] + "/" + y
                img = dicom.read_file(src)
                img = img.pixel_array
                img = imresize(img, (227, 227))
                x_train.append(img)
                y_train.append(_n2l[_class_names[i]])
                train_imgname.append(y)
                count += 1
            print(count)
            print(_class_names[i] + " included in training.")

        #print(y_ts)
        train = list(zip(x_train, y_train, train_imgname))

        #print(train)

        random.shuffle(train)

        x_train, y_train, train_imgname = zip(*train)

        x_train = np.array(x_train)
        y_tr = np.array(y_train)
        y_tr = MultiLabelBinarizer().fit_transform(y_tr.reshape(-1, 1))  # integer labels -> one-hot
        print(y_tr)
        train_imgname = np.array(train_imgname)

        d_train = {}
        d_train['data'] = x_train
        d_train['labels'] = y_tr
        d_train['imgname'] = train_imgname
        #print(d_train['labels'])

        with open(destination + '/' + name + '.pat1', 'wb') as f:
            pickle.dump(d_train, f)
--------------------------------------------------------------------------------
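For reference, a minimal sketch of reading back the pickle that patientconvert.run writes. The 'brainn/brainn.pat1' path is only an example, following the destination + '/' + name + '.pat1' pattern above with the names used in further_classification.json:

import pickle

with open('brainn/brainn.pat1', 'rb') as f:
    train = pickle.load(f)

print(train['data'].shape)    # (N, 227, 227) resized DICOM pixel arrays
print(train['labels'].shape)  # (N, nb_classes) one-hot labels from MultiLabelBinarizer
print(train['imgname'][:5])   # original DICOM file names, shuffled in step with the data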
/README.md:
--------------------------------------------------------------------------------
# Medical-Image-Classification-using-deep-learning

A tumour forms in the human body through abnormal cell multiplication in tissue. Early detection of tumours, and their classification as benign or malignant, is important to prevent further growth. MRI (Magnetic Resonance Imaging) is a medical imaging technique used by radiologists to study and analyse medical images. Performing such critical analysis manually introduces unnecessary delay, and its accuracy suffers from human error. The main objective of this project is to apply machine learning techniques to build systems capable of performing this critical analysis faster, with higher accuracy and efficiency.

This research work builds on existing convolutional neural network architectures to identify tumours in MRI images. The convolutional neural networks were implemented using Keras and TensorFlow, accelerated by an NVIDIA Tesla K40 GPU. Using REMBRANDT as the dataset for implementation, the classification accuracies achieved for AlexNet and ZFNet are 63.56% and 84.42%, respectively.
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
{
    "nb_classes":2,
    "epochs":1000,
    "batch_size":350,
    "weights":"any name of the file with (.h5) extension",
    "pre_train":"False",
    "shuffle":"True",
    "dataset_name":"Base name for the pickled dataset file",
    "raw_path":"The path to the raw DICOM dataset",
    "train_test_split":0.8,
    "validation_split":0.2,
    "data_path":"Directory name where the labeled pre-processed dataset is kept"
}
--------------------------------------------------------------------------------
/config2.json:
--------------------------------------------------------------------------------
{
    "nb_classes":4,
    "epochs":1000,
    "batch_size":350,
    "weights":"any name of the file with (.h5) extension",
    "pre_train":"False",
    "shuffle":"True",
    "dataset_name":"Base name for the pickled dataset file",
    "raw_path":"The path to the raw DICOM dataset",
    "train_test_split":0.8,
    "validation_split":0.2,
    "data_path":"Directory name where the labeled pre-processed dataset is kept"
}
--------------------------------------------------------------------------------
/further_classification.json:
--------------------------------------------------------------------------------
{
    "nb_classes":4,
    "epochs":1000,
    "batch_size":350,
    "weights":"19aprilfurther.h5",
    "pre_train":"False",
    "shuffle":"True",
    "dataset_name":"brainn",
    "raw_path":"dataset",
    "train_test_split":0.8,
    "validation_split":0.2,
    "data_path":"brainn"
}
--------------------------------------------------------------------------------
/preprocessdata.py:
--------------------------------------------------------------------------------
from __future__ import print_function
from Preprocess import patientconvert

import numpy as np
import pickle
import json
import os
import sys


def main():
    path = sys.argv[1]
    with open(path) as f:
        config = json.load(f)

    if os.path.exists(config['data_path']):
        patientconvert.run(config['dataset_name'], config['raw_path'], config['data_path'], float(config['train_test_split']))


if __name__ == '__main__':
    if len(sys.argv) == 1:
        print("Please include the config.json file path like this - python preprocessdata.py config.json")
    else:
        main()
--------------------------------------------------------------------------------
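Note that patientconvert.run derives the class labels from the sub-directory names under raw_path, so the raw dataset is expected to be laid out one class per folder, roughly like this (folder and file names are hypothetical):

dataset/
├── nontumor/
│   ├── image0001.dcm
│   └── ...
└── tumor/
    ├── image0001.dcm
    └── ...

The sorted folder names determine the integer labels, so with these two folders nontumor maps to 0 and tumor maps to 1, matching the "0-Nontumor 1-Tumor" legend printed by test.py.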
/test.py:
--------------------------------------------------------------------------------
from __future__ import print_function
from keras.optimizers import SGD
import numpy as np
import pickle
import json
#from keras import metrics
#import os
import sys
from pandas_confusion import ConfusionMatrix

from Models import new  # model for first-level classification
from sklearn.preprocessing import MultiLabelBinarizer
#from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import matplotlib
#import brewer2mpl
import pandas as pd
from vis.visualization import visualize_saliency
from vis.visualization import visualize_cam
import cv2

from keras.preprocessing.image import img_to_array
from vis.utils.utils import stitch_images


def main():
    path = sys.argv[1]
    with open(path) as f:
        config = json.load(f)
    batch_size = int(config['batch_size'])
    nb_classes = int(config['nb_classes'])
    weight_path = config['weights']

    ##################### First level of classification ################################

    ##### load model
    model = new.load_model(nb_classes, weight_path)

    ####### specify the optimizer and loss function
    sgd = SGD(lr=0.00005, decay=1e-5, momentum=0.99, nesterov=True)
    #sgd = SGD(lr=0.00005, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    #model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd, metrics=[metrics.mae, metrics.sparse_categorical_accuracy])

    ######## load data
    with open(config['data_path'] + '/' + config['dataset_name'] + '.test', 'rb') as f:
        test = pickle.load(f)

    x_test, y_test, imgname = test['data'], test['labels'], test['imgname']
    x_ts = x_test.reshape((-1, 227, 227, 1))

    print(x_ts.shape, 'test samples')
    print(y_test.shape, 'test sample labels')

    ##### evaluation, prediction and confusion-matrix formation
    scores = model.evaluate(x_ts, y_test, batch_size=batch_size, verbose=0)
    print("model %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
    prediction = model.predict_classes(x_ts, verbose=1)
    #print(prediction)
    np.save('prediction.npy', prediction)
    pre = np.array(prediction)
    pre = MultiLabelBinarizer().fit_transform(pre.reshape(-1, 1))
    orig = y_test
    print('')
    print('')
    print('score for first level classification: ', scores)
    '''
    count = 0
    for i in range(0, len(pre)):
        if not np.array_equal(orig[i], pre[i]):
            print(imgname[i], "_", orig[i], "_", pre[i], "_False")
            count = count + 1
    print(count)
    '''
    aa = np.array([0, 1])
    print('')
    print('')
    print(MultiLabelBinarizer().fit_transform(aa.reshape(-1, 1)))
    print("0-Nontumor 1-Tumor")
    a = np.array([0, 1])
    b = np.array([1, 0])
    y_true = []
    y_pred = []
    print(range(len(prediction)))

    for i in range(len(prediction)):
        if np.array_equal(orig[i], a):
            y_true.append(1)
        elif np.array_equal(orig[i], b):
            y_true.append(0)

    for i in range(len(prediction)):
        if np.array_equal(pre[i], a):
            y_pred.append(1)
        elif np.array_equal(pre[i], b):
            y_pred.append(0)

    cm = ConfusionMatrix(y_true, y_pred)
    print('')
    print('')
    print('***************************** Confusion matrix for FIRST level classification ****************************')
    print(cm)

    print('')
    print('')

    ############################ Second level classification ###############################
    # NOTE: the whole second-level pass below is disabled (wrapped in a triple-quoted string).
    '''
    path = sys.argv[2]
    with open(path) as f:
        config2 = json.load(f)
    batch_size2 = int(config2['batch_size'])
    nb_classes2 = int(config2['nb_classes'])
    weight_path2 = config2['weights']

    ##### load model2
    model2 = new.load_model(nb_classes2, weight_path2)

    ####### specify the optimizer and loss function
    sgd2 = SGD(lr=0.00005, decay=1e-6, momentum=0.9, nesterov=True)
    model2.compile(loss='categorical_crossentropy', optimizer=sgd2, metrics=['accuracy'])
    #model2.compile(loss='sparse_categorical_crossentropy', optimizer=sgd2, metrics=[metrics.mae, metrics.sparse_categorical_accuracy])

    ######## load data
    with open(config2['data_path'] + '/' + config2['dataset_name'] + '.further', 'rb') as f:
        test2 = pickle.load(f)

    x_test2, y_test2, imgname2 = test2['data'], test2['labels'], test2['imgname']
    x_ts2 = x_test2.reshape((-1, 227, 227, 1))

    # collect the file names the first-level model predicted as tumours
    count = 0
    tumorname = []
    for i in range(0, len(pre)):
        if np.array_equal(pre[i], a):
            tumorname.append(imgname[i])
            count += 1

    print(count)
    print(len(tumorname))
    print(len(imgname2))
    tumor = []
    tumorlabels = []
    count = 0
    count1 = 0
    for i in range(len(tumorname)):
        for j in range(len(imgname2)):
            if tumorname[i] == imgname2[j]:
                tumor.append(x_ts2[j])
                tumorlabels.append(y_test2[j])
                count += 1

    print(count)

    tumor = np.array(tumor)
    tumorlabels = np.array(tumorlabels)
    overview(0, 207, tumor)  # NOTE: overview() is not defined anywhere in this repository

    print(tumor.shape, ' predicted tumor samples')
    print(tumorlabels.shape, 'predicted tumor sample labels')
    print('')

    ##### evaluation, prediction and confusion-matrix formation
    scores2 = model2.evaluate(tumor, tumorlabels, batch_size=batch_size2, verbose=0)
    print("model2 %s: %.2f%%" % (model2.metrics_names[1], scores2[1]*100))
    prediction2 = model2.predict_classes(tumor, verbose=1)
    #print(prediction2)
    np.save('prediction2.npy', prediction2)
    pre2 = np.array(prediction2)
    pre2 = MultiLabelBinarizer().fit_transform(pre2.reshape(-1, 1))
    orig2 = tumorlabels
    print('')
    print('')
    print('score for second level classification', scores2)
    count2 = 0
    aa = np.array([0, 1, 2, 3])
    print('')
    print(aa, MultiLabelBinarizer().fit_transform(aa.reshape(-1, 1)))
    print("0-astrocytoma 1-gbm 2-mixed 3-oligodendroglioma")
    print('')
    a = np.array([1, 0, 0, 0])
    b = np.array([0, 1, 0, 0])
    c = np.array([0, 0, 1, 0])
    d = np.array([0, 0, 0, 1])
    y_true2 = []
    y_pred2 = []

    for i in range(len(prediction2)):
        if np.array_equal(orig2[i], a):
            y_true2.append(0)
        elif np.array_equal(orig2[i], b):
            y_true2.append(1)
        elif np.array_equal(orig2[i], c):
            y_true2.append(2)
        elif np.array_equal(orig2[i], d):
            y_true2.append(3)

    for i in range(len(prediction2)):
        if np.array_equal(pre2[i], a):
            y_pred2.append(0)
        elif np.array_equal(pre2[i], b):
            y_pred2.append(1)
        elif np.array_equal(pre2[i], c):
            y_pred2.append(2)
        elif np.array_equal(pre2[i], d):
            y_pred2.append(3)

    cm2 = ConfusionMatrix(y_true2, y_pred2)
    print('')
    print('***************************** Confusion matrix for SECOND level classification ****************************')
    print(cm2)
    # cm2.print_stats()
    print('')

    counter = 0
    for i in range(len(pre2)):
        if np.array_equal(pre2[i], a):
            #print(tumorname[i], '__', a, '__astrocytoma')
            counter += 1

    print('')
    print(counter, '__astrocytoma__images')
    counter = 0
    for i in range(len(pre2)):
        if np.array_equal(pre2[i], b):
            #print(tumorname[i], '__', b, '__gbm')
            counter += 1

    print('')
    print(counter, '__gbm__images')
    counter = 0
    for i in range(len(pre2)):
        if np.array_equal(pre2[i], c):
            #print(tumorname[i], '__', c, '__mixed')
            counter += 1

    print('')
    print(counter, '__mixed__images')
    counter = 0
    for i in range(len(pre2)):
        if np.array_equal(pre2[i], d):
            #print(tumorname[i], '__', d, '__oligodendroglioma')
            counter += 1

    print('')
    print(counter, '__oligodendroglioma__images')

    # layer_name = 'predictions'
    # layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]

    # heatmaps = []
    # for img in tumor:
    #     # Predict the corresponding class for use in `visualize_saliency`.
    #     pred_class = np.argmax(model.predict(np.array([img_to_array(img)])))
    #     # Ask for attention such that the probability of `pred_class` is maximised.
    #     heatmap = visualize_saliency(model, layer_idx, [pred_class], seed_img, text=tumorlabels)
    #     heatmaps.append(heatmap)

    # cv2.imwrite('predictions.png', stitch_images(heatmaps))
    '''


if __name__ == '__main__':
    if len(sys.argv) == 1:
        print("Please include the config.json file path like this - python test.py config.json")
    else:
        main()
--------------------------------------------------------------------------------
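A short illustration of the label-encoding trick test.py relies on (a sketch, assuming scikit-learn is available): calling MultiLabelBinarizer().fit_transform on a column of class indices yields one one-hot row per index, which is how the indices returned by predict_classes are compared against the pickled one-hot labels:

import numpy as np
from sklearn.preprocessing import MultiLabelBinarizer

indices = np.array([0, 1, 1, 0])  # e.g. class indices from model.predict_classes
onehot = MultiLabelBinarizer().fit_transform(indices.reshape(-1, 1))
print(onehot)
# [[1 0]
#  [0 1]
#  [0 1]
#  [1 0]]

Note this only lines up if every class actually appears among the predictions; if one class is missing, the binarizer emits fewer columns and the comparison against the true one-hot labels silently breaks.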
/trainvis.py:
--------------------------------------------------------------------------------
from __future__ import print_function
from keras.optimizers import SGD
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
from keras.callbacks import Callback
import numpy as np
import pickle
import json
import h5py
import os
import matplotlib.pyplot as plt
from keras import metrics
import sys
import math

from Models import new
#from Models import ZFNet  # ZFNet.py is not included in this snapshot; only its compiled .pyc remains


class LossHistory(Callback):
    """Records the training loss after every batch."""
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))


def main():
    path = sys.argv[1]
    with open(path) as f:
        config = json.load(f)
    nb_epochs = int(config['epochs'])
    batch_size = int(config['batch_size'])
    nb_classes = int(config['nb_classes'])
    #tbCallBack = TensorBoard(log_dir='Graph', histogram_freq=0, write_graph=True, write_images=True)
    pre_train = (config['pre_train'] == 'True')
    shuffle = (config['shuffle'] == 'True')
    weight_path = config['weights']
    if pre_train:
        model = new.load_model(nb_classes, weight_path)
    else:
        model = new.load_model(nb_classes)

    sgd = SGD(lr=0.00005, decay=1e-5, momentum=0.99, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    #model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd, metrics=[metrics.mae, metrics.sparse_categorical_accuracy])

    ################################### for first-level classification ###################
    with open(config['data_path'] + '/' + config['dataset_name'] + '.train', 'rb') as f:
        train = pickle.load(f)
    #######################################################################################

    ############ for second-level classification #########################################
    # with open(config['data_path'] + '/' + config['dataset_name'] + '.further', 'rb') as f:
    #     train = pickle.load(f)
    #######################################################################################

    x_train, y_train, imgname = train['data'], train['labels'], train['imgname']
    x_train = x_train.reshape((-1, 227, 227, 1))
    #y_train = y_train.reshape((-1, 1))
    #print(y_train)

    print(x_train.shape, 'train samples')
    print(y_train.shape, 'train sample labels')

    # https://gis.stackexchange.com/questions/72458/export-list-of-values-into-csv-or-txt-file
    checkpointer = ModelCheckpoint(filepath=config['weights'], verbose=1, save_best_only=True)
    history = LossHistory()  # was referenced below but never instantiated in the original
    fit_1 = model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=nb_epochs,
                      validation_split=config['validation_split'], shuffle=shuffle,
                      verbose=1, callbacks=[checkpointer, history])
    print(history.losses)

    # import csv
    # csvfile = "history.csv"
    # Assuming the result is a flat list:
    # with open(csvfile, "w") as output:
    #     writer = csv.writer(output, lineterminator='\n')
    #     for val in fit_1:
    #         writer.writerow([val])

    #model.save_weights(config['weights'])
    print(fit_1.history.keys())

    # summarize history for accuracy
    plt.figure()
    plt.plot(fit_1.history['acc'])
    plt.plot(fit_1.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')

    # summarize history for loss
    plt.figure()
    plt.plot(fit_1.history['loss'])
    plt.plot(fit_1.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')

    plt.show()


if __name__ == '__main__':
    if len(sys.argv) == 1:
        print("Please include the config.json file path like this - python trainvis.py config.json")
    else:
        main()
--------------------------------------------------------------------------------
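Putting the scripts together, the intended workflow appears to be (each script takes the config file path as its only argument):

python preprocessdata.py config.json   # convert the raw DICOM folders into a pickled dataset
python trainvis.py config.json         # train the network and plot the accuracy/loss curves
python test.py config.json             # evaluate the trained weights and print the confusion matrix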