├── fig ├── figure_10.png ├── knn_gabor.png ├── roc_curve.png ├── svm_gabor.png ├── dim_result.png ├── figure_13_a.png ├── figure_13_b.png ├── roc_curve1.png ├── svm_reduce.png ├── figure_reduce.png ├── distance_result.png ├── svm_cnn_layer13.png └── svm_cnn_layer3_9.png ├── code ├── CNN_feature │ ├── __pycache__ │ │ ├── res.cpython-36.pyc │ │ ├── ResNet.cpython-36.pyc │ │ ├── res11.cpython-36.pyc │ │ ├── DenseNet.cpython-36.pyc │ │ ├── cnn_feature.cpython-36.pyc │ │ ├── densenet11.cpython-36.pyc │ │ ├── inceptionv4.cpython-36.pyc │ │ ├── resnet_utils.cpython-36.pyc │ │ └── inception_utils.cpython-36.pyc │ ├── inception_utils.py │ ├── cnn_feature.py │ ├── ResNet.py │ ├── iris_demo1.py │ ├── DenseNet.py │ ├── resnet_utils.py │ ├── inceptionv4.py │ └── 1.py ├── tradition │ ├── __pycache__ │ │ ├── Gabor.cpython-36.pyc │ │ ├── copy.cpython-36.pyc │ │ ├── Matching.cpython-36.pyc │ │ ├── Evaluation.cpython-36.pyc │ │ ├── Enhancement.cpython-36.pyc │ │ ├── Normalization.cpython-36.pyc │ │ └── Segmentation.cpython-36.pyc │ ├── Enhancement.py │ ├── Normalization.py │ ├── Segmentation.py │ ├── Gabor.py │ ├── Evaluation.py │ ├── iris_demo2.py │ └── Matching.py ├── CNN_classifier │ ├── __pycache__ │ │ ├── utils.cpython-36.pyc │ │ └── DenseNet.cpython-36.pyc │ ├── utils.py │ ├── eval.py │ ├── DenseNet.py │ └── train.py ├── script │ ├── copy_pic.py │ ├── GetVector.py │ ├── GetPic.py │ ├── GetList.py │ └── scirpt.py ├── Iris.py └── tensorflow-iris.py ├── Iris.py ├── README.md └── csv ├── train1.csv └── test1.csv /fig/figure_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/figure_10.png -------------------------------------------------------------------------------- /fig/knn_gabor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/knn_gabor.png -------------------------------------------------------------------------------- /fig/roc_curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/roc_curve.png -------------------------------------------------------------------------------- /fig/svm_gabor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/svm_gabor.png -------------------------------------------------------------------------------- /fig/dim_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/dim_result.png -------------------------------------------------------------------------------- /fig/figure_13_a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/figure_13_a.png -------------------------------------------------------------------------------- /fig/figure_13_b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/figure_13_b.png -------------------------------------------------------------------------------- /fig/roc_curve1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/roc_curve1.png -------------------------------------------------------------------------------- /fig/svm_reduce.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/svm_reduce.png -------------------------------------------------------------------------------- /fig/figure_reduce.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/figure_reduce.png -------------------------------------------------------------------------------- /fig/distance_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/distance_result.png -------------------------------------------------------------------------------- /fig/svm_cnn_layer13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/svm_cnn_layer13.png -------------------------------------------------------------------------------- /fig/svm_cnn_layer3_9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/fig/svm_cnn_layer3_9.png -------------------------------------------------------------------------------- /code/CNN_feature/__pycache__/res.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_feature/__pycache__/res.cpython-36.pyc -------------------------------------------------------------------------------- /code/tradition/__pycache__/Gabor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/tradition/__pycache__/Gabor.cpython-36.pyc -------------------------------------------------------------------------------- /code/tradition/__pycache__/copy.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/tradition/__pycache__/copy.cpython-36.pyc -------------------------------------------------------------------------------- /code/CNN_feature/__pycache__/ResNet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_feature/__pycache__/ResNet.cpython-36.pyc -------------------------------------------------------------------------------- /code/CNN_feature/__pycache__/res11.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_feature/__pycache__/res11.cpython-36.pyc -------------------------------------------------------------------------------- /code/tradition/__pycache__/Matching.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/tradition/__pycache__/Matching.cpython-36.pyc -------------------------------------------------------------------------------- 
/code/CNN_classifier/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_classifier/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /code/CNN_feature/__pycache__/DenseNet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_feature/__pycache__/DenseNet.cpython-36.pyc -------------------------------------------------------------------------------- /code/tradition/__pycache__/Evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/tradition/__pycache__/Evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /code/CNN_classifier/__pycache__/DenseNet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_classifier/__pycache__/DenseNet.cpython-36.pyc -------------------------------------------------------------------------------- /code/CNN_feature/__pycache__/cnn_feature.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_feature/__pycache__/cnn_feature.cpython-36.pyc -------------------------------------------------------------------------------- /code/CNN_feature/__pycache__/densenet11.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_feature/__pycache__/densenet11.cpython-36.pyc -------------------------------------------------------------------------------- /code/CNN_feature/__pycache__/inceptionv4.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_feature/__pycache__/inceptionv4.cpython-36.pyc -------------------------------------------------------------------------------- /code/tradition/__pycache__/Enhancement.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/tradition/__pycache__/Enhancement.cpython-36.pyc -------------------------------------------------------------------------------- /code/tradition/__pycache__/Normalization.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/tradition/__pycache__/Normalization.cpython-36.pyc -------------------------------------------------------------------------------- /code/tradition/__pycache__/Segmentation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/tradition/__pycache__/Segmentation.cpython-36.pyc -------------------------------------------------------------------------------- /code/CNN_feature/__pycache__/resnet_utils.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_feature/__pycache__/resnet_utils.cpython-36.pyc -------------------------------------------------------------------------------- /code/CNN_feature/__pycache__/inception_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Linchunhui/Iris_Recognition/HEAD/code/CNN_feature/__pycache__/inception_utils.cpython-36.pyc -------------------------------------------------------------------------------- /code/tradition/Enhancement.py: -------------------------------------------------------------------------------- 1 | from skimage.filters.rank import equalize 2 | from skimage.morphology import disk 3 | import numpy as np 4 | 5 | 6 | def ImageEnhancement(normalized_iris): 7 | row = 64 8 | col = 512 9 | normalized_iris = normalized_iris.astype(np.uint8) 10 | 11 | enhanced_image = normalized_iris 12 | 13 | enhanced_image = equalize(enhanced_image, disk(32)) 14 | 15 | roi = enhanced_image[0:48, :] 16 | return roi 17 | -------------------------------------------------------------------------------- /code/CNN_classifier/utils.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import pandas as pd 3 | def get_train_list(): 4 | #dir="D:/study/iris/process_data/rectangle/train/" 5 | train = pd.read_csv("D:/study/iris/csv/train.csv") 6 | img_list = train['img'].apply(lambda x:"D:/study/iris/CASIA/norm_512/train/"+x.split('\\')[-1].split('.')[0]+".jpg") 7 | print(img_list) 8 | label_list = train['label'] 9 | return img_list, label_list 10 | 11 | def get_test_list(): 12 | #dir="D:/study/iris/process_data/rectangle/train/" 13 | test = pd.read_csv("D:/study/iris/csv/test.csv") 14 | img_list = test['img'].apply(lambda x:"D:/study/iris/CASIA/norm_512/test/"+x.split('\\')[-1].split('.')[0]+".jpg") 15 | label_list = test['label'] 16 | return img_list, label_list 17 | 18 | def get_batch(image, label, batch_size, capacity): 19 | image = tf.cast(image, tf.string) 20 | label = tf.cast(label, tf.int32) 21 | # make an input queue 22 | input_queue = tf.train.slice_input_producer([image, label], shuffle=False) 23 | label = input_queue[1] 24 | image_contents = tf.read_file(input_queue[0]) 25 | image = tf.image.decode_jpeg(image_contents, channels=1) 26 | image = tf.image.resize_images(image,(64,64)) 27 | 28 | image_batch, label_batch = tf.train.batch([image, label], 29 | batch_size=batch_size, 30 | num_threads=64, 31 | capacity=capacity) 32 | #tf.summary.image("input_img", image_batch, max_outputs=5) 33 | label_batch = tf.reshape(label_batch, [batch_size]) 34 | image_batch = tf.cast(image_batch, tf.float32) 35 | return image_batch, label_batch 36 | if __name__=="__main__": 37 | get_train_list() -------------------------------------------------------------------------------- /code/script/copy_pic.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import shutil 3 | import os 4 | 5 | train = pd.read_csv("D:/study/iris/csv/train.csv") 6 | test = pd.read_csv("D:/study/iris/csv/test.csv") 7 | 8 | train_img_list = train["img"] 9 | train_label_list = train["label"] 10 | test_img_list = test["img"] 11 | test_label_list = test["label"] 12 | train_size = len(train_img_list) 13 | test_size = len(test_img_list) 14 | copy_dir1="D:/study/iris/CASIA/origin/train" 15 | copy_dir2="D:/study/iris/CASIA/origin/test" 16 | 17 | for i in range(train_size): 18 | 
train_path=train_img_list[i] 19 | #print(train_path) 20 | img_name=train_path.split('\\')[-1] 21 | copy_path1=os.path.join(copy_dir1,img_name) 22 | print(train_path,copy_path1) 23 | shutil.copy(train_path,copy_path1) 24 | for i in range(test_size): 25 | test_path = test_img_list[i] 26 | # print(train_path) 27 | img_name = test_path.split('\\')[-1] 28 | copy_path2 = os.path.join(copy_dir2, img_name) 29 | shutil.copy(test_path,copy_path2) 30 | 31 | '''img = cv2.imread(train_path, 0) 32 | iris, pupil = IrisLocalization(img) 33 | #cv2.circle(img, (iris[0], iris[1]), iris[2], (0, 0, 255), 1) 34 | #cv2.circle(img, (pupil[0], pupil[1]), pupil[2], (0, 255, 0), 1) 35 | #cv2.imwrite("D:/study/iris/process_data/circle/train/{}.jpg".format(img_name),img) 36 | normalized = IrisNormalization(img, pupil, iris) 37 | cv2.imwrite("D:/study/iris/process_data/rectangle/train/{}.jpg".format(img_name),normalized) 38 | ROI = ImageEnhancement(normalized) 39 | cv2.imwrite("D:/study/iris/process_data/enhancement/train/{}.jpg".format(img_name), ROI) 40 | #train_features[i, :] = FeatureExtraction(ROI) 41 | #train_classes[i] = train_label_list[i] 42 | print('train_feature:',train_img_list[i],train_label_list[i],'{}/{}'.format(i,train_size))''' -------------------------------------------------------------------------------- /code/CNN_classifier/eval.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | from utils import * 4 | from DenseNet import densenet 5 | import cv2 6 | #net 7 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" # gpu编号 8 | config = tf.ConfigProto() 9 | config.gpu_options.allow_growth = True # 设置最小gpu使用量 10 | 11 | from PIL import ImageFile 12 | 13 | ImageFile.LOAD_TRUNCATED_IMAGES = True 14 | try: 15 | import urllib2 as urllib 16 | except ImportError: 17 | import urllib.request as urllib 18 | 19 | log_train_dir="D:/study/iris/code/CNN/log/train6/model.ckpt-43749" 20 | 21 | def main(): 22 | init_tf(log_train_dir, 1000) 23 | func1() 24 | 25 | def init_tf(logs_train_dir,N_CLASSES): 26 | global sess, pred, x 27 | # process image 28 | x = tf.placeholder(tf.float32, shape=[64, 64]) 29 | #x_norm = tf.image.per_image_standardization(x) 30 | x_4d = tf.reshape(x, [-1, 64, 64, 1]) 31 | # predict 32 | logits = densenet(x_4d,1.0,N_CLASSES) 33 | print("logit", np.shape(logits)) 34 | pred = tf.nn.softmax(logits) 35 | 36 | saver = tf.train.Saver() 37 | sess = tf.Session() 38 | saver.restore(sess, logs_train_dir) 39 | print('load model done...') 40 | 41 | 42 | def evaluate_image2(img_path,size): 43 | #img = image.load_img(img_path, target_size=(size, size)) 44 | image = cv2.imread(img_path) 45 | img = image[:, :, 0] 46 | img = cv2.resize(img, (size,size)) 47 | image_array = np.array(img).reshape(size,size) 48 | # image_array = image.img_to_array(img) 49 | prediction = sess.run(pred, feed_dict={x: image_array}) 50 | return prediction 51 | 52 | def func1(size=64): 53 | count=0 54 | test_img, test_label=get_test_list() 55 | for i in range(len(test_img)): 56 | pred = evaluate_image2(test_img[i], size) 57 | index = np.argmax(pred) 58 | print(test_img[i],index,test_label[i]) 59 | if index+1==test_label[i]: 60 | count+=1 61 | #print(count) 62 | print((count/len(test_img))*1.00) 63 | if __name__ == "__main__": 64 | main() -------------------------------------------------------------------------------- /code/tradition/Normalization.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | 
import cv2 4 | from PIL import Image 5 | from skimage.filters.rank import equalize 6 | from skimage.morphology import disk 7 | 8 | def IrisNormalization(image, inner_circle, outer_circle): 9 | localized_img = image 10 | row = 64 11 | col = 512 12 | normalized_iris = np.zeros(shape=(64, 512)) 13 | inner_y = inner_circle[0] # height 14 | inner_x = inner_circle[1] # width 15 | outer_y = outer_circle[0] 16 | outer_x = outer_circle[1] 17 | angle = 2.0 * math.pi / col 18 | inner_boundary_x = np.zeros(shape=(1, col)) 19 | inner_boundary_y = np.zeros(shape=(1, col)) 20 | outer_boundary_x = np.zeros(shape=(1, col)) 21 | outer_boundary_y = np.zeros(shape=(1, col)) 22 | for j in range(col): 23 | inner_boundary_x[0][j] = inner_circle[0] + inner_circle[2] * math.cos(angle * (j)) 24 | inner_boundary_y[0][j] = inner_circle[1] + inner_circle[2] * math.sin(angle * (j)) 25 | 26 | outer_boundary_x[0][j] = outer_circle[0] + outer_circle[2] * math.cos(angle * (j)) 27 | outer_boundary_y[0][j] = outer_circle[1] + outer_circle[2] * math.sin(angle * (j)) 28 | 29 | for j in range(512): 30 | for i in range(64): 31 | normalized_iris[i][j] = localized_img[min(int(int(inner_boundary_y[0][j]) 32 | + (int(outer_boundary_y[0][j]) - int( 33 | inner_boundary_y[0][j])) * (i / 64.0)), localized_img.shape[0] - 1)][min(int(int(inner_boundary_x[0][j]) 34 | + (int( 35 | outer_boundary_x[0][j]) - int(inner_boundary_x[0][j])) 36 | * (i / 64.0)), 37 | localized_img.shape[1] - 1)] 38 | 39 | res_image = 255 - normalized_iris 40 | return res_image 41 | 42 | 43 | -------------------------------------------------------------------------------- /code/script/GetVector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import numpy as np 5 | import cv2 6 | from Segmentation import IrisLocalization 7 | from Normalization import IrisNormalization 8 | from Enhancement import ImageEnhancement 9 | from Gabor import FeatureExtraction 10 | import datetime 11 | import pandas as pd 12 | 13 | train = pd.read_csv("D:/study/iris/csv/train1.csv") 14 | test = pd.read_csv("D:/study/iris/csv/test1.csv") 15 | 16 | train_img_list = train["img"] 17 | train_label_list = train["label"] 18 | test_img_list = test["img"] 19 | test_label_list = test["label"] 20 | train_size = len(train_img_list) 21 | test_size = len(test_img_list) 22 | 23 | train_features = np.zeros((train_size,1536)) 24 | train_classes = np.zeros(train_size, dtype = np.uint8) 25 | test_features = np.zeros((test_size,1536)) 26 | test_classes = np.zeros(test_size, dtype = np.uint8) 27 | 28 | starttime = datetime.datetime.now() 29 | 30 | for i in range(train_size): 31 | train_path=train_img_list[i] 32 | img = cv2.imread(train_path, 0) 33 | iris, pupil = IrisLocalization(img) 34 | normalized = IrisNormalization(img, pupil, iris) 35 | ROI = ImageEnhancement(normalized) 36 | train_features[i, :] = FeatureExtraction(ROI) 37 | train_classes[i] = train_label_list[i] 38 | print('train_feature:',train_img_list[i],train_label_list[i],'{}/{}'.format(i,train_size)) 39 | 40 | for j in range(test_size): 41 | test_path=test_img_list[j] 42 | img = cv2.imread(test_path, 0) 43 | iris, pupil = IrisLocalization(img) 44 | normalized = IrisNormalization(img, pupil, iris) 45 | ROI = ImageEnhancement(normalized) 46 | test_features[j, :] = FeatureExtraction(ROI) 47 | test_classes[j] = test_label_list[j] 48 | print('test_feature:', test_img_list[j], test_label_list[j], '{}/{}'.format(j, test_size)) 49 | 50 | endtime = 
datetime.datetime.now() 51 | 52 | print('image processing and feature extraction takes '+str((endtime-starttime).seconds)+' seconds') 53 | train_vector=pd.DataFrame(train_features) 54 | #print(train_vector) 55 | train_vector.to_csv("D:/study/iris/csv/train_vector1.csv", index=False, encoding="utf-8") 56 | test_vector=pd.DataFrame(test_features) 57 | #print(train_vector) 58 | test_vector.to_csv("D:/study/iris/csv/test_vector1.csv", index=False, encoding="utf-8") 59 | -------------------------------------------------------------------------------- /code/tradition/Segmentation.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | 4 | import numpy as np 5 | import cv2 6 | from skimage.transform import hough_circle, hough_circle_peaks 7 | 8 | 9 | def IrisLocalization(eye): 10 | blured = cv2.bilateralFilter(eye, 9, 100, 100) 11 | Xp = blured.sum(axis=0).argmin() 12 | Yp = blured.sum(axis=1).argmin() 13 | x = blured[max(Yp - 60, 0):min(Yp + 60, 280), max(Xp - 60, 0):min(Xp + 60, 320)].sum(axis=0).argmin() 14 | y = blured[max(Yp - 60, 0):min(Yp + 60, 280), max(Xp - 60, 0):min(Xp + 60, 320)].sum(axis=1).argmin() 15 | Xp = max(Xp - 60, 0) + x 16 | Yp = max(Yp - 60, 0) + y 17 | if Xp >= 100 and Yp >= 80: 18 | blur = cv2.GaussianBlur(eye[Yp - 60:Yp + 60, Xp - 60:Xp + 60], (5, 5), 0) 19 | pupil_circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, dp=1.2, minDist=200, param1=200, param2=12, 20 | minRadius=15, maxRadius=80) 21 | xp, yp, rp = np.round(pupil_circles[0][0]).astype("int") 22 | xp = Xp - 60 + xp 23 | yp = Yp - 60 + yp 24 | else: 25 | pupil_circles = cv2.HoughCircles(blured, cv2.HOUGH_GRADIENT, 4, 280, minRadius=25, maxRadius=55, param2=51) 26 | xp, yp, rp = np.round(pupil_circles[0][0]).astype("int") 27 | eye_copy = eye.copy() 28 | rp = rp + 7 # slightly enlarge the pupil radius makes a better result 29 | blured_copy = cv2.medianBlur(eye_copy, 11) 30 | blured_copy = cv2.medianBlur(blured_copy, 11) 31 | blured_copy = cv2.medianBlur(blured_copy, 11) 32 | eye_edges = cv2.Canny(blured_copy, threshold1=15, threshold2=30, L2gradient=True) 33 | eye_edges[:, xp - rp - 30:xp + rp + 30] = 0 34 | 35 | hough_radii = np.arange(rp + 45, 150, 2) 36 | hough_res = hough_circle(eye_edges, hough_radii) 37 | accums, xi, yi, ri = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=1) 38 | iris = [] 39 | iris.extend(xi) 40 | iris.extend(yi) 41 | iris.extend(ri) 42 | if ((iris[0] - xp) ** 2 + (iris[1] - yp) ** 2) ** 0.5 > rp * 0.3: 43 | iris[0] = xp 44 | iris[1] = yp 45 | return np.array(iris), np.array([xp, yp, rp]) 46 | 47 | 48 | if __name__=="__main__": 49 | eye = r"D:/study/iris/CASIA-Iris-Thousand\779\R\S5779R00.jpg" 50 | img = cv2.imread(eye, 0) 51 | # a,b=IrisLocalization(img) 52 | a1, b1 = IrisLocalization(img) 53 | -------------------------------------------------------------------------------- /code/tradition/Gabor.py: -------------------------------------------------------------------------------- 1 | from scipy import ndimage 2 | import math 3 | import numpy as np 4 | 5 | 6 | ###this is feature extraction and works 7 | # defined filter 8 | def defined_filter(x, y, f): 9 | m1 = math.cos(2 * math.pi * f * math.sqrt(x ** 2 * y ** 2)) 10 | return m1 11 | 12 | 13 | space_constant_x1 = 3 14 | space_constant_x2 = 4.5 15 | space_constant_y = 1.5 16 | 17 | f1 = 0.1 18 | f2 = 0.07 19 | x1 = range(-9, 10, 1) 20 | x2 = range(-14, 15, 1) 21 | y = range(-5, 6, 1) 22 | 23 | 24 | def gabor_filter(x, y, space_constant_x, space_constant_y, f): 25 | 
m1 = defined_filter(x, y, f) 26 | return (1 / (2 * math.pi * space_constant_x * space_constant_y) 27 | * np.exp(-1 / 2 * (x ** 2 / (space_constant_x ** 2) + y ** 2 / (space_constant_y ** 2)))) * m1 28 | 29 | 30 | def FeatureExtraction(roi): 31 | filter1 = [] 32 | filter2 = [] 33 | f1 = 0.1 34 | f2 = 0.07 35 | x1 = range(-9, 10, 1) 36 | x2 = range(-14, 15, 1) 37 | y = range(-5, 6, 1) 38 | space_constant_x1 = 3 39 | space_constant_x2 = 4.5 40 | space_constant_y = 1.5 41 | for j in range(len(y)): 42 | for i in range(len(x1)): 43 | cell_1 = gabor_filter(x1[i], y[j], space_constant_x1, space_constant_y, f1) 44 | filter1.append(cell_1) 45 | for k in range(len(x2)): 46 | cell_2 = gabor_filter(x2[k], y[j], space_constant_x2, space_constant_y, f2) 47 | filter2.append(cell_2) 48 | filter1 = np.reshape(filter1, (len(y), len(x1))) 49 | filter2 = np.reshape(filter2, (len(y), len(x2))) 50 | 51 | filtered_eye1 = ndimage.convolve(roi, np.real(filter1), mode='wrap', cval=0) 52 | filtered_eye2 = ndimage.convolve(roi, np.real(filter2), mode='wrap', cval=0) 53 | 54 | vector = [] 55 | i = 0 56 | while i < roi.shape[0]: 57 | j = 0 58 | while j < roi.shape[1]: 59 | mean1 = filtered_eye1[i:i + 8, j:j + 8].mean() 60 | mean2 = filtered_eye2[i:i + 8, j:j + 8].mean() 61 | AAD1 = abs(filtered_eye1[i:i + 8, j:j + 8] - mean1).mean() 62 | AAD2 = abs(filtered_eye2[i:i + 8, j:j + 8] - mean2).mean() 63 | 64 | vector.append(mean1) 65 | vector.append(AAD1) 66 | vector.append(mean2) 67 | vector.append(AAD2) 68 | j = j + 8 69 | i = i + 8 70 | vector = np.array(vector) 71 | return vector 72 | 73 | -------------------------------------------------------------------------------- /code/script/GetPic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import numpy as np 5 | import cv2 6 | from Segmentation import IrisLocalization 7 | from Normalization import IrisNormalization 8 | from Enhancement import ImageEnhancement 9 | from Gabor import FeatureExtraction 10 | import datetime 11 | import pandas as pd 12 | 13 | train = pd.read_csv("D:/study/iris/csv/train1.csv") 14 | test = pd.read_csv("D:/study/iris/csv/test1.csv") 15 | 16 | train_img_list = train["img"] 17 | train_label_list = train["label"] 18 | test_img_list = test["img"] 19 | test_label_list = test["label"] 20 | train_size = len(train_img_list) 21 | test_size = len(test_img_list) 22 | 23 | train_features = np.zeros((train_size,1536)) 24 | train_classes = np.zeros(train_size, dtype = np.uint8) 25 | test_features = np.zeros((test_size,1536)) 26 | test_classes = np.zeros(test_size, dtype = np.uint8) 27 | 28 | starttime = datetime.datetime.now() 29 | 30 | '''for i in range(train_size): 31 | train_path=train_img_list[i] 32 | #print(train_path) 33 | img_name=train_path.split('\\')[-1].split('.')[0] 34 | img = cv2.imread(train_path, 0) 35 | iris, pupil = IrisLocalization(img) 36 | #cv2.circle(img, (iris[0], iris[1]), iris[2], (0, 0, 255), 1) 37 | #cv2.circle(img, (pupil[0], pupil[1]), pupil[2], (0, 255, 0), 1) 38 | #cv2.imwrite("D:/study/iris/process_data/circle/train/{}.jpg".format(img_name),img) 39 | normalized = IrisNormalization(img, pupil, iris) 40 | cv2.imwrite("D:/study/iris/process_data/rectangle/train/{}.jpg".format(img_name),normalized) 41 | ROI = ImageEnhancement(normalized) 42 | cv2.imwrite("D:/study/iris/process_data/enhancement/train/{}.jpg".format(img_name), ROI) 43 | #train_features[i, :] = FeatureExtraction(ROI) 44 | #train_classes[i] = train_label_list[i] 45 | 
print('train_feature:',train_img_list[i],train_label_list[i],'{}/{}'.format(i,train_size))''' 46 | 47 | for j in range(test_size): 48 | test_path=test_img_list[j] 49 | img_name = test_path.split('\\')[-1].split('.')[0] 50 | img = cv2.imread(test_path, 0) 51 | iris, pupil = IrisLocalization(img) 52 | #cv2.circle(img, (iris[0], iris[1]), iris[2], (0, 0, 255), 1) 53 | #cv2.circle(img, (pupil[0], pupil[1]), pupil[2], (0, 255, 0), 1) 54 | #cv2.imwrite("D:/study/iris/process_data/circle/test/{}".format(img_name), img) 55 | normalized = IrisNormalization(img, pupil, iris) 56 | cv2.imwrite("D:/study/iris/process_data/rectangle/test/{}.jpg".format(img_name), normalized) 57 | #ROI = ImageEnhancement(normalized) 58 | #cv2.imwrite("D:/study/iris/process_data/enhancement/test/{}.jpg".format(img_name), ROI) 59 | #test_features[j, :] = FeatureExtraction(ROI) 60 | #test_classes[j] = test_label_list[j] 61 | print('test_feature:', test_img_list[j], test_label_list[j], '{}/{}'.format(j, test_size)) 62 | 63 | endtime = datetime.datetime.now() -------------------------------------------------------------------------------- /code/CNN_feature/inception_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Contains common code shared by all inception models. 16 | Usage of arg scope: 17 | with slim.arg_scope(inception_arg_scope()): 18 | logits, end_points = inception.inception_v3(images, num_classes, 19 | is_training=is_training) 20 | """ 21 | from __future__ import absolute_import 22 | from __future__ import division 23 | from __future__ import print_function 24 | 25 | import tensorflow as tf 26 | 27 | slim = tf.contrib.slim 28 | 29 | 30 | def inception_arg_scope(weight_decay=0.00004, 31 | use_batch_norm=True, 32 | batch_norm_decay=0.9997, 33 | batch_norm_epsilon=0.001, 34 | activation_fn=tf.nn.relu, 35 | batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS): 36 | """Defines the default arg scope for inception models. 37 | Args: 38 | weight_decay: The weight decay to use for regularizing the model. 39 | use_batch_norm: "If `True`, batch_norm is applied after each convolution. 40 | batch_norm_decay: Decay for batch norm moving average. 41 | batch_norm_epsilon: Small float added to variance to avoid dividing by zero 42 | in batch norm. 43 | activation_fn: Activation function for conv2d. 44 | batch_norm_updates_collections: Collection for the update ops for 45 | batch norm. 46 | Returns: 47 | An `arg_scope` to use for the inception models. 48 | """ 49 | batch_norm_params = { 50 | # Decay for the moving averages. 51 | 'decay': batch_norm_decay, 52 | # epsilon to prevent 0s in variance. 53 | 'epsilon': batch_norm_epsilon, 54 | # collection containing update_ops. 
55 | 'updates_collections': batch_norm_updates_collections, 56 | # use fused batch norm if possible. 57 | 'fused': None, 58 | } 59 | if use_batch_norm: 60 | normalizer_fn = slim.batch_norm 61 | normalizer_params = batch_norm_params 62 | else: 63 | normalizer_fn = None 64 | normalizer_params = {} 65 | # Set weight_decay for weights in Conv and FC layers. 66 | with slim.arg_scope([slim.conv2d, slim.fully_connected], 67 | weights_regularizer=slim.l2_regularizer(weight_decay)): 68 | with slim.arg_scope( 69 | [slim.conv2d], 70 | weights_initializer=slim.variance_scaling_initializer(), 71 | activation_fn=activation_fn, 72 | normalizer_fn=normalizer_fn, 73 | normalizer_params=normalizer_params) as sc: 74 | return sc -------------------------------------------------------------------------------- /code/Iris.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | 4 | import numpy as np 5 | import cv2 6 | from Segmentation import IrisLocalization 7 | from Normalization import IrisNormalization 8 | from Enhancement import ImageEnhancement 9 | from Gabor import FeatureExtraction 10 | import Matching as IM 11 | import Evaluation as PE 12 | import datetime 13 | import pandas as pd 14 | 15 | train = pd.read_csv("D:/study/iris/csv/train1.csv") 16 | test = pd.read_csv("D:/study/iris/csv/test1.csv") 17 | 18 | train_img_list = train["img"] 19 | train_label_list = train["label"] 20 | test_img_list = test["img"] 21 | test_label_list = test["label"] 22 | train_size = len(train_img_list) 23 | test_size = len(test_img_list) 24 | 25 | train_features = np.zeros((train_size,1536)) 26 | train_classes = np.zeros(train_size, dtype = np.uint8) 27 | test_features = np.zeros((test_size,1536)) 28 | test_classes = np.zeros(test_size, dtype = np.uint8) 29 | 30 | #starttime = datetime.datetime.now() 31 | 32 | '''for i in range(train_size): 33 | train_path=train_img_list[i] 34 | img = cv2.imread(train_path, 0) 35 | iris, pupil = IrisLocalization(img) 36 | normalized = IrisNormalization(img, pupil, iris) 37 | ROI = ImageEnhancement(normalized) 38 | train_features[i, :] = FeatureExtraction(ROI) 39 | train_classes[i] = train_label_list[i] 40 | 41 | for j in range(test_size): 42 | test_path=test_img_list[j] 43 | img = cv2.imread(test_path, 0) 44 | iris, pupil = IrisLocalization(img) 45 | normalized = IrisNormalization(img, pupil, iris) 46 | ROI = ImageEnhancement(normalized) 47 | train_features[j, :] = FeatureExtraction(ROI) 48 | train_classes[j] = train_label_list[j]''' 49 | 50 | '''for i in range(1,109): 51 | filespath = rootpath + str(i).zfill(3) 52 | trainpath = filespath + "/1/" 53 | testpath = filespath + "/2/" 54 | for j in range(1,4): 55 | irispath = trainpath + str(i).zfill(3) + "_1_" + str(j) + ".bmp" 56 | img = cv2.imread(irispath, 0) 57 | iris, pupil = IrisLocalization(img) 58 | normalized = IrisNormalization(img, pupil, iris) 59 | ROI = ImageEnhancement(normalized) 60 | train_features[(i-1)*3+j-1, :] = FeatureExtraction(ROI) 61 | train_classes[(i-1)*3+j-1] = i 62 | for k in range(1,5): 63 | irispath = testpath + str(i).zfill(3) + "_2_" + str(k) + ".bmp" 64 | img = cv2.imread(irispath, 0) 65 | iris, pupil = IrisLocalization(img) 66 | normalized = IrisNormalization(img, pupil, iris) 67 | ROI = ImageEnhancement(normalized) 68 | test_features[(i-1)*4+k-1, :] = FeatureExtraction(ROI) 69 | test_classes[(i-1)*4+k-1] = i''' 70 | 71 | #endtime = datetime.datetime.now() 72 | 73 | #print('image processing and feature extraction takes 
'+str((endtime-starttime).seconds)+' seconds') 74 | 75 | 76 | PE.table_CRR(train_features, train_classes, test_features, test_classes) 77 | PE.performance_evaluation(train_features, train_classes, test_features, test_classes) 78 | #thresholds_2=[0.74,0.76,0.78] 79 | 80 | 81 | # this part is for bootsrap 82 | starttime = datetime.datetime.now() 83 | thresholds_3=np.arange(0.6,0.9,0.02) 84 | times = 100 #running 100 times takes about 1 to 2 hours 85 | total_fmrs, total_fnmrs, crr_mean, crr_u, crr_l = IM.IrisMatchingBootstrap(train_features, train_classes, test_features, test_classes,times,thresholds_3) 86 | fmrs_mean,fmrs_l,fmrs_u,fnmrs_mean,fnmrs_l,fnmrs_u = IM.calcROCBootstrap(total_fmrs, total_fnmrs) 87 | 88 | endtime = datetime.datetime.now() 89 | 90 | print('Bootsrap takes'+str((endtime-starttime).seconds) + 'seconds') 91 | 92 | fmrs_mean *= 100 #use for percent(%) 93 | fmrs_l *= 100 94 | fmrs_u *= 100 95 | fnmrs_mean *= 100 96 | fnmrs_l *= 100 97 | fnmrs_u *= 100 98 | PE.FM_FNM_table(fmrs_mean,fmrs_l,fmrs_u,fnmrs_mean,fnmrs_l,fnmrs_u, thresholds_3) 99 | PE.FMR_conf(fmrs_mean,fmrs_l,fmrs_u,fnmrs_mean,fnmrs_l,fnmrs_u) 100 | PE.FNMR_conf(fmrs_mean,fmrs_l,fmrs_u,fnmrs_mean,fnmrs_l,fnmrs_u) 101 | -------------------------------------------------------------------------------- /Iris.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | 4 | import numpy as np 5 | import cv2 6 | from Segmentation import IrisLocalization 7 | from Normalization import IrisNormalization 8 | from Enhancement import ImageEnhancement 9 | from Gabor import FeatureExtraction 10 | import Matching as IM 11 | import Evaluation as PE 12 | import datetime 13 | import pandas as pd 14 | 15 | train = pd.read_csv("D:/study/iris/csv/train1.csv") 16 | test = pd.read_csv("D:/study/iris/csv/test1.csv") 17 | 18 | train_img_list = train["img"] 19 | train_label_list = train["label"] 20 | test_img_list = test["img"] 21 | test_label_list = test["label"] 22 | train_size = len(train_img_list) 23 | test_size = len(test_img_list) 24 | 25 | train_features = np.zeros((train_size,1536)) 26 | train_classes = np.zeros(train_size, dtype = np.uint8) 27 | test_features = np.zeros((test_size,1536)) 28 | test_classes = np.zeros(test_size, dtype = np.uint8) 29 | 30 | #starttime = datetime.datetime.now() 31 | 32 | '''for i in range(train_size): 33 | train_path=train_img_list[i] 34 | img = cv2.imread(train_path, 0) 35 | iris, pupil = IrisLocalization(img) 36 | normalized = IrisNormalization(img, pupil, iris) 37 | ROI = ImageEnhancement(normalized) 38 | train_features[i, :] = FeatureExtraction(ROI) 39 | train_classes[i] = train_label_list[i] 40 | 41 | for j in range(test_size): 42 | test_path=test_img_list[j] 43 | img = cv2.imread(test_path, 0) 44 | iris, pupil = IrisLocalization(img) 45 | normalized = IrisNormalization(img, pupil, iris) 46 | ROI = ImageEnhancement(normalized) 47 | train_features[j, :] = FeatureExtraction(ROI) 48 | train_classes[j] = train_label_list[j]''' 49 | 50 | '''for i in range(1,109): 51 | filespath = rootpath + str(i).zfill(3) 52 | trainpath = filespath + "/1/" 53 | testpath = filespath + "/2/" 54 | for j in range(1,4): 55 | irispath = trainpath + str(i).zfill(3) + "_1_" + str(j) + ".bmp" 56 | img = cv2.imread(irispath, 0) 57 | iris, pupil = IrisLocalization(img) 58 | normalized = IrisNormalization(img, pupil, iris) 59 | ROI = ImageEnhancement(normalized) 60 | train_features[(i-1)*3+j-1, :] = FeatureExtraction(ROI) 61 | 
train_classes[(i-1)*3+j-1] = i 62 | for k in range(1,5): 63 | irispath = testpath + str(i).zfill(3) + "_2_" + str(k) + ".bmp" 64 | img = cv2.imread(irispath, 0) 65 | iris, pupil = IrisLocalization(img) 66 | normalized = IrisNormalization(img, pupil, iris) 67 | ROI = ImageEnhancement(normalized) 68 | test_features[(i-1)*4+k-1, :] = FeatureExtraction(ROI) 69 | test_classes[(i-1)*4+k-1] = i''' 70 | 71 | #endtime = datetime.datetime.now() 72 | 73 | #print('image processing and feature extraction takes '+str((endtime-starttime).seconds)+' seconds') 74 | 75 | 76 | PE.table_CRR(train_features, train_classes, test_features, test_classes) 77 | PE.performance_evaluation(train_features, train_classes, test_features, test_classes) 78 | #thresholds_2=[0.74,0.76,0.78] 79 | 80 | 81 | # this part is for bootsrap 82 | starttime = datetime.datetime.now() 83 | thresholds_3=np.arange(0.6,0.9,0.02) 84 | times = 100 #running 100 times takes about 1 to 2 hours 85 | total_fmrs, total_fnmrs, crr_mean, crr_u, crr_l = IM.IrisMatchingBootstrap(train_features, train_classes, test_features, test_classes,times,thresholds_3) 86 | fmrs_mean,fmrs_l,fmrs_u,fnmrs_mean,fnmrs_l,fnmrs_u = IM.calcROCBootstrap(total_fmrs, total_fnmrs) 87 | 88 | endtime = datetime.datetime.now() 89 | 90 | print('Bootsrap takes'+str((endtime-starttime).seconds) + 'seconds') 91 | 92 | fmrs_mean *= 100 #use for percent(%) 93 | fmrs_l *= 100 94 | fmrs_u *= 100 95 | fnmrs_mean *= 100 96 | fnmrs_l *= 100 97 | fnmrs_u *= 100 98 | PE.FM_FNM_table(fmrs_mean,fmrs_l,fmrs_u,fnmrs_mean,fnmrs_l,fnmrs_u, thresholds_3) 99 | PE.FMR_conf(fmrs_mean,fmrs_l,fmrs_u,fnmrs_mean,fnmrs_l,fnmrs_u) 100 | PE.FNMR_conf(fmrs_mean,fmrs_l,fmrs_u,fnmrs_mean,fnmrs_l,fnmrs_u) 101 | -------------------------------------------------------------------------------- /code/script/GetList.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import os, glob 4 | 5 | def get_label(): 6 | list1 = [i for i in range(1,109)] 7 | label = [] 8 | for i in range(len(list1)): 9 | if len(str(list1[i])) == 1: 10 | s = "00" + str(list1[i]) 11 | label.append(s) 12 | continue 13 | if len(str(list1[i])) == 2: 14 | s = "0" + str(list1[i]) 15 | label.append(s) 16 | continue 17 | else: 18 | label.append(str(list1[i])) 19 | return label 20 | 21 | def get_LR(): 22 | return ['L','R'] 23 | 24 | def get_onetwo(): 25 | return ['1','2'] 26 | 27 | def get_files(file_dir): 28 | label = get_label() 29 | LR=get_LR() 30 | image_list, label_list = [], [] 31 | for i in label: 32 | for j in LR: 33 | for img in glob.glob(os.path.join(file_dir, i, j, "*.jpg")): 34 | image_list.append(img) 35 | label_list.append(int(i)) 36 | print('There are %d data' %(len(image_list))) 37 | temp = np.array([image_list, label_list]) 38 | temp = temp.transpose() 39 | np.random.shuffle(temp) 40 | image_list = list(temp[:, 0]) 41 | label_list = list(temp[:, 1]) 42 | label_list = [int(i) for i in label_list] 43 | return image_list, label_list 44 | 45 | def split(img_list,label_list,split_rate=0.3): 46 | l = len(img_list)*(1-split_rate) 47 | l = int(l) 48 | #print(l) 49 | img_train_list = img_list[:l] 50 | label_train_list = label_list[:l] 51 | img_test_list = img_list[l:] 52 | label_test_list = label_list[l:] 53 | return img_train_list, label_train_list, img_test_list, label_test_list 54 | 55 | def get_files1(file_dir): 56 | label = get_label() 57 | LR=get_onetwo() 58 | image_list_train, label_list_train, image_list_test, label_list_test = [], [], [], [] 59 | for i 
in label: 60 | for img in glob.glob(os.path.join(file_dir, i, LR[0], "*.bmp")): 61 | image_list_train.append(img) 62 | label_list_train.append(int(i)) 63 | for img in glob.glob(os.path.join(file_dir, i, LR[1], "*.bmp")): 64 | image_list_test.append(img) 65 | label_list_test.append(int(i)) 66 | print('There are %d data' %(len(image_list_train))) 67 | print('There are %d data' % (len(image_list_test))) 68 | 69 | temp = np.array([image_list_train, label_list_train]) 70 | temp = temp.transpose() 71 | np.random.shuffle(temp) 72 | image_list_train = list(temp[:, 0]) 73 | label_list_train = list(temp[:, 1]) 74 | label_list_train = [int(i) for i in label_list_train] 75 | 76 | temp1 = np.array([image_list_test, label_list_test]) 77 | temp1 = temp1.transpose() 78 | np.random.shuffle(temp1) 79 | image_list_test = list(temp1[:, 0]) 80 | label_list_test = list(temp1[:, 1]) 81 | label_list_test = [int(i) for i in label_list_test] 82 | return image_list_train, label_list_train, image_list_test, label_list_test 83 | def main(): 84 | file_dir = "D:/study/iris/CASIA-Iris-Thousand" 85 | img_list, label_list = get_files(file_dir) 86 | img_train_list, label_train_list, img_test_list, label_test_list = split(img_list, label_list) 87 | train = pd.DataFrame() 88 | test = pd.DataFrame() 89 | train['img'] = img_train_list 90 | train['label'] = label_train_list 91 | test['img'] = img_test_list 92 | test['label'] = label_test_list 93 | train.to_csv("D:/study/iris/csv/train.csv", index=False, encoding="utf-8") 94 | test.to_csv("D:/study/iris/csv/test.csv", index=False, encoding="utf-8") 95 | print("finish!") 96 | 97 | def main1(): 98 | file_dir = "D:/study/iris/data" 99 | img_train_list, label_train_list, img_test_list, label_test_list = get_files1(file_dir) 100 | train = pd.DataFrame() 101 | test = pd.DataFrame() 102 | train['img'] = img_train_list 103 | train['label'] = label_train_list 104 | test['img'] = img_test_list 105 | test['label'] = label_test_list 106 | train.to_csv("D:/study/iris/csv/train1.csv", index=False, encoding="utf-8") 107 | test.to_csv("D:/study/iris/csv/test1.csv", index=False, encoding="utf-8") 108 | print("finish!") 109 | if __name__=="__main__": 110 | main1() -------------------------------------------------------------------------------- /code/CNN_feature/cnn_feature.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import pandas as pd 4 | from ResNet import resnet_base,get_restorer 5 | from inceptionv4 import inception_v4,inception_v4_base 6 | from DenseNet import densenet121 7 | from tensorflow.contrib import slim 8 | from tensorflow.python import pywrap_tensorflow 9 | import cv2 10 | import os 11 | 12 | def init_tf(): 13 | global sess, logits, x 14 | # process image 15 | x = tf.placeholder(tf.float32, shape=[64,512, 3]) 16 | x_4d = tf.reshape(x, [-1, 64, 512, 3]) 17 | logits = resnet_base(x_4d,"resnet_v1_101",is_training=False) 18 | logits= tf.reshape(logits,[-1]) 19 | print("logit", np.shape(logits)) 20 | restorer, checkpoint_path=get_restorer() 21 | sess = tf.Session() 22 | restorer.restore(sess, checkpoint_path) 23 | print('load model done...') 24 | 25 | def initialize_uninitialized(sess): 26 | global_vars = tf.global_variables() 27 | is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars]) 28 | not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f] 29 | print([str(i.name) for i in not_initialized_vars]) 30 | if 
len(not_initialized_vars): 31 | sess.run(tf.variables_initializer(not_initialized_vars)) 32 | 33 | def init_tf_dense(): 34 | global sess, logits, x 35 | # process image 36 | checkpoint_path = r"D:\study\iris\code\CNN\model\tf-densenet121.ckpt" 37 | x = tf.placeholder(tf.float32, shape=[64, 512, 3]) 38 | x_4d = tf.reshape(x, [-1, 64, 512, 3]) 39 | logits = densenet121(x_4d, num_classes=1000, is_training=False) 40 | print("logit1", np.shape(logits)) 41 | logits= tf.reshape(logits, [-1]) 42 | print("logit", np.shape(logits)) 43 | restore_variable = slim.get_variables_to_restore() 44 | print(restore_variable) 45 | init_fn = slim.assign_from_checkpoint_fn(checkpoint_path, restore_variable, ignore_missing_vars=True) 46 | sess = tf.Session() 47 | init_fn(sess) 48 | initialize_uninitialized(sess) 49 | print('load model done...') 50 | 51 | def inceptionv4_init(): 52 | global sess, logits, x 53 | # process image 54 | checkpoint_path = r"D:\study\iris\code\CNN\model\inception_v4.ckpt" 55 | x = tf.placeholder(tf.float32, shape=[64, 512, 3]) 56 | x_4d = tf.reshape(x, [-1, 64, 512, 3]) 57 | logits,_ = inception_v4_base(x_4d) 58 | logits = tf.reshape(logits, [-1]) 59 | print("logit", np.shape(logits)) 60 | restore_variable = slim.get_variables_to_restore() 61 | init_fn = slim.assign_from_checkpoint_fn(checkpoint_path, restore_variable, ignore_missing_vars=True) 62 | sess = tf.Session() 63 | init_fn(sess) 64 | initialize_uninitialized(sess) 65 | 66 | def feature_ex(img_path): 67 | #img = image.load_img(img_path, target_size=(size, size)) 68 | img = cv2.imread(img_path,0) 69 | #img = cv2.resize(img,(64,64)) 70 | img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) 71 | img_array = np.array(img) 72 | feature = sess.run(logits, feed_dict={x:img_array}) 73 | return feature 74 | 75 | def ex_train(): 76 | train=pd.read_csv("D:/study/iris/csv/train.csv") 77 | dir = r"D:\study\iris\CASIA\norm_512\train" 78 | train_img_list = train["img"] 79 | train_label_list = train["label"] 80 | train_size = len(train_img_list) 81 | feature_list=[] 82 | for i in range(train_size): 83 | path = os.path.join(dir,train_img_list[i].split("\\")[-1]) 84 | feature = np.array(feature_ex(path)).astype(np.float16) 85 | feature_list.append(feature) 86 | print("train:", i, path) 87 | return np.array(feature_list) 88 | #cnn_feature = pd.DataFrame(np.array(feature_list)) 89 | #cnn_feature.to_csv("D:/study/iris/csv/train_vector2.csv", index=False, encoding="utf-8") 90 | 91 | def ex_test(): 92 | test=pd.read_csv("D:/study/iris/csv/test.csv") 93 | dir = r"D:\study\iris\CASIA\norm_512\test" 94 | test_img_list = test["img"] 95 | test_label_list = test["label"] 96 | test_size = len(test_img_list) 97 | feature_list=[] 98 | for i in range(test_size): 99 | path = os.path.join(dir,test_img_list[i].split("\\")[-1]) 100 | feature = np.array(feature_ex(path)).astype(np.float16) 101 | feature_list.append(feature) 102 | print("test:", i, path) 103 | return np.array(feature_list) 104 | #cnn_feature = pd.DataFrame(np.array(feature_list)) 105 | #cnn_feature.to_csv("D:/study/iris/csv/test_vector2.csv", index=False, encoding="utf-8") 106 | 107 | 108 | if __name__=="__main__": 109 | init_tf_dense() 110 | #ex_train() 111 | #ex_test() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Iris_Recognition 2 | Iris recognition include tradition algorithm and deep learning. 
3 | 4 | 5 | 6 | - [Code](#code) 7 | - [script](#script) 8 | - [tradition](#tradition) 9 | - [CNN_feature](#cnn_feature) 10 | - [CNN classifier](#cnn-classifier) 11 | - [Dataset](#dataset) 12 | - [CASIA-Iris Version 1.0](#casia-iris-version-10) 13 | - [CASIA-Iris-Thousand](#casia-iris-thousand) 14 | - [Algorithm](#algorithm) 15 | - [1. Traditional Algorithm](#1-traditional-algorithm) 16 | - [Preprocessing](#preprocessing) 17 | - [USIT v2.2](#usit-v22) 18 | - [Gabor Feature Extraction](#gabor-feature-extraction) 19 | - [Distance Based Match](#distance-based-match) 20 | - [Machine Learning Predict](#machine-learning-predict) 21 | - [2. CNN Feature Extraction](#2-cnn-feature-extraction) 22 | - [3. CNN Classification](#3-cnn-classification) 23 | 24 | 25 | 26 | # Code 27 | Contents of the `code` directory. 28 | ## script 29 | * script 30 | * GetList.py Build the image path list and label list, then split them into train and test sets. 31 | * GetPic.py Save the segmented iris images, plus the ROI images after normalization and enhancement. 32 | * GetVector.py Extract the Gabor feature vectors of the train and test sets and save them as CSV. 33 | * copy_pic.py Copy all the pictures into one directory. 34 | ## tradition 35 | * tradition 36 | * Segmentation.py Segment the iris ring with the Hough transform and Canny edge detection. 37 | * Normalization.py Unfold the circular ring into a rectangular ROI. 38 | * Enhancement.py Enhance the ROI after normalization. 39 | * Gabor.py Feature extraction with a Gabor filter bank. 40 | * Matching.py Match with city-block, Euclidean, and cosine distances. 41 | * Evaluation.py Evaluate the recognition accuracy for each distance. 42 | * iris_demo2.py Run and get the results. 43 | ## CNN_feature 44 | * CNN_feature 45 | * inception_utils.py Inception utils from Google's TensorFlow Slim. 46 | * inceptionv4.py InceptionV4 from Google's TensorFlow Slim. 47 | * resnet_utils.py ResNet utils from Google's TensorFlow Slim. 48 | * ResNet.py ResNet from Google's TensorFlow Slim. 49 | * DenseNet.py DenseNet from Google's TensorFlow Slim. 50 | * cnn_feature.py CNN feature extraction with InceptionV4, ResNet, and DenseNet. 51 | * iris_demo1.py Run and get the results. 52 | ## CNN classifier 53 | * CNN_classifier 54 | * utils.py Data loading and preprocessing. 55 | * DenseNet.py DenseNet definition. 56 | * train.py Train a mini DenseNet. 57 | * eval.py Evaluate and get the results. 58 | 59 | # Dataset 60 | ## CASIA-Iris Version 1.0 61 | Contains 108 classes; each class has 7 images, three of which are used for training and the rest for testing. 62 | 63 | ## CASIA-Iris-Thousand 64 | Contains 1000 classes; each class has 20 images, half left eyes and half right. 65 | We randomly select 70% of them for training and 30% for testing. All the results below are based on this dataset. 66 | 67 | # Algorithm 68 | ## 1. Traditional Algorithm 69 | ### Preprocessing 70 | We use the `Hough transform` and `Canny edge detection` to segment the iris, then unfold the ring between the outer and inner circles into a rectangle of size 64*512. After `normalization`, we apply local histogram equalization as in Li Ma's paper. Finally, we use a Gabor filter bank to extract the feature vector from the ROI (a minimal Python sketch of this pipeline is shown below). 71 | 72 | ### USIT v2.2 73 | We also recommend an open-source tool, `USIT v2.2`, from the University of Salzburg to complete the preprocessing. 74 | [Github](https://github.com/ngoclamvt123/usit-v2.2.0) 75 | Clone the repository, install OpenCV and Boost, and build `wahet.cpp` in Release mode.
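For the pure-Python route, the modules under `code/tradition` implement the same preprocessing chain end to end. A minimal sketch is shown below (the image path is a placeholder; run it from the `code/tradition` directory so the imports resolve):

```python
import cv2
from Segmentation import IrisLocalization    # pupil and iris circle detection
from Normalization import IrisNormalization  # unfold the ring into a 64x512 rectangle
from Enhancement import ImageEnhancement     # local histogram equalization + ROI crop
from Gabor import FeatureExtraction          # Gabor filter bank -> 1536-d feature vector

img = cv2.imread("path/to/eye.jpg", 0)            # grayscale iris image (placeholder path)
iris, pupil = IrisLocalization(img)               # outer (iris) and inner (pupil) circles
normalized = IrisNormalization(img, pupil, iris)  # 64x512 normalized texture
roi = ImageEnhancement(normalized)                # enhanced ROI (top 48 rows)
feature = FeatureExtraction(roi)                  # vector consumed by matching / KNN / SVM
```

`Matching.py` and `Evaluation.py` then take these vectors, as in `code/Iris.py`.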
76 | 77 | USIT usage: 78 | For a single image: 79 | ``` 80 | test.exe -i D:\study\iris\CASIA-Iris-Thousand\000\L\S5000L00.jpg -o texture.png -s 256 64 -e 81 | ``` 82 | For a batch of images: 83 | ``` 84 | test.exe -i D:\study\iris\CASIA\origin\train\*.jpg -o D:/study/iris/CASIA/enhance_512/train/?1.jpg -s 512 64 -e 85 | ``` 86 | If you don't need enhancement, just drop "-e". 87 | If you also need the segmentation output, add "-sr D:/study/iris/CASIA/seg/train/?1.jpg". 88 | 89 | ### Gabor Feature Extraction 90 | We use a Gabor filter bank to extract the features. 91 | 92 | ### Distance Based Match 93 | We match with city-block, Euclidean, and cosine distances; the recognition rates are 88.19%, 84.95%, and 85.42% respectively. 94 | 95 | ### Machine Learning Predict 96 | We use PCA to reduce the dimensionality and then train and predict with KNN and SVM. The best results are obtained when the dimension is reduced to 380: 90.2% for KNN and 90.7% for SVM. 97 | 98 | ## 2. CNN Feature Extraction 99 | We use InceptionV4, ResNet-101, and DenseNet-121 to extract features from the enhanced ROI. Taking InceptionV4 at "Mixed6a", ResNet with blocks [3,4,9], and DenseNet with blocks [6,12,3], and then using PCA to reduce the dimension to 580 before the SVM, gives the best results: 95.8%, 96.4%, and 97.1% respectively. We also append average pooling after the convolutions to avoid a MemoryError. 100 | 101 | ## 3. CNN Classification 102 | We also train a mini DenseNet with 40 layers as a classifier. The dataset is the ROI images of CASIA-Iris-Thousand. However, it does not work well: the limited dataset leads to over-fitting. 103 | 104 | 105 | 106 | 107 | 108 | -------------------------------------------------------------------------------- /code/CNN_classifier/DenseNet.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from __future__ import division 3 | 4 | import tensorflow as tf 5 | import numpy as np 6 | import math 7 | 8 | #CLASSES = 108 9 | dense_blocks_num = 6 10 | k = 12 11 | L = 40 12 | layers = 4 13 | 14 | #Used for BN 15 | _BATCH_NORM_DECAY = 0.997 16 | _BATCH_NORM_EPSILON = 1e-5 17 | 18 | ''' 19 | (layers * 2) * dense_blocks_num + (dense_blocks_num - 1) + 1(init) + 1(fc) 20 | = (6 * 2) * 4 + (3 - 1) + 1 + 1 21 | = 52 22 | = L 23 | ''' 24 | 25 | def weight_variable(shape, stddev=None, name='weight'): 26 | if stddev == None: 27 | if len(shape) == 4: 28 | stddev = math.sqrt(2. / (shape[0] * shape[1] * shape[2])) 29 | else: 30 | stddev = math.sqrt(2.
/ shape[0]) 31 | else: 32 | stddev = 0.1 33 | initial = tf.truncated_normal(shape, stddev=stddev) 34 | W = tf.Variable(initial, name=name) 35 | tf.add_to_collection(tf.GraphKeys.WEIGHTS, W) 36 | return W 37 | 38 | 39 | def bias_variable(shape, name='bias'): 40 | initial = tf.constant(0.1, shape=shape) 41 | return tf.Variable(initial) 42 | 43 | 44 | def bn_layer(inputs, training): 45 | 46 | return tf.layers.batch_normalization( 47 | inputs=inputs, axis=-1, 48 | momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, 49 | scale=True, training=training, fused=True) 50 | 51 | 52 | def dense_block_layer(input, k, k0, layer, train, keep_prob): 53 | ''' 54 | :param input: input feature map 55 | :param k: output channels 56 | :param k0: input channels 57 | :param layer: the layer number in the dense block 58 | :param train: used for bn layer 59 | :return: the dense block layer output 60 | ''' 61 | input_channels = k0 + (layer - 1) * k 62 | weights_1 = weight_variable(shape=[1, 1, input_channels, 4 * k]) 63 | weights_3 = weight_variable(shape=[3, 3, 4 * k, k]) 64 | with tf.name_scope("dense_bottleneck_layer"): 65 | input = bn_layer(input, train) 66 | input = tf.nn.relu(input) 67 | input = tf.nn.conv2d(input, weights_1, [1, 1, 1, 1], padding='SAME', name='conv1') 68 | input = tf.nn.dropout(input, keep_prob=keep_prob) 69 | input = bn_layer(input, train) 70 | input = tf.nn.relu(input) 71 | input = tf.nn.conv2d(input, weights_3, [1, 1, 1, 1], padding='SAME', name='conv3') 72 | input = tf.nn.dropout(input, keep_prob=keep_prob) 73 | return input 74 | 75 | 76 | def dense_block(input, k, layers, train, keep_prob): 77 | ''' 78 | dense block 79 | :param input: input feature map 80 | :param k: output channels 81 | :param layers: the layer numbers of the dense block 82 | :param train: used fot train 83 | :return: the dense block output 84 | ''' 85 | input_shape = input.get_shape().as_list() 86 | k0 = input_shape[-1] 87 | #output = input 88 | for i in range(1, layers + 1): 89 | #print(i) 90 | with tf.name_scope("layer_%d" % i): 91 | output = dense_block_layer(input, k, k0, i, train, keep_prob) 92 | input = tf.concat(values=[input, output], axis=-1) 93 | return input 94 | 95 | def transition_layer(input, train, keep_prob): 96 | """ 97 | 1x1 conv, 2x2 avegage pool 98 | :param input: 99 | :return: 100 | """ 101 | input_shape = input.get_shape().as_list() 102 | with tf.name_scope('transition_layer'): 103 | input = bn_layer(input, train) 104 | weights = weight_variable(shape = [1, 1, input_shape[-1], input_shape[-1]]) 105 | input = tf.nn.conv2d(input, weights, [1, 1, 1, 1], padding='SAME') 106 | input = tf.nn.dropout(input, keep_prob=keep_prob) 107 | input = tf.nn.avg_pool(input, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME') 108 | return input 109 | 110 | def densenet(input, keep_prob,N_Class, train=True): 111 | ''' 112 | densenet: k = 12, L = 40 113 | :param input: 114 | :param train: 115 | :return: 116 | ''' 117 | 118 | input_shape = input.get_shape().as_list() 119 | with tf.name_scope('initial'): 120 | weights = weight_variable(shape=[3, 3, input_shape[-1], 16]) 121 | input = tf.nn.conv2d(input, weights, [1, 1, 1, 1], padding='SAME') 122 | 123 | with tf.name_scope("dense_blocks"): 124 | for i in range(1, 1 + dense_blocks_num): 125 | with tf.name_scope("dense_block_%d" % i): 126 | input = dense_block(input, k, layers, train, keep_prob) 127 | if i != dense_blocks_num: 128 | input = transition_layer(input, train, keep_prob) 129 | 130 | print(input) 131 | with tf.name_scope('classification_layer'): 132 | 
#input = tf.nn.avg_pool(input, [1, 8, 8, 1], [1, 8, 8, 1], padding='VALID') 133 | input = tf.reduce_mean(input, axis=[1, 2]) 134 | print(input) 135 | input_shape = input.get_shape().as_list() 136 | weights = weight_variable(shape=[input_shape[-1], N_Class]) 137 | bias = bias_variable([N_Class]) 138 | input = tf.matmul(input, weights) + bias 139 | 140 | output = input 141 | return output 142 | 143 | 144 | if __name__ == '__main__': 145 | input = tf.constant(0.1, shape=[8, 64, 512, 3], dtype=tf.float32) 146 | output = densenet(input, 1.0,10) 147 | print(output) -------------------------------------------------------------------------------- /code/CNN_classifier/train.py: -------------------------------------------------------------------------------- 1 | # coding:utf-8 2 | import os 3 | import tensorflow as tf 4 | from utils import * 5 | from DenseNet import densenet 6 | import tensorflow.contrib.slim as slim 7 | 8 | keep_prob = 0.5 9 | batch_size = 32 10 | num_class = 1000 11 | init_lr = 0.01 12 | decay_steps = 30 13 | epochs = 100 14 | log_dir="D:/study/iris/code/CNN/log/train6" 15 | 16 | def conv_net(x, n_classes, dropout, reuse, is_training): 17 | # Define a scope for reusing the variables 18 | with tf.variable_scope('ConvNet', reuse=reuse): 19 | # Convolution Layer with 24 filters and a kernel size of 5 20 | conv1 = tf.layers.conv2d(x, 24, 5, activation=tf.nn.relu) 21 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 22 | conv1 = tf.layers.max_pooling2d(conv1, 2, 2) 23 | 24 | # Convolution Layer with 48 filters and a kernel size of 3 25 | conv2 = tf.layers.conv2d(conv1, 48, 3, activation=tf.nn.relu) 26 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 27 | conv2 = tf.layers.max_pooling2d(conv2, 2, 2) 28 | 29 | # Flatten the data to a 1-D vector for the fully connected layer 30 | fc1 = tf.contrib.layers.flatten(conv2) 31 | 32 | # Fully connected layer (in contrib folder for now) 33 | fc1 = tf.layers.dense(fc1, 768) 34 | # Apply Dropout (if is_training is False, dropout is not applied) 35 | fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training) 36 | 37 | # Output layer, class prediction 38 | out = tf.layers.dense(fc1, n_classes) 39 | # Because 'softmax_cross_entropy_with_logits' already apply softmax, 40 | # we only apply softmax to testing network 41 | out = tf.nn.softmax(out) if not is_training else out 42 | 43 | return out 44 | 45 | def main(): 46 | train(BATCH_SIZE=batch_size,N_CLASSES=num_class,init_lr= init_lr,\ 47 | decay_steps=decay_steps,logs_train_dir=log_dir,epochs=epochs) 48 | 49 | 50 | CAPACITY = 1000 51 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" # gpu编号 52 | config = tf.ConfigProto() 53 | config.gpu_options.allow_growth = True # 设置最小gpu使用量 54 | 55 | def train(BATCH_SIZE,N_CLASSES,init_lr,decay_steps,logs_train_dir,epochs): 56 | train_list,train_label=get_train_list() 57 | one_epoch_step = len(train_list) / BATCH_SIZE 58 | MAX_STEP=epochs * one_epoch_step 59 | global_step = tf.Variable(0, name='global_step', trainable=False) 60 | # label without one-hot 61 | batch_train, batch_labels = get_batch(train_list, train_label, BATCH_SIZE, CAPACITY) 62 | logits =densenet(batch_train, keep_prob, N_CLASSES, True) 63 | #net 64 | print(logits.get_shape()) 65 | # loss 66 | label_smoothing = 0.1 67 | one_hot_labels = slim.one_hot_encoding(batch_labels, N_CLASSES) 68 | one_hot_labels = (1.0 - label_smoothing) * one_hot_labels + label_smoothing / N_CLASSES #标签平滑 69 | 70 | cross_entropy = 
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_labels) 71 | loss = tf.reduce_mean(cross_entropy, name='loss') 72 | tf.summary.scalar('train_loss', loss) 73 | # optimizer 74 | lr = tf.train.exponential_decay(learning_rate=init_lr, global_step=global_step, decay_steps=decay_steps*one_epoch_step, 75 | decay_rate=0.1) 76 | tf.summary.scalar('learning_rate', lr) 77 | 78 | update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 79 | with tf.control_dependencies(update_ops): 80 | optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, global_step=global_step) 81 | 82 | correct_pred = tf.equal(tf.argmax(logits, 1), tf.cast(batch_labels, tf.int64)) 83 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 84 | 85 | tf.summary.scalar('train_acc', accuracy) 86 | 87 | summary_op = tf.summary.merge_all() 88 | sess = tf.Session(config=config) 89 | train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph) 90 | 91 | # saver = tf.train.Saver() 92 | var_list = tf.trainable_variables() 93 | g_list = tf.global_variables() 94 | bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name] 95 | bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name] 96 | var_list += bn_moving_vars 97 | saver = tf.train.Saver(var_list=var_list, max_to_keep=10) 98 | 99 | #saver = tf.train.Saver(max_to_keep=100) 100 | sess.run(tf.global_variables_initializer()) 101 | coord = tf.train.Coordinator() 102 | threads = tf.train.start_queue_runners(sess=sess, coord=coord) 103 | #saver.restore(sess, logs_train_dir+'/model.ckpt-10000') 104 | try: 105 | for step in range(int(MAX_STEP)): 106 | if coord.should_stop(): 107 | break 108 | _, learning_rate, tra_loss, tra_acc = sess.run([optimizer, lr, loss, accuracy]) 109 | if step % 10 == 0: 110 | print('Epoch %3d/%d, Step %6d/%d, lr %f, train loss = %.2f, train accuracy = %.2f%%' % ( 111 | step / one_epoch_step, MAX_STEP / one_epoch_step, step, MAX_STEP, learning_rate, tra_loss, 112 | tra_acc * 100.0)) 113 | summary_str = sess.run(summary_op) 114 | train_writer.add_summary(summary_str, step) 115 | if step % 5000 == 0 or (step + 1) == MAX_STEP: 116 | checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt') 117 | saver.save(sess, checkpoint_path, global_step=step) 118 | except tf.errors.OutOfRangeError: 119 | print('Done training -- epoch limit reached') 120 | finally: 121 | coord.request_stop() 122 | 123 | coord.join(threads) 124 | sess.close() 125 | 126 | 127 | if __name__ == '__main__': 128 | main() -------------------------------------------------------------------------------- /code/CNN_feature/ResNet.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from __future__ import absolute_import, print_function, division 4 | 5 | 6 | import tensorflow as tf 7 | import tensorflow.contrib.slim as slim 8 | from tensorflow.contrib.slim.nets import resnet_v1 9 | from tensorflow.contrib.slim.nets import resnet_utils 10 | from tensorflow.contrib.slim.python.slim.nets.resnet_v1 import resnet_v1_block 11 | import tfplot as tfp 12 | 13 | 14 | def resnet_arg_scope( 15 | is_training=True, weight_decay=0.0001, batch_norm_decay=0.997, 16 | batch_norm_epsilon=1e-5, batch_norm_scale=True): 17 | ''' 18 | 19 | In Default, we do not use BN to train resnet, since batch_size is too small. 20 | So is_training is False and trainable is False in the batch_norm params. 
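With a larger batch size, is_training and trainable could be set to True so that the batch-norm statistics are updated during training.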
21 | 22 | ''' 23 | batch_norm_params = { 24 | 'is_training': False, 'decay': batch_norm_decay, 25 | 'epsilon': batch_norm_epsilon, 'scale': batch_norm_scale, 26 | 'trainable': False, 27 | 'updates_collections': tf.GraphKeys.UPDATE_OPS 28 | } 29 | 30 | with slim.arg_scope( 31 | [slim.conv2d], 32 | weights_regularizer=slim.l2_regularizer(weight_decay), 33 | weights_initializer=slim.variance_scaling_initializer(), 34 | trainable=is_training, 35 | activation_fn=tf.nn.relu, 36 | normalizer_fn=slim.batch_norm, 37 | normalizer_params=batch_norm_params): 38 | with slim.arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc: 39 | return arg_sc 40 | 41 | def resnet_base(img_batch, scope_name, is_training=False): 42 | ''' 43 | this code is derived from light-head rcnn. 44 | https://github.com/zengarden/light_head_rcnn 45 | 46 | It is convenient to freeze blocks. So we adapt this mode. 47 | ''' 48 | if scope_name == 'resnet_v1_50': 49 | middle_num_units = 6 50 | elif scope_name == 'resnet_v1_101': 51 | middle_num_units = 23 52 | else: 53 | raise NotImplementedError('We only support resnet_v1_50 or resnet_v1_101. Check your network name....yjr') 54 | 55 | blocks = [resnet_v1_block('block1', base_depth=64, num_units=3, stride=2), 56 | resnet_v1_block('block2', base_depth=128, num_units=4, stride=2), 57 | resnet_v1_block('block3', base_depth=256, num_units=9, stride=2), 58 | resnet_v1_block('block4', base_depth=512, num_units=3, stride=1)] 59 | # when use fpn . stride list is [1, 2, 2] 60 | 61 | with slim.arg_scope(resnet_arg_scope(is_training=False)): 62 | with tf.variable_scope(scope_name, scope_name): 63 | # Do the first few layers manually, because 'SAME' padding can behave inconsistently 64 | # for images of different sizes: sometimes 0, sometimes 1 65 | net = resnet_utils.conv2d_same( 66 | img_batch, 64, 7, stride=2, scope='conv1') 67 | net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]]) 68 | net = slim.max_pool2d( 69 | net, [3, 3], stride=2, padding='VALID', scope='pool1') 70 | 71 | not_freezed = [False] * 0 + (4-0)*[True] 72 | # Fixed_Blocks can be 1~3 73 | 74 | with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[0]))): 75 | C2, end_points_C2 = resnet_v1.resnet_v1(net, 76 | blocks[0:1], 77 | global_pool=False, 78 | include_root_block=False, 79 | scope=scope_name) 80 | #C2=tf.layers.average_pooling2d(inputs=C2, pool_size=3, strides=2,padding="valid") 81 | #C2=tf.reduce_mean(C2, axis=[1, 2], keep_dims=False, name='global_average_pooling') 82 | with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[1]))): 83 | C3, end_points_C3 = resnet_v1.resnet_v1(C2, 84 | blocks[1:2], 85 | global_pool=False, 86 | include_root_block=False, 87 | scope=scope_name) 88 | C3 = slim.avg_pool2d(C3, 2) 89 | #C3 = tf.reduce_mean(C3, axis=[1, 2], keep_dims=False, name='global_average_pooling') 90 | #return C3 91 | '''with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[2]))): 92 | C4, end_points_C4 = resnet_v1.resnet_v1(C3, 93 | blocks[2:3], 94 | global_pool=False, 95 | include_root_block=False, 96 | scope=scope_name)''' 97 | return C3 98 | 99 | # C2 = tf.Print(C2, [tf.shape(C2)], summarize=10, message='C2_shape') 100 | #add_heatmap(C2, name='Layer2/C2_heat') 101 | def get_restorer(): 102 | checkpoint_path = r"E:\FPN\data\pretrained_weights\resnet_v1_101.ckpt" 103 | print("model restore from pretrained mode, path is :", checkpoint_path) 104 | model_variables = slim.get_model_variables() 105 | 106 | 107 | def name_in_ckpt_rpn(var): 108 | return 
var.op.name 109 | 110 | def name_in_ckpt_fastrcnn_head(var): 111 | return '/'.join(var.op.name.split('/')[1:]) 112 | 113 | nameInCkpt_Var_dict = {} 114 | for var in model_variables: 115 | if var.name.startswith('resnet_v1_101'): 116 | var_name_in_ckpt = name_in_ckpt_rpn(var) 117 | nameInCkpt_Var_dict[var_name_in_ckpt] = var 118 | restore_variables = nameInCkpt_Var_dict 119 | for key, item in restore_variables.items(): 120 | print("var_in_graph: ", item.name) 121 | print("var_in_ckpt: ", key) 122 | print(20*"___") 123 | restorer = tf.train.Saver(restore_variables) 124 | print(20 * "****") 125 | print("restore from pretrained_weighs in IMAGE_NET") 126 | return restorer, checkpoint_path 127 | 128 | -------------------------------------------------------------------------------- /code/tensorflow-iris.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import tensorflow as tf 4 | import os 5 | 6 | # Dataset Parameters 7 | MODE = 'folder' # or 'file', if you choose a plain text file (see above). 8 | DATASET_PATH = 'MMU/' # the dataset file or root folder path. 9 | TEST_PATH = 'MMU2/' # the dataset file or root folder path. 10 | 11 | # Image Parameters 12 | N_CLASSES = 45 # total number of classes 13 | IMG_HEIGHT = 24 # the image height to be resized to 14 | IMG_WIDTH = 32 # the image width to be resized to 15 | CHANNELS = 3 # The 3 color channels, change to 1 if grayscale 16 | 17 | 18 | # Reading the dataset 19 | # 2 modes: 'file' or 'folder' 20 | def read_images(dataset_path, mode, batch_size): 21 | imagepaths, labels = list(), list() 22 | if mode == 'file': 23 | # Read dataset file 24 | data = open(dataset_path, 'r').read().splitlines() 25 | for d in data: 26 | imagepaths.append(d.split(' ')[0]) 27 | labels.append(int(d.split(' ')[1])) 28 | elif mode == 'folder': 29 | # An ID will be affected to each sub-folders by alphabetical order 30 | label = 0 31 | # List the directory 32 | classes = None 33 | print(dataset_path) 34 | for (path, dirs, files) in os.walk(dataset_path): 35 | print("Dosyalar Okunuyor") 36 | print(path) 37 | classes = sorted(dirs) 38 | break 39 | # classes = sorted(os.walk(dataset_path).__next__()[1]) 40 | # List each sub-directory (the classes) 41 | for c in classes: 42 | c_dir = os.path.join(dataset_path, c) 43 | walk = os.walk(c_dir).__next__() 44 | # Add each image to the training set 45 | for sample in walk[2]: 46 | # Only keeps jpeg images 47 | if sample.endswith('.bmp'): 48 | imagepaths.append(os.path.join(c_dir, sample)) 49 | labels.append(label) 50 | label += 1 51 | else: 52 | raise Exception("Unknown mode.") 53 | 54 | # Convert to Tensor 55 | imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string) 56 | labels = tf.convert_to_tensor(labels, dtype=tf.int32) 57 | # Build a TF Queue, shuffle data 58 | image, label = tf.train.slice_input_producer([imagepaths, labels], 59 | shuffle=True) 60 | 61 | # Read images from disk 62 | image = tf.read_file(image) 63 | image = tf.image.decode_bmp(image, channels=CHANNELS) 64 | 65 | # Resize images to a common size 66 | image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH]) 67 | # Normalize 68 | image = image * 1.0 / 127.5 - 1.0 69 | 70 | # Create batches 71 | X, Y = tf.train.batch([image, label], batch_size=batch_size, 72 | capacity=batch_size * 8, 73 | num_threads=1) 74 | 75 | return X, Y 76 | 77 | 78 | # Parameters 79 | learning_rate = 0.0010 80 | num_steps = 1000 81 | batch_size = 64 82 | display_step = 100 83 | 84 | # Network 
Parameters 85 | dropout = 0.75 # Dropout, probability to keep units 86 | 87 | # Build the data input 88 | X, Y = read_images(DATASET_PATH, MODE, batch_size) 89 | X2, Y2 = read_images(TEST_PATH, MODE, batch_size) 90 | 91 | 92 | # Create model 93 | def conv_net(x, n_classes, dropout, reuse, is_training): 94 | # Define a scope for reusing the variables 95 | with tf.variable_scope('ConvNet', reuse=reuse): 96 | # Convolution Layer with 24 filters and a kernel size of 5 97 | conv1 = tf.layers.conv2d(x, 24, 5, activation=tf.nn.relu) 98 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 99 | conv1 = tf.layers.max_pooling2d(conv1, 2, 2) 100 | 101 | # Convolution Layer with 48 filters and a kernel size of 3 102 | conv2 = tf.layers.conv2d(conv1, 48, 3, activation=tf.nn.relu) 103 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 104 | conv2 = tf.layers.max_pooling2d(conv2, 2, 2) 105 | 106 | # Flatten the data to a 1-D vector for the fully connected layer 107 | fc1 = tf.contrib.layers.flatten(conv2) 108 | 109 | # Fully connected layer (in contrib folder for now) 110 | fc1 = tf.layers.dense(fc1, 768) 111 | # Apply Dropout (if is_training is False, dropout is not applied) 112 | fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training) 113 | 114 | # Output layer, class prediction 115 | out = tf.layers.dense(fc1, n_classes) 116 | # Because 'softmax_cross_entropy_with_logits' already apply softmax, 117 | # we only apply softmax to testing network 118 | out = tf.nn.softmax(out) if not is_training else out 119 | 120 | return out 121 | 122 | 123 | # Because Dropout have different behavior at training and prediction time, we 124 | # need to create 2 distinct computation graphs that share the same weights. 125 | 126 | # Create a graph for training 127 | logits_train = conv_net(X, N_CLASSES, dropout, reuse=False, is_training=True) 128 | # Create another graph for testing that reuse the same weights 129 | logits_test = conv_net(X2, N_CLASSES, dropout, reuse=True, is_training=False) 130 | 131 | # Define loss and optimizer (with train logits, for dropout to take effect) 132 | loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( 133 | logits=logits_train, labels=Y)) 134 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 135 | train_op = optimizer.minimize(loss_op) 136 | 137 | # Evaluate model (with test logits, for dropout to be disabled) 138 | correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.cast(Y2, tf.int64)) 139 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 140 | 141 | # Initialize the variables (i.e. 
assign their default value) 142 | init = tf.global_variables_initializer() 143 | 144 | # Saver object 145 | saver = tf.train.Saver() 146 | 147 | # Start training 148 | with tf.Session() as sess: 149 | # Run the initializer 150 | sess.run(init) 151 | 152 | # Start the data queue 153 | tf.train.start_queue_runners() 154 | 155 | # Training cycle 156 | for step in range(1, num_steps + 1): 157 | 158 | if step % display_step == 0: 159 | # Run optimization and calculate batch loss and accuracy 160 | _, loss, acc = sess.run([train_op, loss_op, accuracy]) 161 | print("Step " + str(step) + ", Minibatch Loss= " + \ 162 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 163 | "{:.3f}".format(acc)) 164 | else: 165 | # Only run the optimization op (backprop) 166 | sess.run(train_op) 167 | 168 | # lastacc = sess.run(accuracy) 169 | # print("Accuracy= {:.3f}".format(lastacc)) 170 | print("Optimization Finished!") 171 | 172 | # Save model 173 | # saver.save(sess, './my_tf_model') 174 | 175 | # Coordinate the loading of image files. 176 | coord = tf.train.Coordinator() 177 | threads = tf.train.start_queue_runners(coord=coord) 178 | 179 | # Finish off the filename queue coordinator. 180 | coord.request_stop() 181 | coord.join(threads) -------------------------------------------------------------------------------- /code/tradition/Evaluation.py: -------------------------------------------------------------------------------- 1 | from Matching import IrisMatching, IrisMatchingRed, IrisMatchingRed1,calcROC 2 | from tabulate import tabulate # 13min+ 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | 6 | thresholds_2 = [0.076, 0.085, 0.1] 7 | 8 | 9 | def table_CRR(train_features, train_classes, test_features, test_classes): 10 | thresholds = np.arange(0.04, 0.1, 0.003) 11 | L1_1, _, _ = IrisMatching(train_features, train_classes, test_features, test_classes, 1) 12 | print("L1:",(L1_1*100)) 13 | L2_1, _, _ = IrisMatching(train_features, train_classes, test_features, test_classes, 2) 14 | print("L2:",(L2_1*100)) 15 | C_1, distsm, distsn = IrisMatching(train_features, train_classes, test_features, test_classes, 3) 16 | print("Cos:",(C_1*100)) 17 | #L1_2, L2_2, C_2 = IrisMatchingRed(train_features, train_classes, test_features, test_classes, 280) 18 | print("Correct recognition rate (%)") 19 | print( 20 | tabulate([['L1 distance measure', L1_1 * 100], ['L2 distance measure', L2_1 * 100], 21 | ['Cosine similarity measure', C_1 * 100]], 22 | headers=['Similartiy measure', 'Original feature set'])) 23 | fmrs, fnmrs = calcROC(distsm, distsn, thresholds) 24 | plt.plot(fmrs, fnmrs) 25 | plt.xlabel('False Match Rate') 26 | plt.ylabel('False Non_match Rate') 27 | plt.title('ROC Curve') 28 | plt.savefig('D:/study/iris/fig/roc_curve1.png') 29 | plt.show() 30 | 31 | 32 | # table_CRR(train_features, train_classes, test_features, test_classes) 33 | 34 | def performance_evaluation(train_features, train_classes, test_features, test_classes): 35 | n = range(140, 441, 20) 36 | l1_crr=[] 37 | l2_crr=[] 38 | cos_crr = [] 39 | for i in range(len(n)): 40 | l1crr, l2crr, coscrr = IrisMatchingRed(train_features, train_classes, test_features, test_classes, n[i]) 41 | print(n[i],l1crr*100,l2crr*100,coscrr*100) 42 | l1_crr.append(l1crr * 100) 43 | l2_crr.append(l2crr * 100) 44 | cos_crr.append(coscrr * 100) 45 | print( 46 | tabulate([[n[0],l1_crr[0], l2_crr[0], cos_crr[0]], 47 | [n[1], l1_crr[1], l2_crr[1], cos_crr[1]], 48 | [n[2], l1_crr[2], l2_crr[2], cos_crr[2]], 49 | [n[3], l1_crr[3], l2_crr[3], cos_crr[3]], 50 | [n[4], 
l1_crr[4], l2_crr[4], cos_crr[4]], 51 | [n[5], l1_crr[5], l2_crr[5], cos_crr[5]], 52 | [n[6], l1_crr[6], l2_crr[6], cos_crr[6]], 53 | [n[7], l1_crr[7], l2_crr[7], cos_crr[7]], 54 | [n[8], l1_crr[8], l2_crr[8], cos_crr[8]], 55 | [n[9], l1_crr[9], l2_crr[9], cos_crr[9]], 56 | [n[10], l1_crr[10], l2_crr[10], cos_crr[10]], 57 | [n[11], l1_crr[11], l2_crr[11], cos_crr[11]], 58 | [n[12], l1_crr[12], l2_crr[12], cos_crr[12]], 59 | [n[13], l1_crr[13], l2_crr[13], cos_crr[13]], 60 | [n[14], l1_crr[14], l2_crr[14], cos_crr[14]], 61 | [n[15], l1_crr[15], l2_crr[15], cos_crr[15]]], 62 | headers=['Dimensionality of the feature vector', 'L1 distance measure', "L2 distance measure","Cosine similarity measure"])) 63 | plt.plot(n, l1_crr, marker="*", color='navy') 64 | plt.plot(n, l2_crr, marker="*", color='blue') 65 | plt.plot(n, cos_crr, marker="*", color='red') 66 | plt.xlabel('Dimensionality of the feature vector') 67 | plt.ylabel('Correct Recognition Rate') 68 | plt.savefig('D:/study/iris/fig/figure_reduce11.png') 69 | plt.show() 70 | 71 | def performance_evaluation1(train_features, train_classes, test_features, test_classes): 72 | n = range(140, 441, 20) 73 | l1_crr=[] 74 | l2_crr=[] 75 | cos_crr = [] 76 | for i in range(len(n)): 77 | l1crr = IrisMatchingRed1(train_features, train_classes, test_features, test_classes, n[i]) 78 | print(n[i],l1crr*100) 79 | l1_crr.append(l1crr * 100) 80 | print( 81 | tabulate([[n[0],l1_crr[0]], 82 | [n[1], l1_crr[1]], 83 | [n[2], l1_crr[2]], 84 | [n[3], l1_crr[3]], 85 | [n[4], l1_crr[4]], 86 | [n[5], l1_crr[5]], 87 | [n[6], l1_crr[6]], 88 | [n[7], l1_crr[7]], 89 | [n[8], l1_crr[8]], 90 | [n[9], l1_crr[9]], 91 | [n[10], l1_crr[10]], 92 | [n[11], l1_crr[11]], 93 | [n[12], l1_crr[12]], 94 | [n[13], l1_crr[13]], 95 | [n[14], l1_crr[14]], 96 | ["none", l1_crr[15]]], 97 | headers=['Dimensionality of the feature vector', 'SVM distance measure'])) 98 | plt.plot(n, l1_crr, marker="*", color='navy') 99 | plt.xlabel('Dimensionality of the feature vector') 100 | plt.ylabel('Correct Recognition Rate') 101 | plt.savefig('D:/study/iris/fig/svm_reduce12.png') 102 | plt.show() 103 | 104 | # performance_evaluation(train_features, train_classes, test_features, test_classes) 105 | 106 | 107 | def FM_FNM_table(fmrs_mean, fmrs_l, fmrs_u, fnmrs_mean, fnmrs_l, fnmrs_u, thresholds): 108 | print("False Match and False Nonmatch Rates with Different Threshold Values") 109 | print( 110 | tabulate([[thresholds[7], str(fmrs_mean[7]) + "[" + str(fmrs_l[7]) + "," + str(fmrs_u[7]) + "]", 111 | str(fnmrs_mean[7]) + "[" + str(fnmrs_l[7]) + "," + str(fnmrs_u[7]) + "]"], 112 | [thresholds[8], str(fmrs_mean[8]) + "[" + str(fmrs_l[8]) + "," + str(fmrs_u[8]) + "]", 113 | str(fnmrs_mean[8]) + "[" + str(fnmrs_l[8]) + "," + str(fnmrs_u[8]) + "]"], 114 | [thresholds[9], str(fmrs_mean[9]) + "[" + str(fmrs_l[9]) + "," + str(fmrs_u[9]) + "]", 115 | str(fnmrs_mean[9]) + "[" + str(fnmrs_l[9]) + "," + str(fnmrs_u[9]) + "]"]], 116 | headers=['Threshold', 'False match rate(%)', "False non-match rate(%)"])) 117 | 118 | 119 | # FM_FNM_table(train_features, train_classes, test_features, test_classes, thresholds_2) 120 | 121 | def FMR_conf(fmrs_mean, fmrs_l, fmrs_u, fnmrs_mean, fnmrs_l, fnmrs_u): 122 | plt.figure() 123 | lw = 2 124 | plt.plot(fmrs_mean, fnmrs_mean, color='navy', lw=lw, linestyle='-') 125 | plt.plot(fmrs_l, fnmrs_mean, color='navy', lw=lw, linestyle='--') 126 | plt.plot(fmrs_u, fnmrs_mean, color='navy', lw=lw, linestyle='--') 127 | plt.xlim([0.0, 60]) 128 | plt.ylim([0.0, 40]) 129 | plt.xlabel('False Match 
Rate(%)') 130 | plt.ylabel('False Non_match Rate(%)') 131 | plt.title('FMR Confidence Interval') 132 | plt.savefig('D:/study/iris/fig/figure_13_a.png') 133 | plt.show() 134 | 135 | 136 | def FNMR_conf(fmrs_mean, fmrs_l, fmrs_u, fnmrs_mean, fnmrs_l, fnmrs_u): 137 | plt.figure() 138 | lw = 2 139 | plt.plot(fmrs_mean, fnmrs_mean, color='navy', lw=lw, linestyle='-') 140 | plt.plot(fmrs_mean, fnmrs_l, color='navy', lw=lw, linestyle='--') 141 | plt.plot(fmrs_mean, fnmrs_u, color='navy', lw=lw, linestyle='--') 142 | plt.xlim([0.0, 100]) 143 | plt.ylim([0.0, 40]) 144 | plt.xlabel('False Match Rate(%)') 145 | plt.ylabel('False Non_match Rate(%)') 146 | plt.title('FNMR Confidence Interval') 147 | plt.savefig('D:/study/iris/fig/figure_13_b.png') 148 | plt.show() 149 | 150 | # FMR_conf(fmrs_mean,fmrs_l,fmrs_u,fnmrs_mean,fnmrs_l,fnmrs_u) 151 | -------------------------------------------------------------------------------- /code/script/scirpt.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | 4 | import math 5 | import numpy as np 6 | import cv2 7 | from PIL import Image 8 | from skimage.filters.rank import equalize 9 | from skimage.morphology import disk 10 | from skimage.transform import hough_circle, hough_circle_peaks 11 | import pandas as pd 12 | 13 | def IrisLocalization1(eye): 14 | blured = cv2.bilateralFilter(eye, 9, 100, 100) 15 | Xp = blured.sum(axis=0).argmin() 16 | Yp = blured.sum(axis=1).argmin() 17 | x = blured[max(Yp - 60, 0):min(Yp + 60, 480), max(Xp - 60, 0):min(Xp + 60, 640)].sum(axis=0).argmin() 18 | y = blured[max(Yp - 60, 0):min(Yp + 60, 480), max(Xp - 60, 0):min(Xp + 60, 640)].sum(axis=1).argmin() 19 | Xp = max(Xp - 60, 0) + x 20 | Yp = max(Yp - 60, 0) + y 21 | if Xp >= 200 and Yp >= 160: 22 | blur = cv2.GaussianBlur(eye[Yp - 60:Yp + 60, Xp - 60:Xp + 60], (5, 5), 0) 23 | pupil_circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, dp=1.2, minDist=200, param1=200, param2=12, 24 | minRadius=15, maxRadius=80) 25 | xp, yp, rp = np.round(pupil_circles[0][0]).astype("int") 26 | xp = Xp - 60 + xp 27 | yp = Yp - 60 + yp 28 | else: 29 | pupil_circles = cv2.HoughCircles(blured, cv2.HOUGH_GRADIENT, 4, 480, minRadius=25, maxRadius=55, param2=51) 30 | xp, yp, rp = np.round(pupil_circles[0][0]).astype("int") 31 | eye_copy = eye.copy() 32 | rp = rp + 7 # slightly enlarge the pupil radius makes a better result 33 | blured_copy = cv2.medianBlur(eye_copy, 11) 34 | blured_copy = cv2.medianBlur(blured_copy, 11) 35 | blured_copy = cv2.medianBlur(blured_copy, 11) 36 | eye_edges = cv2.Canny(blured_copy, threshold1=15, threshold2=30, L2gradient=True) 37 | eye_edges[:, xp - rp - 30:xp + rp + 30] = 0 38 | 39 | hough_radii = np.arange(rp + 45, 150, 2) 40 | hough_res = hough_circle(eye_edges, hough_radii) 41 | accums, xi, yi, ri = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=1) 42 | iris = [] 43 | iris.extend(xi) 44 | iris.extend(yi) 45 | iris.extend(ri) 46 | if ((iris[0] - xp) ** 2 + (iris[1] - yp) ** 2) ** 0.5 > rp * 0.3: 47 | iris[0] = xp 48 | iris[1] = yp 49 | #cv2.imshow('blur',blured) 50 | cv2.circle(blured,(iris[0],iris[1]),iris[2],(0,0,255),1) 51 | 52 | cv2.circle(blured, (xp, yp), rp, (0, 255,0 ), 1) 53 | #cv2.imshow('circle1',blured) 54 | cv2.imshow('circle2',blured) 55 | cv2.waitKey(0) 56 | return np.array(iris), np.array([xp, yp, rp]) 57 | 58 | def IrisLocalization(eye): 59 | blured = cv2.bilateralFilter(eye, 9, 100, 100) 60 | Xp = blured.sum(axis=0).argmin() 61 | Yp = 
blured.sum(axis=1).argmin() 62 | x = blured[max(Yp - 120, 0):min(Yp + 120, 480), max(Xp - 120, 0):min(Xp + 120, 640)].sum(axis=0).argmin() 63 | y = blured[max(Yp - 120, 0):min(Yp + 120, 480), max(Xp - 120, 0):min(Xp + 120, 640)].sum(axis=1).argmin() 64 | Xp = max(Xp - 120, 0) + x 65 | Yp = max(Yp - 120, 0) + y 66 | if Xp >= 200 and Yp >= 160: 67 | blur = cv2.GaussianBlur(eye[Yp - 120:Yp + 120, Xp - 120:Xp + 120], (5, 5), 0) 68 | pupil_circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, dp=1.2, minDist=200, param1=210, param2=12, 69 | minRadius=10, maxRadius=80) 70 | xp, yp, rp = np.round(pupil_circles[0][0]).astype("int") 71 | xp = Xp - 120 + xp 72 | yp = Yp - 120 + yp 73 | else: 74 | pupil_circles = cv2.HoughCircles(blured, cv2.HOUGH_GRADIENT, 4, 200, minRadius=25, maxRadius=80, param2=25) 75 | xp, yp, rp = np.round(pupil_circles[0][0]).astype("int") 76 | eye_copy = eye.copy() 77 | rp = rp + 7 # slightly enlarge the pupil radius makes a better result 78 | blured_copy = cv2.medianBlur(eye_copy, 11) 79 | blured_copy = cv2.medianBlur(blured_copy, 11) 80 | blured_copy = cv2.medianBlur(blured_copy, 11) 81 | eye_edges = cv2.Canny(blured_copy, threshold1=15, threshold2=30, L2gradient=True) 82 | eye_edges[:, xp - rp - 60:xp + rp + 60] = 0 83 | 84 | hough_radii = np.arange(rp + 45, 300, 2) 85 | hough_res = hough_circle(eye_edges, hough_radii) 86 | accums, xi, yi, ri = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=1) 87 | iris = [] 88 | iris.extend(xi) 89 | iris.extend(yi) 90 | iris.extend(ri) 91 | if ((iris[0] - xp) ** 2 + (iris[1] - yp) ** 2) ** 0.5 > rp * 0.3: 92 | iris[0] = xp 93 | iris[1] = yp 94 | #cv2.imshow('blur',blured) 95 | cv2.circle(blured,(iris[0],iris[1]),iris[2],(0,0,255),1) 96 | 97 | cv2.circle(blured, (xp, yp), rp, (0, 255,0 ), 1) 98 | #cv2.imshow('circle1',blured) 99 | cv2.imshow('circle2',blured) 100 | cv2.waitKey(0) 101 | return np.array(iris), np.array([xp, yp, rp]) 102 | 103 | 104 | def IrisNormalization(image, inner_circle, outer_circle): 105 | localized_img = image 106 | row = 64 107 | col = 512 108 | normalized_iris = np.zeros(shape=(64, 512)) 109 | inner_y = inner_circle[0] # height 110 | inner_x = inner_circle[1] # width 111 | outer_y = outer_circle[0] 112 | outer_x = outer_circle[1] 113 | angle = 2.0 * math.pi / col 114 | inner_boundary_x = np.zeros(shape=(1, col)) 115 | inner_boundary_y = np.zeros(shape=(1, col)) 116 | outer_boundary_x = np.zeros(shape=(1, col)) 117 | outer_boundary_y = np.zeros(shape=(1, col)) 118 | for j in range(col): 119 | inner_boundary_x[0][j] = inner_circle[0] + inner_circle[2] * math.cos(angle * (j)) 120 | inner_boundary_y[0][j] = inner_circle[1] + inner_circle[2] * math.sin(angle * (j)) 121 | 122 | outer_boundary_x[0][j] = outer_circle[0] + outer_circle[2] * math.cos(angle * (j)) 123 | outer_boundary_y[0][j] = outer_circle[1] + outer_circle[2] * math.sin(angle * (j)) 124 | 125 | for j in range(512): 126 | for i in range(64): 127 | normalized_iris[i][j] = localized_img[min(int(int(inner_boundary_y[0][j]) 128 | + (int(outer_boundary_y[0][j]) - int( 129 | inner_boundary_y[0][j])) * (i / 64.0)), localized_img.shape[0] - 1)][min(int(int(inner_boundary_x[0][j]) 130 | + (int( 131 | outer_boundary_x[0][j]) - int(inner_boundary_x[0][j])) 132 | * (i / 64.0)), 133 | localized_img.shape[1] - 1)] 134 | 135 | res_image = 255 - normalized_iris 136 | return res_image 137 | 138 | def MatrixToImage(data): 139 | data = data*255 140 | new_im = Image.fromarray(data.astype(np.uint8)) 141 | return new_im 142 | 143 | def 
ImageEnhancement(normalized_iris): 144 | row = 64 145 | col = 512 146 | normalized_iris = normalized_iris.astype(np.uint8) 147 | 148 | enhanced_image = normalized_iris 149 | 150 | enhanced_image = equalize(enhanced_image, disk(32)) 151 | 152 | roi = enhanced_image[0:48, :] 153 | return roi 154 | 155 | if __name__=="__main__": 156 | train = pd.read_csv("D:/study/iris/csv/train.csv") 157 | train_img_list = train["img"] 158 | for i in range(23,len(train_img_list)): 159 | eye = train_img_list[i] 160 | print(i) 161 | img = cv2.imread(eye, 0) 162 | # a,b=IrisLocalization(img) 163 | a1, b1 = IrisLocalization(img) -------------------------------------------------------------------------------- /code/tradition/iris_demo2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | 4 | from Gabor import FeatureExtraction 5 | from sklearn.neighbors import KNeighborsClassifier 6 | from sklearn.manifold import LocallyLinearEmbedding 7 | from sklearn.svm import SVC 8 | from sklearn.tree import DecisionTreeClassifier 9 | from sklearn.ensemble import RandomForestClassifier 10 | from sklearn.decomposition import PCA 11 | from sklearn.pipeline import make_pipeline 12 | from sklearn.ensemble import AdaBoostClassifier 13 | from sklearn.preprocessing import OneHotEncoder 14 | from cnn_feature import * 15 | import Evaluation as PE 16 | from tabulate import tabulate # 13min+ 17 | import matplotlib.pyplot as plt 18 | train = pd.read_csv("D:/study/iris/csv/train.csv") 19 | test = pd.read_csv("D:/study/iris/csv/test.csv") 20 | 21 | train_img_list = train["img"] 22 | train_label_list = train["label"] 23 | test_img_list = test["img"] 24 | test_label_list = test["label"] 25 | train_size = len(train_img_list) 26 | test_size = len(test_img_list) 27 | 28 | #train_features = np.zeros((train_size,-1)) 29 | train_classes = np.zeros(train_size, dtype = np.uint8) 30 | #test_features = np.zeros((test_size,-1)) 31 | test_classes = np.zeros(test_size, dtype = np.uint8) 32 | 33 | #starttime = datetime.datetime.now() 34 | dir1 = r"D:\study\iris\CASIA\enhance\train" 35 | dir2 = r"D:\study\iris\CASIA\enhance\test" 36 | train=[] 37 | test=[] 38 | for i in range(train_size): 39 | train_path = os.path.join(dir1, train_img_list[i].split("\\")[-1]) 40 | img = cv2.imread(train_path, 0) 41 | train.append(FeatureExtraction(img)) 42 | print("train:", i) 43 | train_classes[i] = train_label_list[i] 44 | train_features = np.array(train) 45 | print(train_features.shape) 46 | 47 | for j in range(test_size): 48 | test_path = os.path.join(dir2, test_img_list[j].split("\\")[-1]) 49 | img = cv2.imread(test_path, 0) 50 | test.append(FeatureExtraction(img)) 51 | print("test:",j) 52 | test_classes[j] = test_label_list[j] 53 | test_features = np.array(test) 54 | print(test_features.shape) 55 | #endtime = datetime.datetime.now() 56 | def SVM(train_features, train_classes, test_features, test_classes): 57 | train_redfeatures = train_features.copy() 58 | test_redfeatures = test_features.copy() 59 | total = float(len(test_classes)) 60 | 61 | pca = PCA(n_components=380, whiten=True, random_state=42) 62 | svc = SVC(kernel='rbf', C=5, gamma=0.001) 63 | 64 | svm = make_pipeline(pca, svc) 65 | svm.fit(train_redfeatures, train_classes) 66 | svmclasses = svm.predict(test_redfeatures) 67 | svmcrr = float(np.sum(svmclasses == test_classes)) / total 68 | return svmcrr 69 | def KNN(train_features, train_classes, test_features, test_classes): 70 | train_redfeatures = train_features.copy() 
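    # As in SVM() above: reduce the Gabor feature vectors with PCA (380 whitened
    # components), then classify each test sample with a 1-nearest-neighbour
    # search under the cosine metric; the return value is the correct
    # recognition rate (CRR) on the test set.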
71 | test_redfeatures = test_features.copy() 72 | total = float(len(test_classes)) 73 | 74 | pca = PCA(n_components=380, whiten=True, random_state=42) 75 | cosknn1 = KNeighborsClassifier(n_neighbors=1, metric='cosine') 76 | cosknn=make_pipeline(pca, cosknn1) 77 | 78 | cosknn.fit(train_redfeatures, train_classes) 79 | cosclasses = cosknn.predict(test_redfeatures) 80 | coscrr = float(np.sum(cosclasses == test_classes)) / total 81 | return coscrr 82 | 83 | def SVM1(train_features, train_classes, test_features, test_classes): 84 | train_redfeatures = train_features.copy() 85 | test_redfeatures = test_features.copy() 86 | total = float(len(test_classes)) 87 | 88 | #lle = LocallyLinearEmbedding(n_neighbors=200 + 1, n_components=200) 89 | #lle.fit(train_features) 90 | #train_redfeatures = lle.transform(train_redfeatures) 91 | #test_redfeatures = lle.transform(test_redfeatures) 92 | #print("finish reduce dim!") 93 | n = range(220, 501, 20) 94 | l1_crr = [] 95 | for i in range(len(n)): 96 | pca = PCA(n_components=n[i], whiten=True, random_state=42) 97 | svc = SVC(kernel='rbf', C=5, gamma=0.001) 98 | 99 | svm = make_pipeline(pca, svc) 100 | svm.fit(train_redfeatures, train_classes) 101 | svmclasses = svm.predict(test_redfeatures) 102 | svmcrr = float(np.sum(svmclasses == test_classes)) / total 103 | print("svm",n[i],svmcrr) 104 | l1_crr.append(svmcrr) 105 | print( 106 | tabulate([[n[0], l1_crr[0]], 107 | [n[1], l1_crr[1]], 108 | [n[2], l1_crr[2]], 109 | [n[3], l1_crr[3]], 110 | [n[4], l1_crr[4]], 111 | [n[5], l1_crr[5]], 112 | [n[6], l1_crr[6]], 113 | [n[7], l1_crr[7]], 114 | [n[8], l1_crr[8]], 115 | [n[9], l1_crr[9]], 116 | [n[10], l1_crr[10]], 117 | [n[11], l1_crr[11]], 118 | [n[12], l1_crr[12]], 119 | [n[13], l1_crr[13]], 120 | [n[14], l1_crr[14]]], 121 | # [n[15], l1_crr[15]]], 122 | headers=['Dimensionality of the feature vector', 'SVM distance measure'])) 123 | plt.plot(n, l1_crr, marker="*", color='navy') 124 | plt.xlabel('Dimensionality of the feature vector') 125 | plt.ylabel('Correct Recognition Rate') 126 | plt.savefig('D:/study/iris/fig/svm_gabor.png') 127 | plt.show() 128 | 129 | def KNN1(train_features, train_classes, test_features, test_classes): 130 | train_redfeatures = train_features.copy() 131 | test_redfeatures = test_features.copy() 132 | total = float(len(test_classes)) 133 | 134 | n = range(220, 501, 20) 135 | l1_crr = [] 136 | for i in range(len(n)): 137 | pca = PCA(n_components=n[i], whiten=True, random_state=42) 138 | cosknn1 = KNeighborsClassifier(n_neighbors=1, metric='cosine') 139 | 140 | cosknn = make_pipeline(pca,cosknn1) 141 | cosknn.fit(train_redfeatures, train_classes) 142 | knnclasses = cosknn.predict(test_redfeatures) 143 | knncrr = float(np.sum(knnclasses == test_classes)) / total 144 | print("knn:",n[i], knncrr) 145 | l1_crr.append(knncrr) 146 | print( 147 | tabulate([[n[0], l1_crr[0]], 148 | [n[1], l1_crr[1]], 149 | [n[2], l1_crr[2]], 150 | [n[3], l1_crr[3]], 151 | [n[4], l1_crr[4]], 152 | [n[5], l1_crr[5]], 153 | [n[6], l1_crr[6]], 154 | [n[7], l1_crr[7]], 155 | [n[8], l1_crr[8]], 156 | [n[9], l1_crr[9]], 157 | [n[10], l1_crr[10]], 158 | [n[11], l1_crr[11]], 159 | [n[12], l1_crr[12]], 160 | [n[13], l1_crr[13]], 161 | [n[14], l1_crr[14]]], 162 | # [n[15], l1_crr[15]]], 163 | headers=['Dimensionality of the feature vector', 'KNN distance measure'])) 164 | plt.plot(n, l1_crr, marker="*", color='navy') 165 | plt.xlabel('Dimensionality of the feature vector') 166 | plt.ylabel('Correct Recognition Rate') 167 | plt.savefig('D:/study/iris/fig/knn_gabor.png') 168 | 
plt.show() 169 | 170 | #return l1_crr 171 | #print('image processing and feature extraction takes '+str((endtime-starttime).seconds)+' seconds' 172 | if __name__=="__main__": 173 | #PE.table_CRR(train_features, train_classes, test_features, test_classes) 174 | #print("KNN:",KNN(train_features, train_classes, test_features, test_classes)) 175 | #print("SVM:",SVM(train_features, train_classes, test_features, test_classes)) 176 | #SVM1(train_features, train_classes, test_features, test_classes) 177 | #KNN1(train_features, train_classes, test_features, test_classes) 178 | PE.table_CRR(train_features, train_classes, test_features, test_classes) 179 | #thresholds_2=[0.74,0.76,0.78] -------------------------------------------------------------------------------- /code/CNN_feature/iris_demo1.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis 4 | from sklearn.neighbors import KNeighborsClassifier 5 | from sklearn.manifold import LocallyLinearEmbedding 6 | from sklearn.svm import SVC 7 | from sklearn.tree import DecisionTreeClassifier 8 | from sklearn.ensemble import RandomForestClassifier 9 | from sklearn.decomposition import PCA 10 | from sklearn.pipeline import make_pipeline 11 | from sklearn.ensemble import AdaBoostClassifier 12 | from sklearn.preprocessing import OneHotEncoder 13 | from cnn_feature import * 14 | from tabulate import tabulate # 13min+ 15 | import matplotlib.pyplot as plt 16 | 17 | train = pd.read_csv("D:/study/iris/csv/train.csv") 18 | test = pd.read_csv("D:/study/iris/csv/test.csv") 19 | train_vector = pd.read_csv("D:/study/iris/csv/train_vector1.csv") 20 | test_vector = pd.read_csv("D:/study/iris/csv/test_vector1.csv") 21 | 22 | train_img_list = train["img"] 23 | train_label_list = train["label"] 24 | test_img_list = test["img"] 25 | test_label_list = test["label"] 26 | train_size = len(train_img_list) 27 | test_size = len(test_img_list) 28 | 29 | train_features = np.array(train_vector) 30 | train_classes = train_label_list 31 | test_features = np.array(test_vector) 32 | test_classes = test_label_list 33 | 34 | def onehot(train_classes): 35 | train_classes = np.array(train_classes).reshape(-1, 1) 36 | enc = OneHotEncoder(categories='auto') 37 | enc.fit(train_classes) 38 | 39 | # one-hot编码的结果是比较奇怪的,最好是先转换成二维数组 40 | tempdata = enc.transform(train_classes).toarray() 41 | return tempdata 42 | 43 | def SVM(train_features, train_classes, test_features, test_classes): 44 | train_redfeatures = train_features.copy() 45 | test_redfeatures = test_features.copy() 46 | total = float(len(test_classes)) 47 | 48 | pca = PCA(n_components=680, whiten=True, random_state=42) 49 | #svc = SVC(kernel='rbf', C=8, gamma=0.001) 50 | svc = KNeighborsClassifier(n_neighbors=1,metric="cosine") 51 | svm = make_pipeline(pca, svc) 52 | svm.fit(train_redfeatures, train_classes) 53 | svmclasses = svm.predict(test_redfeatures) 54 | svmcrr = float(np.sum(svmclasses == test_classes)) / total 55 | return (svmcrr*100) 56 | 57 | def SVM1(train_features, train_classes, test_features, test_classes): 58 | train_redfeatures = train_features.copy() 59 | test_redfeatures = test_features.copy() 60 | total = float(len(test_classes)) 61 | 62 | #lle = LocallyLinearEmbedding(n_neighbors=200 + 1, n_components=200) 63 | #lle.fit(train_features) 64 | #train_redfeatures = lle.transform(train_redfeatures) 65 | #test_redfeatures = lle.transform(test_redfeatures) 66 | #print("finish 
reduce dim!") 67 | n = range(220, 601, 20) 68 | l1_crr = [] 69 | for i in range(len(n)): 70 | pca = PCA(n_components=n[i], whiten=True, random_state=42) 71 | svc = SVC(kernel='rbf', C=5, gamma=0.001) 72 | 73 | svm = make_pipeline(pca, svc) 74 | svm.fit(train_redfeatures, train_classes) 75 | svmclasses = svm.predict(test_redfeatures) 76 | svmcrr = float(np.sum(svmclasses == test_classes)) / total 77 | print(n[i],svmcrr) 78 | l1_crr.append(svmcrr) 79 | print( 80 | tabulate([[n[0], l1_crr[0]], 81 | [n[1], l1_crr[1]], 82 | [n[2], l1_crr[2]], 83 | [n[3], l1_crr[3]], 84 | [n[4], l1_crr[4]], 85 | [n[5], l1_crr[5]], 86 | [n[6], l1_crr[6]], 87 | [n[7], l1_crr[7]], 88 | [n[8], l1_crr[8]], 89 | [n[9], l1_crr[9]], 90 | [n[10], l1_crr[10]], 91 | [n[11], l1_crr[11]], 92 | [n[12], l1_crr[12]], 93 | [n[13], l1_crr[13]], 94 | [n[14], l1_crr[14]]], 95 | # [n[15], l1_crr[15]]], 96 | headers=['Dimensionality of the feature vector', 'SVM distance measure'])) 97 | plt.plot(n, l1_crr, marker="*", color='navy') 98 | plt.xlabel('Dimensionality of the feature vector') 99 | plt.ylabel('Correct Recognition Rate') 100 | plt.savefig('D:/study/iris/fig/svm_dense_layer2_12.png') 101 | plt.show() 102 | 103 | #return l1_crr 104 | 105 | def KNN(train_features, train_classes, test_features, test_classes): 106 | train_redfeatures = train_features.copy() 107 | test_redfeatures = test_features.copy() 108 | total = float(len(test_classes)) 109 | 110 | pca = PCA(n_components=150, whiten=True, random_state=42) 111 | cosknn1 = KNeighborsClassifier(n_neighbors=1, metric='cosine') 112 | cosknn=make_pipeline(pca, cosknn1) 113 | 114 | cosknn.fit(train_redfeatures, train_classes) 115 | cosclasses = cosknn.predict(test_redfeatures) 116 | coscrr = float(np.sum(cosclasses == test_classes)) / total 117 | return (coscrr*100) 118 | 119 | 120 | def DTC(train_features, train_classes, test_features, test_classes): 121 | train_redfeatures = train_features.copy() 122 | test_redfeatures = test_features.copy() 123 | total = float(len(test_classes)) 124 | train_classes=onehot(train_classes) 125 | pca = PCA(n_components=160, whiten=True, random_state=42) 126 | dtc = DecisionTreeClassifier(max_depth=19,min_samples_leaf=12,min_samples_split=10) 127 | model = make_pipeline(pca, dtc) 128 | 129 | model.fit(train_redfeatures, train_classes) 130 | classes = model.predict(test_redfeatures) 131 | crr = float(np.sum(classes == test_classes)) / total 132 | #crr = model.score(test_redfeatures,test_classes) 133 | return crr 134 | 135 | def RF(train_features, train_classes, test_features, test_classes): 136 | train_redfeatures = train_features.copy() 137 | test_redfeatures = test_features.copy() 138 | total = float(len(test_classes)) 139 | 140 | #pca = PCA(n_components=160, whiten=True, random_state=42) 141 | model = RandomForestClassifier(n_estimators=800, max_features=250, max_depth=15, min_samples_split=120, 142 | min_samples_leaf=25 ,oob_score=True, random_state=10) 143 | #model = make_pipeline(pca, rf) 144 | 145 | model.fit(train_redfeatures, train_classes) 146 | classes = model.predict(test_redfeatures) 147 | crr = float(np.sum(classes == test_classes)) / total 148 | crr = model.score(test_redfeatures,test_classes) 149 | return crr 150 | 151 | def Ada(train_features, train_classes, test_features, test_classes): 152 | train_redfeatures = train_features.copy() 153 | test_redfeatures = test_features.copy() 154 | total = float(len(test_classes)) 155 | 156 | pca = PCA(n_components=160, whiten=True, random_state=42) 157 | ada = 
AdaBoostClassifier(DecisionTreeClassifier(max_depth=10), 158 | algorithm="SAMME.R",#可以不写 159 | n_estimators=200) 160 | model = make_pipeline(pca, ada) 161 | 162 | model.fit(train_redfeatures, train_classes) 163 | classes = model.predict(test_redfeatures) 164 | crr = float(np.sum(classes == test_classes)) / total 165 | #crr = model.score(test_redfeatures,test_classes) 166 | return crr 167 | if __name__=="__main__": 168 | #PE.table_CRR(train_features, train_classes, test_features, test_classes) 169 | #PE.performance_evaluation1(train_features, train_classes, test_features, test_classes) 170 | init_tf() 171 | #init_tf_dense() 172 | #inceptionv4_init() 173 | train_features1 = ex_train() 174 | test_features1 = ex_test() 175 | print("train fe",train_features1.shape) 176 | print("train label",train_classes.shape) 177 | print("test fe",test_features1.shape) 178 | print("test label",test_classes.shape) 179 | print(SVM(train_features1, train_classes, test_features1, test_classes)) 180 | #SVM1(train_features1, train_classes, test_features1, test_classes) 181 | # print(DTC(train_features, train_classes, test_features, test_classes)) 182 | -------------------------------------------------------------------------------- /code/tradition/Matching.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | 4 | 5 | from scipy.spatial import distance 6 | import random 7 | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis 8 | from sklearn.neighbors import KNeighborsClassifier 9 | from sklearn.manifold import LocallyLinearEmbedding 10 | from sklearn.svm import SVC 11 | import numpy as np 12 | 13 | 14 | 15 | def selectTestSample(test_features, test_classes): 16 | index = random.sample(range(len(test_classes)), 108) 17 | sample_features = np.array([test_features[i, :] for i in index]) 18 | sample_classes = np.array([test_classes[i] for i in index]) 19 | return sample_features, sample_classes 20 | 21 | 22 | def CalcTest(train_features, train_classes, test_sample, test_class, dist): 23 | dists = np.zeros(len(train_classes)) 24 | distsm = [] 25 | distsn = [] 26 | offset = np.array([-10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10]) 27 | for i in range(len(train_classes)): 28 | if dist == 1: 29 | distoff = np.ones(11) 30 | for j in range(len(offset)): 31 | distoff[j] = distance.cityblock(train_features[i, :], np.roll(test_sample, offset[j])) 32 | dists[i] = np.min(distoff) 33 | # dists[i] = distance.cityblock(train_features[i,:],test_sample) 34 | if train_classes[i] == test_class: 35 | distsm.append(dists[i]) 36 | else: 37 | distsn.append(dists[i]) 38 | if dist == 2: 39 | distoff = np.ones(11) 40 | for j in range(len(offset)): 41 | distoff[j] = distance.euclidean(train_features[i, :], np.roll(test_sample, offset[j])) 42 | dists[i] = np.min(distoff) 43 | if train_classes[i] == test_class: 44 | distsm.append(dists[i]) 45 | else: 46 | distsn.append(dists[i]) 47 | if dist == 3: 48 | distoff = np.ones(11) 49 | for j in range(len(offset)): 50 | distoff[j] = distance.cosine(train_features[i, :], np.roll(test_sample, offset[j])) 51 | dists[i] = np.min(distoff) 52 | # dists[i] = distance.cosine(train_features[i,:],test_sample) 53 | if train_classes[i] == test_class: 54 | distsm.append(dists[i]) 55 | else: 56 | distsn.append(dists[i]) 57 | sample_class = train_classes[np.argmin(dists)] 58 | return sample_class, distsm, distsn 59 | 60 | 61 | def IrisMatching(train_features, train_classes, test_features, test_classes, dist): 62 | total 
= float(len(test_classes)) 63 | num = 0.0 64 | distancesm = [] 65 | distancesn = [] 66 | 67 | for i in range(len(test_classes)): 68 | test_class, distsm, distsn = CalcTest(train_features, train_classes, test_features[i, :], test_classes[i], dist) 69 | distancesm.extend(distsm) 70 | distancesn.extend(distsn) 71 | if test_class == test_classes[i]: 72 | num += 1.0 73 | crr = num / total 74 | 75 | return crr, distancesm, distancesn 76 | 77 | 78 | def IrisMatchingRed(train_features, train_classes, test_features, test_classes, n): 79 | train_redfeatures = train_features.copy() 80 | test_redfeatures = test_features.copy() 81 | total = float(len(test_classes)) 82 | if n < 108: 83 | lda = LinearDiscriminantAnalysis(n_components=n) 84 | lda.fit(train_features, train_classes) 85 | train_redfeatures = lda.transform(train_features) 86 | test_redfeatures = lda.transform(test_features) 87 | if n >= 108 and n < 323: 88 | lle = LocallyLinearEmbedding(n_neighbors=n + 1, n_components=n) 89 | lle.fit(train_features) 90 | train_redfeatures = lle.transform(train_features) 91 | test_redfeatures = lle.transform(test_features) 92 | 93 | l1knn = KNeighborsClassifier(n_neighbors=1, metric='l1') 94 | l1knn.fit(train_redfeatures, train_classes) 95 | l1classes = l1knn.predict(test_redfeatures) 96 | l1crr = float(np.sum(l1classes == test_classes)) / total 97 | 98 | l2knn = KNeighborsClassifier(n_neighbors=1, metric='l2') 99 | l2knn.fit(train_redfeatures, train_classes) 100 | l2classes = l2knn.predict(test_redfeatures) 101 | l2crr = float(np.sum(l2classes == test_classes)) / total 102 | 103 | cosknn = KNeighborsClassifier(n_neighbors=1, metric='cosine') 104 | cosknn.fit(train_redfeatures, train_classes) 105 | cosclasses = cosknn.predict(test_redfeatures) 106 | coscrr = float(np.sum(cosclasses == test_classes)) / total 107 | # table_CRR() 108 | return l1crr, l2crr, coscrr 109 | 110 | def IrisMatchingRed1(train_features, train_classes, test_features, test_classes, n): 111 | train_redfeatures = train_features.copy() 112 | test_redfeatures = test_features.copy() 113 | total = float(len(test_classes)) 114 | if n < 108: 115 | lda = LinearDiscriminantAnalysis(n_components=n) 116 | lda.fit(train_features, train_classes) 117 | train_redfeatures = lda.transform(train_features) 118 | test_redfeatures = lda.transform(test_features) 119 | if n >= 108 and n < 323: 120 | lle = LocallyLinearEmbedding(n_neighbors=n + 1, n_components=n) 121 | lle.fit(train_features) 122 | train_redfeatures = lle.transform(train_features) 123 | test_redfeatures = lle.transform(test_features) 124 | 125 | model=SVC(kernel='rbf') 126 | model.fit(train_redfeatures,train_classes) 127 | modelclasses=model.predict(test_redfeatures) 128 | modelcrr=float(np.sum(modelclasses == test_classes)) / total 129 | return modelcrr 130 | 131 | def IrisMatchingBootstrap(train_features, train_classes, test_features, test_classes, times, thresholds): 132 | total_fmrs = [] 133 | total_fnmrs = [] 134 | total_crr = np.zeros(times) 135 | lle = LocallyLinearEmbedding(n_neighbors=201, n_components=200) 136 | lle.fit(train_features) 137 | train_redfeatures = lle.transform(train_features) 138 | test_redfeatures = lle.transform(test_features) 139 | for t in range(times): 140 | tests_features, tests_classes = selectTestSample(test_redfeatures, test_classes) 141 | crr, distm, distn = IrisMatching(train_redfeatures, train_classes, tests_features, tests_classes, 3) 142 | fmrs, fnmrs = calcROC(distm, distn, thresholds) 143 | total_fmrs.append(fmrs) 144 | total_fnmrs.append(fnmrs) 145 | 
total_crr[t] = crr 146 | total_fmrs = np.array(total_fmrs) 147 | total_fnmrs = np.array(total_fnmrs) 148 | crr_mean = np.mean(total_crr) 149 | crr_std = np.std(total_crr) 150 | crr_u = min(crr_mean + crr_std * 1.96, 1) 151 | crr_l = crr_mean - crr_std * 1.96 152 | return total_fmrs, total_fnmrs, crr_mean, crr_u, crr_l 153 | 154 | 155 | def calcROCBootstrap(fmrs, fnmrs): 156 | fmrs_mean = np.mean(fmrs, axis=0) 157 | fmrs_l = np.percentile(fmrs, 5, axis=0) 158 | fmrs_u = np.percentile(fmrs, 95, axis=0) 159 | 160 | fnmrs_mean = np.mean(fnmrs, axis=0) 161 | fnmrs_l = np.percentile(fnmrs, 5, axis=0) 162 | fnmrs_u = np.percentile(fnmrs, 95, axis=0) 163 | 164 | return fmrs_mean, fmrs_l, fmrs_u, fnmrs_mean, fnmrs_l, fnmrs_u 165 | 166 | 167 | def calcROC(distancesm, distancesn, thresholds): 168 | distancesm = np.array(distancesm) 169 | distancesn = np.array(distancesn) 170 | numm = float(len(distancesm)) 171 | numn = float(len(distancesn)) 172 | # thresholds = [0.04,0.043,0.046,0.049,0.052,0.055,0.058,0.061,0.064,0.067,0.07,0.073,0.076,0.079,0.082,0.085,0.088,0.091,0.094,0.097,0.1,0.103,0.106,0.109] 173 | fmrs = [] 174 | fnmrs = [] 175 | 176 | for t in thresholds: 177 | fm = 0.0 178 | fnm = 0.0 179 | for dm in distancesm: 180 | if dm > t: 181 | fnm += 1.0 182 | for dn in distancesn: 183 | if dn < t: 184 | fm += 1.0 185 | 186 | fnmr = fnm / numm 187 | fmr = fm / numn 188 | 189 | fmrs.append(fmr) 190 | fnmrs.append(fnmr) 191 | # fmr_fnmr(fmr,fnmr) 192 | return fmrs, fnmrs # two list -------------------------------------------------------------------------------- /code/CNN_feature/DenseNet.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 pudae. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Contains the definition of the DenseNet architecture. 16 | As described in https://arxiv.org/abs/1608.06993. 17 | Densely Connected Convolutional Networks 18 | Gao Huang, Zhuang Liu, Kilian Q. 
Weinberger, Laurens van der Maaten 19 | """ 20 | from __future__ import absolute_import 21 | from __future__ import division 22 | from __future__ import print_function 23 | 24 | import tensorflow as tf 25 | 26 | slim = tf.contrib.slim 27 | 28 | 29 | @slim.add_arg_scope 30 | def _global_avg_pool2d(inputs, data_format='NHWC', scope=None, outputs_collections=None): 31 | with tf.variable_scope(scope, 'xx', [inputs]) as sc: 32 | axis = [1, 2] if data_format == 'NHWC' else [2, 3] 33 | net = tf.reduce_mean(inputs, axis=axis, keep_dims=True) 34 | net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net) 35 | return net 36 | 37 | 38 | @slim.add_arg_scope 39 | def _conv(inputs, num_filters, kernel_size, stride=1, dropout_rate=None, 40 | scope=None, outputs_collections=None): 41 | with tf.variable_scope(scope, 'xx', [inputs]) as sc: 42 | net = slim.batch_norm(inputs) 43 | net = tf.nn.relu(net) 44 | net = slim.conv2d(net, num_filters, kernel_size) 45 | 46 | if dropout_rate: 47 | net = tf.nn.dropout(net) 48 | 49 | net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net) 50 | 51 | return net 52 | 53 | 54 | @slim.add_arg_scope 55 | def _conv_block(inputs, num_filters, data_format='NHWC', scope=None, outputs_collections=None): 56 | with tf.variable_scope(scope, 'conv_blockx', [inputs]) as sc: 57 | net = inputs 58 | net = _conv(net, num_filters*4, 1, scope='x1') 59 | net = _conv(net, num_filters, 3, scope='x2') 60 | if data_format == 'NHWC': 61 | net = tf.concat([inputs, net], axis=3) 62 | else: # "NCHW" 63 | net = tf.concat([inputs, net], axis=1) 64 | 65 | net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net) 66 | 67 | return net 68 | 69 | 70 | @slim.add_arg_scope 71 | def _dense_block(inputs, num_layers, num_filters, growth_rate, 72 | grow_num_filters=True, scope=None, outputs_collections=None): 73 | 74 | with tf.variable_scope(scope, 'dense_blockx', [inputs]) as sc: 75 | net = inputs 76 | for i in range(num_layers): 77 | branch = i + 1 78 | net = _conv_block(net, growth_rate, scope='conv_block'+str(branch)) 79 | 80 | if grow_num_filters: 81 | num_filters += growth_rate 82 | 83 | net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net) 84 | 85 | return net, num_filters 86 | 87 | 88 | @slim.add_arg_scope 89 | def _transition_block(inputs, num_filters, compression=1.0, 90 | scope=None, outputs_collections=None): 91 | 92 | num_filters = int(num_filters * compression) 93 | with tf.variable_scope(scope, 'transition_blockx', [inputs]) as sc: 94 | net = inputs 95 | net = _conv(net, num_filters, 1, scope='blk') 96 | 97 | net = slim.avg_pool2d(net, 2) 98 | 99 | net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net) 100 | 101 | return net, num_filters 102 | 103 | 104 | def densenet(inputs, 105 | num_classes=1000, 106 | reduction=None, 107 | growth_rate=None, 108 | num_filters=None, 109 | num_layers=None, 110 | dropout_rate=None, 111 | data_format='NHWC', 112 | is_training=True, 113 | reuse=None, 114 | scope=None): 115 | assert reduction is not None 116 | assert growth_rate is not None 117 | assert num_filters is not None 118 | assert num_layers is not None 119 | 120 | compression = 1.0 - reduction 121 | num_dense_blocks = len(num_layers) 122 | 123 | if data_format == 'NCHW': 124 | inputs = tf.transpose(inputs, [0, 3, 1, 2]) 125 | 126 | with tf.variable_scope(scope, 'densenetxxx', [inputs, num_classes], 127 | reuse=reuse) as sc: 128 | end_points_collection = sc.name + '_end_points' 129 | with slim.arg_scope([slim.batch_norm, 
slim.dropout], 130 | is_training=is_training), \ 131 | slim.arg_scope([slim.conv2d, _conv, _conv_block, 132 | _dense_block, _transition_block], 133 | outputs_collections=end_points_collection), \ 134 | slim.arg_scope([_conv], dropout_rate=dropout_rate): 135 | net = inputs 136 | 137 | # initial convolution 138 | net = slim.conv2d(net, num_filters, 7, stride=2, scope='conv1') 139 | net = slim.batch_norm(net) 140 | net = tf.nn.relu(net) 141 | net = slim.max_pool2d(net, 3, stride=2, padding='SAME') 142 | 143 | # blocks 144 | for i in range(num_dense_blocks - 1): 145 | # dense blocks 146 | net, num_filters = _dense_block(net, num_layers[i], num_filters, 147 | growth_rate, 148 | scope='dense_block' + str(i+1)) 149 | 150 | # Add transition_block 151 | net, num_filters = _transition_block(net, num_filters, 152 | compression=compression, 153 | scope='transition_block' + str(i+1)) 154 | 155 | net, num_filters = _dense_block( 156 | net, num_layers[-1], num_filters, 157 | growth_rate, 158 | scope='dense_block' + str(num_dense_blocks)) 159 | net = slim.avg_pool2d(net, 2) 160 | 161 | return net 162 | 163 | # final blocks 164 | ''' with tf.variable_scope('final_block', [inputs]): 165 | net = slim.batch_norm(net) 166 | net = tf.nn.relu(net) 167 | net = _global_avg_pool2d(net, scope='global_avg_pool') 168 | 169 | net = slim.conv2d(net, num_classes, 1, 170 | biases_initializer=tf.zeros_initializer(), 171 | scope='logits') 172 | 173 | end_points = slim.utils.convert_collection_to_dict( 174 | end_points_collection) 175 | 176 | if num_classes is not None: 177 | end_points['predictions'] = slim.softmax(net, scope='predictions')''' 178 | 179 | #return net, end_points 180 | 181 | 182 | def densenet121(inputs, num_classes=1000, data_format='NHWC', is_training=True, reuse=None): 183 | return densenet(inputs, 184 | num_classes=num_classes, 185 | reduction=0.5, 186 | growth_rate=32, 187 | num_filters=64, 188 | num_layers=[6,12,6], 189 | data_format=data_format, 190 | is_training=is_training, 191 | reuse=reuse, 192 | scope='densenet121') 193 | densenet121.default_image_size = 224 194 | 195 | 196 | def densenet161(inputs, num_classes=1000, data_format='NHWC', is_training=True, reuse=None): 197 | return densenet(inputs, 198 | num_classes=num_classes, 199 | reduction=0.5, 200 | growth_rate=48, 201 | num_filters=96, 202 | num_layers=[6,12,36,24], 203 | data_format=data_format, 204 | is_training=is_training, 205 | reuse=reuse, 206 | scope='densenet161') 207 | densenet161.default_image_size = 224 208 | 209 | 210 | def densenet169(inputs, num_classes=1000, data_format='NHWC', is_training=True, reuse=None): 211 | return densenet(inputs, 212 | num_classes=num_classes, 213 | reduction=0.5, 214 | growth_rate=32, 215 | num_filters=64, 216 | num_layers=[6,12,32,32], 217 | data_format=data_format, 218 | is_training=is_training, 219 | reuse=reuse, 220 | scope='densenet169') 221 | densenet169.default_image_size = 224 222 | 223 | 224 | def densenet_arg_scope(weight_decay=1e-4, 225 | batch_norm_decay=0.99, 226 | batch_norm_epsilon=1.1e-5, 227 | data_format='NHWC'): 228 | with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.avg_pool2d, slim.max_pool2d, 229 | _conv_block, _global_avg_pool2d], 230 | data_format=data_format): 231 | with slim.arg_scope([slim.conv2d], 232 | weights_regularizer=slim.l2_regularizer(weight_decay), 233 | activation_fn=None, 234 | biases_initializer=None): 235 | with slim.arg_scope([slim.batch_norm], 236 | scale=True, 237 | decay=batch_norm_decay, 238 | epsilon=batch_norm_epsilon) as scope: 239 | return scope 
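Note: in this repository's DenseNet.py the logits/"final block" section is commented out, so densenet121/161/169 return the pooled feature map rather than class scores, which makes them usable as fixed feature extractors for the normalized iris images. The following is a minimal, hypothetical usage sketch, not part of the repository: it assumes TensorFlow 1.x (for tf.contrib.slim), that it is run from code/CNN_feature so DenseNet.py is importable, and a 224x224x3 placeholder input; it initializes random weights for illustration, whereas real use would restore a trained checkpoint.

import numpy as np
import tensorflow as tf

from DenseNet import densenet121, densenet_arg_scope

slim = tf.contrib.slim

# Placeholder for a batch of normalized iris images (the 224x224x3 size is an assumption).
images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')

# Build the truncated DenseNet-121 defined above; because the final logits block
# is commented out, it returns the feature map left after the last average pooling.
with slim.arg_scope(densenet_arg_scope()):
    feature_map = densenet121(images, is_training=False)

# Flatten the [batch, h, w, c] feature map into one feature vector per image.
features = slim.flatten(feature_map)

with tf.Session() as sess:
    # Real use would restore a checkpoint here instead of random initialization.
    sess.run(tf.global_variables_initializer())
    dummy = np.zeros((1, 224, 224, 3), dtype=np.float32)
    vec = sess.run(features, feed_dict={images: dummy})
    # Vector length depends on the input size and this truncated block configuration.
    print(vec.shape)

Feature vectors produced this way can then be compared with a distance measure or fed to an external classifier (e.g., the SVM/KNN evaluation used elsewhere in this project).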
-------------------------------------------------------------------------------- /code/CNN_feature/resnet_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Contains building blocks for various versions of Residual Networks. 16 | Residual networks (ResNets) were proposed in: 17 | Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun 18 | Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015 19 | More variants were introduced in: 20 | Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun 21 | Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016 22 | We can obtain different ResNet variants by changing the network depth, width, 23 | and form of residual unit. This module implements the infrastructure for 24 | building them. Concrete ResNet units and full ResNet networks are implemented in 25 | the accompanying resnet_v1.py and resnet_v2.py modules. 26 | Compared to https://github.com/KaimingHe/deep-residual-networks, in the current 27 | implementation we subsample the output activations in the last residual unit of 28 | each block, instead of subsampling the input activations in the first residual 29 | unit of each block. The two implementations give identical results but our 30 | implementation is more memory efficient. 31 | """ 32 | from __future__ import absolute_import 33 | from __future__ import division 34 | from __future__ import print_function 35 | 36 | import collections 37 | import tensorflow as tf 38 | 39 | slim = tf.contrib.slim 40 | 41 | 42 | class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])): 43 | """A named tuple describing a ResNet block. 44 | Its parts are: 45 | scope: The scope of the `Block`. 46 | unit_fn: The ResNet unit function which takes as input a `Tensor` and 47 | returns another `Tensor` with the output of the ResNet unit. 48 | args: A list of length equal to the number of units in the `Block`. The list 49 | contains one (depth, depth_bottleneck, stride) tuple for each unit in the 50 | block to serve as argument to unit_fn. 51 | """ 52 | 53 | 54 | def subsample(inputs, factor, scope=None): 55 | """Subsamples the input along the spatial dimensions. 56 | Args: 57 | inputs: A `Tensor` of size [batch, height_in, width_in, channels]. 58 | factor: The subsampling factor. 59 | scope: Optional variable_scope. 60 | Returns: 61 | output: A `Tensor` of size [batch, height_out, width_out, channels] with the 62 | input, either intact (if factor == 1) or subsampled (if factor > 1). 63 | """ 64 | if factor == 1: 65 | return inputs 66 | else: 67 | return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope) 68 | 69 | 70 | def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None): 71 | """Strided 2-D convolution with 'SAME' padding. 
72 | When stride > 1, then we do explicit zero-padding, followed by conv2d with 73 | 'VALID' padding. 74 | Note that 75 | net = conv2d_same(inputs, num_outputs, 3, stride=stride) 76 | is equivalent to 77 | net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME') 78 | net = subsample(net, factor=stride) 79 | whereas 80 | net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME') 81 | is different when the input's height or width is even, which is why we add the 82 | current function. For more details, see ResnetUtilsTest.testConv2DSameEven(). 83 | Args: 84 | inputs: A 4-D tensor of size [batch, height_in, width_in, channels]. 85 | num_outputs: An integer, the number of output filters. 86 | kernel_size: An int with the kernel_size of the filters. 87 | stride: An integer, the output stride. 88 | rate: An integer, rate for atrous convolution. 89 | scope: Scope. 90 | Returns: 91 | output: A 4-D tensor of size [batch, height_out, width_out, channels] with 92 | the convolution output. 93 | """ 94 | if stride == 1: 95 | return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate, 96 | padding='SAME', scope=scope) 97 | else: 98 | kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) 99 | pad_total = kernel_size_effective - 1 100 | pad_beg = pad_total // 2 101 | pad_end = pad_total - pad_beg 102 | inputs = tf.pad(inputs, 103 | [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) 104 | return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride, 105 | rate=rate, padding='VALID', scope=scope) 106 | 107 | 108 | @slim.add_arg_scope 109 | def stack_blocks_dense(net, blocks, output_stride=None, 110 | store_non_strided_activations=False, 111 | outputs_collections=None): 112 | """Stacks ResNet `Blocks` and controls output feature density. 113 | First, this function creates scopes for the ResNet in the form of 114 | 'block_name/unit_1', 'block_name/unit_2', etc. 115 | Second, this function allows the user to explicitly control the ResNet 116 | output_stride, which is the ratio of the input to output spatial resolution. 117 | This is useful for dense prediction tasks such as semantic segmentation or 118 | object detection. 119 | Most ResNets consist of 4 ResNet blocks and subsample the activations by a 120 | factor of 2 when transitioning between consecutive ResNet blocks. This results 121 | to a nominal ResNet output_stride equal to 8. If we set the output_stride to 122 | half the nominal network stride (e.g., output_stride=4), then we compute 123 | responses twice. 124 | Control of the output feature density is implemented by atrous convolution. 125 | Args: 126 | net: A `Tensor` of size [batch, height, width, channels]. 127 | blocks: A list of length equal to the number of ResNet `Blocks`. Each 128 | element is a ResNet `Block` object describing the units in the `Block`. 129 | output_stride: If `None`, then the output will be computed at the nominal 130 | network stride. If output_stride is not `None`, it specifies the requested 131 | ratio of input to output spatial resolution, which needs to be equal to 132 | the product of unit strides from the start up to some level of the ResNet. 133 | For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1, 134 | then valid values for the output_stride are 1, 2, 6, 24 or None (which 135 | is equivalent to output_stride=24). 
136 | store_non_strided_activations: If True, we compute non-strided (undecimated) 137 | activations at the last unit of each block and store them in the 138 | `outputs_collections` before subsampling them. This gives us access to 139 | higher resolution intermediate activations which are useful in some 140 | dense prediction problems but increases 4x the computation and memory cost 141 | at the last unit of each block. 142 | outputs_collections: Collection to add the ResNet block outputs. 143 | Returns: 144 | net: Output tensor with stride equal to the specified output_stride. 145 | Raises: 146 | ValueError: If the target output_stride is not valid. 147 | """ 148 | # The current_stride variable keeps track of the effective stride of the 149 | # activations. This allows us to invoke atrous convolution whenever applying 150 | # the next residual unit would result in the activations having stride larger 151 | # than the target output_stride. 152 | current_stride = 1 153 | 154 | # The atrous convolution rate parameter. 155 | rate = 1 156 | 157 | for block in blocks: 158 | with tf.variable_scope(block.scope, 'block', [net]) as sc: 159 | block_stride = 1 160 | for i, unit in enumerate(block.args): 161 | if store_non_strided_activations and i == len(block.args) - 1: 162 | # Move stride from the block's last unit to the end of the block. 163 | block_stride = unit.get('stride', 1) 164 | unit = dict(unit, stride=1) 165 | 166 | with tf.variable_scope('unit_%d' % (i + 1), values=[net]): 167 | # If we have reached the target output_stride, then we need to employ 168 | # atrous convolution with stride=1 and multiply the atrous rate by the 169 | # current unit's stride for use in subsequent layers. 170 | if output_stride is not None and current_stride == output_stride: 171 | net = block.unit_fn(net, rate=rate, **dict(unit, stride=1)) 172 | rate *= unit.get('stride', 1) 173 | 174 | else: 175 | net = block.unit_fn(net, rate=1, **unit) 176 | current_stride *= unit.get('stride', 1) 177 | if output_stride is not None and current_stride > output_stride: 178 | raise ValueError('The target output_stride cannot be reached.') 179 | 180 | # Collect activations at the block's end before performing subsampling. 181 | net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net) 182 | 183 | # Subsampling of the block's output activations. 184 | if output_stride is not None and current_stride == output_stride: 185 | rate *= block_stride 186 | else: 187 | net = subsample(net, block_stride) 188 | current_stride *= block_stride 189 | if output_stride is not None and current_stride > output_stride: 190 | raise ValueError('The target output_stride cannot be reached.') 191 | 192 | if output_stride is not None and current_stride != output_stride: 193 | raise ValueError('The target output_stride cannot be reached.') 194 | 195 | return net 196 | 197 | 198 | def resnet_arg_scope(weight_decay=0.0001, 199 | batch_norm_decay=0.997, 200 | batch_norm_epsilon=1e-5, 201 | batch_norm_scale=True, 202 | activation_fn=tf.nn.relu, 203 | use_batch_norm=True, 204 | batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS): 205 | """Defines the default ResNet arg scope. 206 | TODO(gpapan): The batch-normalization related default values above are 207 | appropriate for use in conjunction with the reference ResNet models 208 | released at https://github.com/KaimingHe/deep-residual-networks. When 209 | training ResNets from scratch, they might need to be tuned. 
210 | Args: 211 | weight_decay: The weight decay to use for regularizing the model. 212 | batch_norm_decay: The moving average decay when estimating layer activation 213 | statistics in batch normalization. 214 | batch_norm_epsilon: Small constant to prevent division by zero when 215 | normalizing activations by their variance in batch normalization. 216 | batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the 217 | activations in the batch normalization layer. 218 | activation_fn: The activation function which is used in ResNet. 219 | use_batch_norm: Whether or not to use batch normalization. 220 | batch_norm_updates_collections: Collection for the update ops for 221 | batch norm. 222 | Returns: 223 | An `arg_scope` to use for the resnet models. 224 | """ 225 | batch_norm_params = { 226 | 'decay': batch_norm_decay, 227 | 'epsilon': batch_norm_epsilon, 228 | 'scale': batch_norm_scale, 229 | 'updates_collections': batch_norm_updates_collections, 230 | 'fused': None, # Use fused batch norm if possible. 231 | } 232 | 233 | with slim.arg_scope( 234 | [slim.conv2d], 235 | weights_regularizer=slim.l2_regularizer(weight_decay), 236 | weights_initializer=slim.variance_scaling_initializer(), 237 | activation_fn=activation_fn, 238 | normalizer_fn=slim.batch_norm if use_batch_norm else None, 239 | normalizer_params=batch_norm_params): 240 | with slim.arg_scope([slim.batch_norm], **batch_norm_params): 241 | # The following implies padding='SAME' for pool1, which makes feature 242 | # alignment easier for dense prediction tasks. This is also used in 243 | # https://github.com/facebook/fb.resnet.torch. However the accompanying 244 | # code of 'Deep Residual Learning for Image Recognition' uses 245 | # padding='VALID' for pool1. You can switch to that choice by setting 246 | # slim.arg_scope([slim.max_pool2d], padding='VALID'). 
247 | with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc: 248 | return arg_sc -------------------------------------------------------------------------------- /csv/train1.csv: -------------------------------------------------------------------------------- 1 | img,label 2 | D:/study/iris/data\030\1\030_1_2.bmp,30 3 | D:/study/iris/data\066\1\066_1_3.bmp,66 4 | D:/study/iris/data\011\1\011_1_2.bmp,11 5 | D:/study/iris/data\002\1\002_1_3.bmp,2 6 | D:/study/iris/data\059\1\059_1_2.bmp,59 7 | D:/study/iris/data\079\1\079_1_1.bmp,79 8 | D:/study/iris/data\018\1\018_1_1.bmp,18 9 | D:/study/iris/data\055\1\055_1_2.bmp,55 10 | D:/study/iris/data\040\1\040_1_2.bmp,40 11 | D:/study/iris/data\075\1\075_1_1.bmp,75 12 | D:/study/iris/data\100\1\100_1_3.bmp,100 13 | D:/study/iris/data\083\1\083_1_1.bmp,83 14 | D:/study/iris/data\093\1\093_1_2.bmp,93 15 | D:/study/iris/data\062\1\062_1_3.bmp,62 16 | D:/study/iris/data\105\1\105_1_2.bmp,105 17 | D:/study/iris/data\093\1\093_1_1.bmp,93 18 | D:/study/iris/data\082\1\082_1_1.bmp,82 19 | D:/study/iris/data\084\1\084_1_3.bmp,84 20 | D:/study/iris/data\071\1\071_1_3.bmp,71 21 | D:/study/iris/data\059\1\059_1_3.bmp,59 22 | D:/study/iris/data\009\1\009_1_1.bmp,9 23 | D:/study/iris/data\076\1\076_1_1.bmp,76 24 | D:/study/iris/data\091\1\091_1_1.bmp,91 25 | D:/study/iris/data\107\1\107_1_1.bmp,107 26 | D:/study/iris/data\086\1\086_1_1.bmp,86 27 | D:/study/iris/data\053\1\053_1_1.bmp,53 28 | D:/study/iris/data\003\1\003_1_3.bmp,3 29 | D:/study/iris/data\020\1\020_1_3.bmp,20 30 | D:/study/iris/data\044\1\044_1_3.bmp,44 31 | D:/study/iris/data\102\1\102_1_2.bmp,102 32 | D:/study/iris/data\078\1\078_1_3.bmp,78 33 | D:/study/iris/data\017\1\017_1_3.bmp,17 34 | D:/study/iris/data\063\1\063_1_2.bmp,63 35 | D:/study/iris/data\052\1\052_1_3.bmp,52 36 | D:/study/iris/data\067\1\067_1_3.bmp,67 37 | D:/study/iris/data\043\1\043_1_3.bmp,43 38 | D:/study/iris/data\047\1\047_1_3.bmp,47 39 | D:/study/iris/data\098\1\098_1_1.bmp,98 40 | D:/study/iris/data\049\1\049_1_2.bmp,49 41 | D:/study/iris/data\013\1\013_1_3.bmp,13 42 | D:/study/iris/data\007\1\007_1_1.bmp,7 43 | D:/study/iris/data\066\1\066_1_1.bmp,66 44 | D:/study/iris/data\080\1\080_1_1.bmp,80 45 | D:/study/iris/data\064\1\064_1_2.bmp,64 46 | D:/study/iris/data\082\1\082_1_3.bmp,82 47 | D:/study/iris/data\074\1\074_1_1.bmp,74 48 | D:/study/iris/data\055\1\055_1_1.bmp,55 49 | D:/study/iris/data\001\1\001_1_2.bmp,1 50 | D:/study/iris/data\103\1\103_1_1.bmp,103 51 | D:/study/iris/data\099\1\099_1_2.bmp,99 52 | D:/study/iris/data\041\1\041_1_3.bmp,41 53 | D:/study/iris/data\093\1\093_1_3.bmp,93 54 | D:/study/iris/data\054\1\054_1_1.bmp,54 55 | D:/study/iris/data\098\1\098_1_2.bmp,98 56 | D:/study/iris/data\108\1\108_1_1.bmp,108 57 | D:/study/iris/data\092\1\092_1_1.bmp,92 58 | D:/study/iris/data\015\1\015_1_3.bmp,15 59 | D:/study/iris/data\071\1\071_1_2.bmp,71 60 | D:/study/iris/data\061\1\061_1_2.bmp,61 61 | D:/study/iris/data\101\1\101_1_1.bmp,101 62 | D:/study/iris/data\015\1\015_1_1.bmp,15 63 | D:/study/iris/data\101\1\101_1_2.bmp,101 64 | D:/study/iris/data\022\1\022_1_1.bmp,22 65 | D:/study/iris/data\107\1\107_1_2.bmp,107 66 | D:/study/iris/data\021\1\021_1_2.bmp,21 67 | D:/study/iris/data\038\1\038_1_3.bmp,38 68 | D:/study/iris/data\012\1\012_1_3.bmp,12 69 | D:/study/iris/data\040\1\040_1_1.bmp,40 70 | D:/study/iris/data\079\1\079_1_3.bmp,79 71 | D:/study/iris/data\104\1\104_1_1.bmp,104 72 | D:/study/iris/data\021\1\021_1_3.bmp,21 73 | D:/study/iris/data\069\1\069_1_2.bmp,69 74 | 
D:/study/iris/data\033\1\033_1_3.bmp,33 75 | D:/study/iris/data\015\1\015_1_2.bmp,15 76 | D:/study/iris/data\085\1\085_1_1.bmp,85 77 | D:/study/iris/data\051\1\051_1_2.bmp,51 78 | D:/study/iris/data\057\1\057_1_3.bmp,57 79 | D:/study/iris/data\009\1\009_1_3.bmp,9 80 | D:/study/iris/data\033\1\033_1_2.bmp,33 81 | D:/study/iris/data\003\1\003_1_1.bmp,3 82 | D:/study/iris/data\088\1\088_1_1.bmp,88 83 | D:/study/iris/data\082\1\082_1_2.bmp,82 84 | D:/study/iris/data\062\1\062_1_1.bmp,62 85 | D:/study/iris/data\022\1\022_1_3.bmp,22 86 | D:/study/iris/data\003\1\003_1_2.bmp,3 87 | D:/study/iris/data\033\1\033_1_1.bmp,33 88 | D:/study/iris/data\085\1\085_1_2.bmp,85 89 | D:/study/iris/data\061\1\061_1_1.bmp,61 90 | D:/study/iris/data\022\1\022_1_2.bmp,22 91 | D:/study/iris/data\025\1\025_1_3.bmp,25 92 | D:/study/iris/data\051\1\051_1_3.bmp,51 93 | D:/study/iris/data\060\1\060_1_2.bmp,60 94 | D:/study/iris/data\070\1\070_1_1.bmp,70 95 | D:/study/iris/data\095\1\095_1_3.bmp,95 96 | D:/study/iris/data\029\1\029_1_1.bmp,29 97 | D:/study/iris/data\026\1\026_1_1.bmp,26 98 | D:/study/iris/data\004\1\004_1_3.bmp,4 99 | D:/study/iris/data\054\1\054_1_3.bmp,54 100 | D:/study/iris/data\050\1\050_1_2.bmp,50 101 | D:/study/iris/data\065\1\065_1_3.bmp,65 102 | D:/study/iris/data\075\1\075_1_2.bmp,75 103 | D:/study/iris/data\102\1\102_1_3.bmp,102 104 | D:/study/iris/data\081\1\081_1_2.bmp,81 105 | D:/study/iris/data\085\1\085_1_3.bmp,85 106 | D:/study/iris/data\044\1\044_1_1.bmp,44 107 | D:/study/iris/data\049\1\049_1_1.bmp,49 108 | D:/study/iris/data\057\1\057_1_2.bmp,57 109 | D:/study/iris/data\077\1\077_1_2.bmp,77 110 | D:/study/iris/data\019\1\019_1_3.bmp,19 111 | D:/study/iris/data\103\1\103_1_2.bmp,103 112 | D:/study/iris/data\056\1\056_1_1.bmp,56 113 | D:/study/iris/data\016\1\016_1_3.bmp,16 114 | D:/study/iris/data\041\1\041_1_2.bmp,41 115 | D:/study/iris/data\064\1\064_1_1.bmp,64 116 | D:/study/iris/data\095\1\095_1_1.bmp,95 117 | D:/study/iris/data\103\1\103_1_3.bmp,103 118 | D:/study/iris/data\086\1\086_1_2.bmp,86 119 | D:/study/iris/data\079\1\079_1_2.bmp,79 120 | D:/study/iris/data\006\1\006_1_1.bmp,6 121 | D:/study/iris/data\039\1\039_1_3.bmp,39 122 | D:/study/iris/data\031\1\031_1_2.bmp,31 123 | D:/study/iris/data\007\1\007_1_2.bmp,7 124 | D:/study/iris/data\029\1\029_1_2.bmp,29 125 | D:/study/iris/data\011\1\011_1_1.bmp,11 126 | D:/study/iris/data\026\1\026_1_2.bmp,26 127 | D:/study/iris/data\105\1\105_1_3.bmp,105 128 | D:/study/iris/data\106\1\106_1_3.bmp,106 129 | D:/study/iris/data\097\1\097_1_2.bmp,97 130 | D:/study/iris/data\096\1\096_1_2.bmp,96 131 | D:/study/iris/data\072\1\072_1_3.bmp,72 132 | D:/study/iris/data\029\1\029_1_3.bmp,29 133 | D:/study/iris/data\038\1\038_1_1.bmp,38 134 | D:/study/iris/data\002\1\002_1_1.bmp,2 135 | D:/study/iris/data\030\1\030_1_1.bmp,30 136 | D:/study/iris/data\096\1\096_1_1.bmp,96 137 | D:/study/iris/data\045\1\045_1_1.bmp,45 138 | D:/study/iris/data\058\1\058_1_2.bmp,58 139 | D:/study/iris/data\068\1\068_1_2.bmp,68 140 | D:/study/iris/data\100\1\100_1_2.bmp,100 141 | D:/study/iris/data\017\1\017_1_2.bmp,17 142 | D:/study/iris/data\035\1\035_1_1.bmp,35 143 | D:/study/iris/data\081\1\081_1_1.bmp,81 144 | D:/study/iris/data\064\1\064_1_3.bmp,64 145 | D:/study/iris/data\037\1\037_1_1.bmp,37 146 | D:/study/iris/data\008\1\008_1_2.bmp,8 147 | D:/study/iris/data\087\1\087_1_3.bmp,87 148 | D:/study/iris/data\089\1\089_1_2.bmp,89 149 | D:/study/iris/data\089\1\089_1_3.bmp,89 150 | D:/study/iris/data\076\1\076_1_2.bmp,76 151 | D:/study/iris/data\062\1\062_1_2.bmp,62 
152 | D:/study/iris/data\042\1\042_1_1.bmp,42 153 | D:/study/iris/data\073\1\073_1_2.bmp,73 154 | D:/study/iris/data\049\1\049_1_3.bmp,49 155 | D:/study/iris/data\050\1\050_1_1.bmp,50 156 | D:/study/iris/data\057\1\057_1_1.bmp,57 157 | D:/study/iris/data\083\1\083_1_3.bmp,83 158 | D:/study/iris/data\023\1\023_1_1.bmp,23 159 | D:/study/iris/data\053\1\053_1_2.bmp,53 160 | D:/study/iris/data\027\1\027_1_2.bmp,27 161 | D:/study/iris/data\069\1\069_1_1.bmp,69 162 | D:/study/iris/data\058\1\058_1_1.bmp,58 163 | D:/study/iris/data\021\1\021_1_1.bmp,21 164 | D:/study/iris/data\035\1\035_1_3.bmp,35 165 | D:/study/iris/data\001\1\001_1_1.bmp,1 166 | D:/study/iris/data\008\1\008_1_1.bmp,8 167 | D:/study/iris/data\106\1\106_1_1.bmp,106 168 | D:/study/iris/data\098\1\098_1_3.bmp,98 169 | D:/study/iris/data\099\1\099_1_1.bmp,99 170 | D:/study/iris/data\072\1\072_1_2.bmp,72 171 | D:/study/iris/data\056\1\056_1_3.bmp,56 172 | D:/study/iris/data\024\1\024_1_1.bmp,24 173 | D:/study/iris/data\100\1\100_1_1.bmp,100 174 | D:/study/iris/data\014\1\014_1_1.bmp,14 175 | D:/study/iris/data\087\1\087_1_2.bmp,87 176 | D:/study/iris/data\048\1\048_1_1.bmp,48 177 | D:/study/iris/data\047\1\047_1_1.bmp,47 178 | D:/study/iris/data\026\1\026_1_3.bmp,26 179 | D:/study/iris/data\066\1\066_1_2.bmp,66 180 | D:/study/iris/data\055\1\055_1_3.bmp,55 181 | D:/study/iris/data\060\1\060_1_3.bmp,60 182 | D:/study/iris/data\035\1\035_1_2.bmp,35 183 | D:/study/iris/data\052\1\052_1_1.bmp,52 184 | D:/study/iris/data\094\1\094_1_2.bmp,94 185 | D:/study/iris/data\014\1\014_1_2.bmp,14 186 | D:/study/iris/data\065\1\065_1_1.bmp,65 187 | D:/study/iris/data\086\1\086_1_3.bmp,86 188 | D:/study/iris/data\020\1\020_1_2.bmp,20 189 | D:/study/iris/data\044\1\044_1_2.bmp,44 190 | D:/study/iris/data\043\1\043_1_2.bmp,43 191 | D:/study/iris/data\051\1\051_1_1.bmp,51 192 | D:/study/iris/data\039\1\039_1_2.bmp,39 193 | D:/study/iris/data\012\1\012_1_1.bmp,12 194 | D:/study/iris/data\083\1\083_1_2.bmp,83 195 | D:/study/iris/data\008\1\008_1_3.bmp,8 196 | D:/study/iris/data\105\1\105_1_1.bmp,105 197 | D:/study/iris/data\080\1\080_1_2.bmp,80 198 | D:/study/iris/data\017\1\017_1_1.bmp,17 199 | D:/study/iris/data\053\1\053_1_3.bmp,53 200 | D:/study/iris/data\048\1\048_1_3.bmp,48 201 | D:/study/iris/data\034\1\034_1_2.bmp,34 202 | D:/study/iris/data\073\1\073_1_1.bmp,73 203 | D:/study/iris/data\106\1\106_1_2.bmp,106 204 | D:/study/iris/data\068\1\068_1_3.bmp,68 205 | D:/study/iris/data\074\1\074_1_2.bmp,74 206 | D:/study/iris/data\010\1\010_1_3.bmp,10 207 | D:/study/iris/data\084\1\084_1_2.bmp,84 208 | D:/study/iris/data\028\1\028_1_2.bmp,28 209 | D:/study/iris/data\065\1\065_1_2.bmp,65 210 | D:/study/iris/data\024\1\024_1_2.bmp,24 211 | D:/study/iris/data\032\1\032_1_3.bmp,32 212 | D:/study/iris/data\090\1\090_1_2.bmp,90 213 | D:/study/iris/data\030\1\030_1_3.bmp,30 214 | D:/study/iris/data\013\1\013_1_1.bmp,13 215 | D:/study/iris/data\052\1\052_1_2.bmp,52 216 | D:/study/iris/data\073\1\073_1_3.bmp,73 217 | D:/study/iris/data\088\1\088_1_2.bmp,88 218 | D:/study/iris/data\036\1\036_1_2.bmp,36 219 | D:/study/iris/data\025\1\025_1_1.bmp,25 220 | D:/study/iris/data\046\1\046_1_2.bmp,46 221 | D:/study/iris/data\054\1\054_1_2.bmp,54 222 | D:/study/iris/data\104\1\104_1_3.bmp,104 223 | D:/study/iris/data\092\1\092_1_3.bmp,92 224 | D:/study/iris/data\010\1\010_1_1.bmp,10 225 | D:/study/iris/data\094\1\094_1_1.bmp,94 226 | D:/study/iris/data\081\1\081_1_3.bmp,81 227 | D:/study/iris/data\007\1\007_1_3.bmp,7 228 | D:/study/iris/data\016\1\016_1_1.bmp,16 229 | 
D:/study/iris/data\072\1\072_1_1.bmp,72 230 | D:/study/iris/data\050\1\050_1_3.bmp,50 231 | D:/study/iris/data\056\1\056_1_2.bmp,56 232 | D:/study/iris/data\107\1\107_1_3.bmp,107 233 | D:/study/iris/data\067\1\067_1_1.bmp,67 234 | D:/study/iris/data\006\1\006_1_2.bmp,6 235 | D:/study/iris/data\006\1\006_1_3.bmp,6 236 | D:/study/iris/data\045\1\045_1_2.bmp,45 237 | D:/study/iris/data\010\1\010_1_2.bmp,10 238 | D:/study/iris/data\078\1\078_1_1.bmp,78 239 | D:/study/iris/data\011\1\011_1_3.bmp,11 240 | D:/study/iris/data\012\1\012_1_2.bmp,12 241 | D:/study/iris/data\037\1\037_1_3.bmp,37 242 | D:/study/iris/data\043\1\043_1_1.bmp,43 243 | D:/study/iris/data\097\1\097_1_3.bmp,97 244 | D:/study/iris/data\004\1\004_1_1.bmp,4 245 | D:/study/iris/data\046\1\046_1_3.bmp,46 246 | D:/study/iris/data\031\1\031_1_3.bmp,31 247 | D:/study/iris/data\025\1\025_1_2.bmp,25 248 | D:/study/iris/data\092\1\092_1_2.bmp,92 249 | D:/study/iris/data\089\1\089_1_1.bmp,89 250 | D:/study/iris/data\097\1\097_1_1.bmp,97 251 | D:/study/iris/data\005\1\005_1_1.bmp,5 252 | D:/study/iris/data\094\1\094_1_3.bmp,94 253 | D:/study/iris/data\018\1\018_1_3.bmp,18 254 | D:/study/iris/data\023\1\023_1_3.bmp,23 255 | D:/study/iris/data\034\1\034_1_3.bmp,34 256 | D:/study/iris/data\067\1\067_1_2.bmp,67 257 | D:/study/iris/data\108\1\108_1_2.bmp,108 258 | D:/study/iris/data\048\1\048_1_2.bmp,48 259 | D:/study/iris/data\013\1\013_1_2.bmp,13 260 | D:/study/iris/data\045\1\045_1_3.bmp,45 261 | D:/study/iris/data\096\1\096_1_3.bmp,96 262 | D:/study/iris/data\032\1\032_1_1.bmp,32 263 | D:/study/iris/data\095\1\095_1_2.bmp,95 264 | D:/study/iris/data\063\1\063_1_1.bmp,63 265 | D:/study/iris/data\077\1\077_1_1.bmp,77 266 | D:/study/iris/data\040\1\040_1_3.bmp,40 267 | D:/study/iris/data\068\1\068_1_1.bmp,68 268 | D:/study/iris/data\088\1\088_1_3.bmp,88 269 | D:/study/iris/data\108\1\108_1_3.bmp,108 270 | D:/study/iris/data\101\1\101_1_3.bmp,101 271 | D:/study/iris/data\070\1\070_1_2.bmp,70 272 | D:/study/iris/data\080\1\080_1_3.bmp,80 273 | D:/study/iris/data\090\1\090_1_1.bmp,90 274 | D:/study/iris/data\005\1\005_1_2.bmp,5 275 | D:/study/iris/data\002\1\002_1_2.bmp,2 276 | D:/study/iris/data\090\1\090_1_3.bmp,90 277 | D:/study/iris/data\014\1\014_1_3.bmp,14 278 | D:/study/iris/data\099\1\099_1_3.bmp,99 279 | D:/study/iris/data\027\1\027_1_3.bmp,27 280 | D:/study/iris/data\018\1\018_1_2.bmp,18 281 | D:/study/iris/data\046\1\046_1_1.bmp,46 282 | D:/study/iris/data\036\1\036_1_3.bmp,36 283 | D:/study/iris/data\019\1\019_1_1.bmp,19 284 | D:/study/iris/data\028\1\028_1_3.bmp,28 285 | D:/study/iris/data\102\1\102_1_1.bmp,102 286 | D:/study/iris/data\069\1\069_1_3.bmp,69 287 | D:/study/iris/data\076\1\076_1_3.bmp,76 288 | D:/study/iris/data\024\1\024_1_3.bmp,24 289 | D:/study/iris/data\061\1\061_1_3.bmp,61 290 | D:/study/iris/data\058\1\058_1_3.bmp,58 291 | D:/study/iris/data\019\1\019_1_2.bmp,19 292 | D:/study/iris/data\023\1\023_1_2.bmp,23 293 | D:/study/iris/data\059\1\059_1_1.bmp,59 294 | D:/study/iris/data\037\1\037_1_2.bmp,37 295 | D:/study/iris/data\042\1\042_1_3.bmp,42 296 | D:/study/iris/data\004\1\004_1_2.bmp,4 297 | D:/study/iris/data\078\1\078_1_2.bmp,78 298 | D:/study/iris/data\016\1\016_1_2.bmp,16 299 | D:/study/iris/data\041\1\041_1_1.bmp,41 300 | D:/study/iris/data\042\1\042_1_2.bmp,42 301 | D:/study/iris/data\034\1\034_1_1.bmp,34 302 | D:/study/iris/data\001\1\001_1_3.bmp,1 303 | D:/study/iris/data\060\1\060_1_1.bmp,60 304 | D:/study/iris/data\039\1\039_1_1.bmp,39 305 | D:/study/iris/data\063\1\063_1_3.bmp,63 306 | 
D:/study/iris/data\091\1\091_1_2.bmp,91 307 | D:/study/iris/data\047\1\047_1_2.bmp,47 308 | D:/study/iris/data\084\1\084_1_1.bmp,84 309 | D:/study/iris/data\070\1\070_1_3.bmp,70 310 | D:/study/iris/data\020\1\020_1_1.bmp,20 311 | D:/study/iris/data\071\1\071_1_1.bmp,71 312 | D:/study/iris/data\031\1\031_1_1.bmp,31 313 | D:/study/iris/data\027\1\027_1_1.bmp,27 314 | D:/study/iris/data\087\1\087_1_1.bmp,87 315 | D:/study/iris/data\075\1\075_1_3.bmp,75 316 | D:/study/iris/data\032\1\032_1_2.bmp,32 317 | D:/study/iris/data\009\1\009_1_2.bmp,9 318 | D:/study/iris/data\028\1\028_1_1.bmp,28 319 | D:/study/iris/data\077\1\077_1_3.bmp,77 320 | D:/study/iris/data\036\1\036_1_1.bmp,36 321 | D:/study/iris/data\074\1\074_1_3.bmp,74 322 | D:/study/iris/data\091\1\091_1_3.bmp,91 323 | D:/study/iris/data\104\1\104_1_2.bmp,104 324 | D:/study/iris/data\038\1\038_1_2.bmp,38 325 | D:/study/iris/data\005\1\005_1_3.bmp,5 326 | -------------------------------------------------------------------------------- /code/CNN_feature/inceptionv4.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Contains the definition of the Inception V4 architecture. 16 | As described in http://arxiv.org/abs/1602.07261. 
17 | Inception-v4, Inception-ResNet and the Impact of Residual Connections 18 | on Learning 19 | Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi 20 | """ 21 | from __future__ import absolute_import 22 | from __future__ import division 23 | from __future__ import print_function 24 | 25 | import tensorflow as tf 26 | 27 | import inception_utils 28 | 29 | slim = tf.contrib.slim 30 | 31 | 32 | def block_inception_a(inputs, scope=None, reuse=None): 33 | """Builds Inception-A block for Inception v4 network.""" 34 | # By default use stride=1 and SAME padding 35 | with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], 36 | stride=1, padding='SAME'): 37 | with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse): 38 | with tf.variable_scope('Branch_0'): 39 | branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1') 40 | with tf.variable_scope('Branch_1'): 41 | branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1') 42 | branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3') 43 | with tf.variable_scope('Branch_2'): 44 | branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1') 45 | branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3') 46 | branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3') 47 | with tf.variable_scope('Branch_3'): 48 | branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3') 49 | branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1') 50 | return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) 51 | 52 | 53 | def block_reduction_a(inputs, scope=None, reuse=None): 54 | """Builds Reduction-A block for Inception v4 network.""" 55 | # By default use stride=1 and SAME padding 56 | with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], 57 | stride=1, padding='SAME'): 58 | with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse): 59 | with tf.variable_scope('Branch_0'): 60 | branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID', 61 | scope='Conv2d_1a_3x3') 62 | with tf.variable_scope('Branch_1'): 63 | branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1') 64 | branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3') 65 | branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2, 66 | padding='VALID', scope='Conv2d_1a_3x3') 67 | with tf.variable_scope('Branch_2'): 68 | branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID', 69 | scope='MaxPool_1a_3x3') 70 | return tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) 71 | 72 | 73 | def block_inception_b(inputs, scope=None, reuse=None): 74 | """Builds Inception-B block for Inception v4 network.""" 75 | # By default use stride=1 and SAME padding 76 | with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], 77 | stride=1, padding='SAME'): 78 | with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse): 79 | with tf.variable_scope('Branch_0'): 80 | branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1') 81 | with tf.variable_scope('Branch_1'): 82 | branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1') 83 | branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7') 84 | branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1') 85 | with tf.variable_scope('Branch_2'): 86 | branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1') 87 | branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1') 88 | branch_2 = 
slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7') 89 | branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1') 90 | branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7') 91 | with tf.variable_scope('Branch_3'): 92 | branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3') 93 | branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1') 94 | return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) 95 | 96 | 97 | def block_reduction_b(inputs, scope=None, reuse=None): 98 | """Builds Reduction-B block for Inception v4 network.""" 99 | # By default use stride=1 and SAME padding 100 | with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], 101 | stride=1, padding='SAME'): 102 | with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse): 103 | with tf.variable_scope('Branch_0'): 104 | branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1') 105 | branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2, 106 | padding='VALID', scope='Conv2d_1a_3x3') 107 | with tf.variable_scope('Branch_1'): 108 | branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1') 109 | branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7') 110 | branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1') 111 | branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2, 112 | padding='VALID', scope='Conv2d_1a_3x3') 113 | with tf.variable_scope('Branch_2'): 114 | branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID', 115 | scope='MaxPool_1a_3x3') 116 | return tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) 117 | 118 | 119 | def block_inception_c(inputs, scope=None, reuse=None): 120 | """Builds Inception-C block for Inception v4 network.""" 121 | # By default use stride=1 and SAME padding 122 | with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], 123 | stride=1, padding='SAME'): 124 | with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse): 125 | with tf.variable_scope('Branch_0'): 126 | branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1') 127 | with tf.variable_scope('Branch_1'): 128 | branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1') 129 | branch_1 = tf.concat(axis=3, values=[ 130 | slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'), 131 | slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')]) 132 | with tf.variable_scope('Branch_2'): 133 | branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1') 134 | branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1') 135 | branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3') 136 | branch_2 = tf.concat(axis=3, values=[ 137 | slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'), 138 | slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')]) 139 | with tf.variable_scope('Branch_3'): 140 | branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3') 141 | branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1') 142 | return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) 143 | 144 | 145 | def inception_v4_base(inputs, final_endpoint='Mixed_6b', scope=None): 146 | """Creates the Inception V4 network up to the given final endpoint. 147 | Args: 148 | inputs: a 4-D tensor of size [batch_size, height, width, 3]. 149 | final_endpoint: specifies the endpoint to construct the network up to. 
150 | It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 151 | 'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 152 | 'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e', 153 | 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c', 154 | 'Mixed_7d'] 155 | scope: Optional variable_scope. 156 | Returns: 157 | logits: the logits outputs of the model. 158 | end_points: the set of end_points from the inception model. 159 | Raises: 160 | ValueError: if final_endpoint is not set to one of the predefined values, 161 | """ 162 | end_points = {} 163 | 164 | def add_and_check_final(name, net): 165 | end_points[name] = net 166 | return name == final_endpoint 167 | 168 | with tf.variable_scope(scope, 'InceptionV4', [inputs]): 169 | with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], 170 | stride=1, padding='SAME'): 171 | # 299 x 299 x 3 172 | net = slim.conv2d(inputs, 32, [3, 3], stride=2, 173 | padding='VALID', scope='Conv2d_1a_3x3') 174 | if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points 175 | # 149 x 149 x 32 176 | net = slim.conv2d(net, 32, [3, 3], padding='VALID', 177 | scope='Conv2d_2a_3x3') 178 | if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points 179 | # 147 x 147 x 32 180 | net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3') 181 | if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points 182 | # 147 x 147 x 64 183 | with tf.variable_scope('Mixed_3a'): 184 | with tf.variable_scope('Branch_0'): 185 | branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', 186 | scope='MaxPool_0a_3x3') 187 | with tf.variable_scope('Branch_1'): 188 | branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID', 189 | scope='Conv2d_0a_3x3') 190 | net = tf.concat(axis=3, values=[branch_0, branch_1]) 191 | print(net) 192 | if add_and_check_final('Mixed_3a', net): return net, end_points 193 | 194 | # 73 x 73 x 160 195 | with tf.variable_scope('Mixed_4a'): 196 | with tf.variable_scope('Branch_0'): 197 | branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1') 198 | branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID', 199 | scope='Conv2d_1a_3x3') 200 | with tf.variable_scope('Branch_1'): 201 | branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1') 202 | branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7') 203 | branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1') 204 | branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID', 205 | scope='Conv2d_1a_3x3') 206 | net = tf.concat(axis=3, values=[branch_0, branch_1]) 207 | if add_and_check_final('Mixed_4a', net): return net, end_points 208 | 209 | # 71 x 71 x 192 210 | with tf.variable_scope('Mixed_5a'): 211 | with tf.variable_scope('Branch_0'): 212 | branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID', 213 | scope='Conv2d_1a_3x3') 214 | with tf.variable_scope('Branch_1'): 215 | branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', 216 | scope='MaxPool_1a_3x3') 217 | net = tf.concat(axis=3, values=[branch_0, branch_1]) 218 | if add_and_check_final('Mixed_5a', net): return net, end_points 219 | 220 | # 35 x 35 x 384 221 | # 4 x Inception-A blocks 222 | for idx in range(4): 223 | block_scope = 'Mixed_5' + chr(ord('b') + idx) 224 | net = block_inception_a(net, block_scope) 225 | if add_and_check_final(block_scope, net): return net, end_points 226 | 227 | # 35 x 35 x 384 228 | # Reduction-A block 229 | net = block_reduction_a(net, 
'Mixed_6a') 230 | if add_and_check_final('Mixed_6a', net): return net, end_points 231 | 232 | # 17 x 17 x 1024 233 | # 7 x Inception-B blocks 234 | for idx in range(7): 235 | block_scope = 'Mixed_6' + chr(ord('b') + idx) 236 | net = block_inception_b(net, block_scope) 237 | if add_and_check_final(block_scope, net): return net, end_points 238 | 239 | # 17 x 17 x 1024 240 | # Reduction-B block 241 | net = block_reduction_b(net, 'Mixed_7a') 242 | if add_and_check_final('Mixed_7a', net): return net, end_points 243 | 244 | # 8 x 8 x 1536 245 | # 3 x Inception-C blocks 246 | for idx in range(3): 247 | block_scope = 'Mixed_7' + chr(ord('b') + idx) 248 | net = block_inception_c(net, block_scope) 249 | if add_and_check_final(block_scope, net): return net, end_points 250 | raise ValueError('Unknown final endpoint %s' % final_endpoint) 251 | 252 | 253 | def inception_v4(inputs, num_classes=1001, is_training=True, 254 | dropout_keep_prob=0.8, 255 | reuse=None, 256 | scope='InceptionV4', 257 | create_aux_logits=True): 258 | """Creates the Inception V4 model. 259 | Args: 260 | inputs: a 4-D tensor of size [batch_size, height, width, 3]. 261 | num_classes: number of predicted classes. If 0 or None, the logits layer 262 | is omitted and the input features to the logits layer (before dropout) 263 | are returned instead. 264 | is_training: whether is training or not. 265 | dropout_keep_prob: float, the fraction to keep before final layer. 266 | reuse: whether or not the network and its variables should be reused. To be 267 | able to reuse 'scope' must be given. 268 | scope: Optional variable_scope. 269 | create_aux_logits: Whether to include the auxiliary logits. 270 | Returns: 271 | net: a Tensor with the logits (pre-softmax activations) if num_classes 272 | is a non-zero integer, or the non-dropped input to the logits layer 273 | if num_classes is 0 or None. 274 | end_points: the set of end_points from the inception model. 275 | """ 276 | end_points = {} 277 | with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope: 278 | with slim.arg_scope([slim.batch_norm, slim.dropout], 279 | is_training=is_training): 280 | net, end_points = inception_v4_base(inputs, scope=scope) 281 | 282 | with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], 283 | stride=1, padding='SAME'): 284 | # Auxiliary Head logits 285 | if create_aux_logits and num_classes: 286 | with tf.variable_scope('AuxLogits'): 287 | # 17 x 17 x 1024 288 | aux_logits = end_points['Mixed_6h'] 289 | aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3, 290 | padding='VALID', 291 | scope='AvgPool_1a_5x5') 292 | aux_logits = slim.conv2d(aux_logits, 128, [1, 1], 293 | scope='Conv2d_1b_1x1') 294 | aux_logits = slim.conv2d(aux_logits, 768, 295 | aux_logits.get_shape()[1:3], 296 | padding='VALID', scope='Conv2d_2a') 297 | aux_logits = slim.flatten(aux_logits) 298 | aux_logits = slim.fully_connected(aux_logits, num_classes, 299 | activation_fn=None, 300 | scope='Aux_logits') 301 | end_points['AuxLogits'] = aux_logits 302 | 303 | # Final pooling and prediction 304 | # TODO(sguada,arnoegw): Consider adding a parameter global_pool which 305 | # can be set to False to disable pooling here (as in resnet_*()). 
306 | with tf.variable_scope('Logits'): 307 | # 8 x 8 x 1536 308 | kernel_size = net.get_shape()[1:3] 309 | if kernel_size.is_fully_defined(): 310 | net = slim.avg_pool2d(net, kernel_size, padding='VALID', 311 | scope='AvgPool_1a') 312 | else: 313 | net = tf.reduce_mean(net, [1, 2], keep_dims=True, 314 | name='global_pool') 315 | end_points['global_pool'] = net 316 | if not num_classes: 317 | return net, end_points 318 | # 1 x 1 x 1536 319 | net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b') 320 | net = slim.flatten(net, scope='PreLogitsFlatten') 321 | end_points['PreLogitsFlatten'] = net 322 | # 1536 323 | logits = slim.fully_connected(net, num_classes, activation_fn=None, 324 | scope='Logits') 325 | end_points['Logits'] = logits 326 | end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions') 327 | return logits, end_points 328 | inception_v4.default_image_size = 299 329 | 330 | 331 | inception_v4_arg_scope = inception_utils.inception_arg_scope -------------------------------------------------------------------------------- /csv/test1.csv: -------------------------------------------------------------------------------- 1 | img,label 2 | D:/study/iris/data\055\2\055_2_2.bmp,55 3 | D:/study/iris/data\006\2\006_2_1.bmp,6 4 | D:/study/iris/data\036\2\036_2_2.bmp,36 5 | D:/study/iris/data\078\2\078_2_1.bmp,78 6 | D:/study/iris/data\014\2\014_2_4.bmp,14 7 | D:/study/iris/data\047\2\047_2_2.bmp,47 8 | D:/study/iris/data\049\2\049_2_3.bmp,49 9 | D:/study/iris/data\033\2\033_2_4.bmp,33 10 | D:/study/iris/data\080\2\080_2_2.bmp,80 11 | D:/study/iris/data\040\2\040_2_1.bmp,40 12 | D:/study/iris/data\063\2\063_2_1.bmp,63 13 | D:/study/iris/data\068\2\068_2_4.bmp,68 14 | D:/study/iris/data\046\2\046_2_4.bmp,46 15 | D:/study/iris/data\005\2\005_2_4.bmp,5 16 | D:/study/iris/data\089\2\089_2_3.bmp,89 17 | D:/study/iris/data\014\2\014_2_2.bmp,14 18 | D:/study/iris/data\057\2\057_2_3.bmp,57 19 | D:/study/iris/data\101\2\101_2_3.bmp,101 20 | D:/study/iris/data\093\2\093_2_4.bmp,93 21 | D:/study/iris/data\048\2\048_2_2.bmp,48 22 | D:/study/iris/data\029\2\029_2_1.bmp,29 23 | D:/study/iris/data\101\2\101_2_4.bmp,101 24 | D:/study/iris/data\034\2\034_2_1.bmp,34 25 | D:/study/iris/data\027\2\027_2_3.bmp,27 26 | D:/study/iris/data\043\2\043_2_3.bmp,43 27 | D:/study/iris/data\089\2\089_2_2.bmp,89 28 | D:/study/iris/data\017\2\017_2_2.bmp,17 29 | D:/study/iris/data\017\2\017_2_1.bmp,17 30 | D:/study/iris/data\021\2\021_2_4.bmp,21 31 | D:/study/iris/data\033\2\033_2_1.bmp,33 32 | D:/study/iris/data\056\2\056_2_2.bmp,56 33 | D:/study/iris/data\044\2\044_2_3.bmp,44 34 | D:/study/iris/data\001\2\001_2_2.bmp,1 35 | D:/study/iris/data\037\2\037_2_4.bmp,37 36 | D:/study/iris/data\057\2\057_2_1.bmp,57 37 | D:/study/iris/data\057\2\057_2_4.bmp,57 38 | D:/study/iris/data\054\2\054_2_3.bmp,54 39 | D:/study/iris/data\035\2\035_2_3.bmp,35 40 | D:/study/iris/data\061\2\061_2_1.bmp,61 41 | D:/study/iris/data\082\2\082_2_2.bmp,82 42 | D:/study/iris/data\006\2\006_2_2.bmp,6 43 | D:/study/iris/data\082\2\082_2_1.bmp,82 44 | D:/study/iris/data\001\2\001_2_4.bmp,1 45 | D:/study/iris/data\044\2\044_2_2.bmp,44 46 | D:/study/iris/data\026\2\026_2_2.bmp,26 47 | D:/study/iris/data\011\2\011_2_3.bmp,11 48 | D:/study/iris/data\043\2\043_2_2.bmp,43 49 | D:/study/iris/data\093\2\093_2_3.bmp,93 50 | D:/study/iris/data\077\2\077_2_3.bmp,77 51 | D:/study/iris/data\002\2\002_2_4.bmp,2 52 | D:/study/iris/data\108\2\108_2_4.bmp,108 53 | D:/study/iris/data\071\2\071_2_1.bmp,71 54 | D:/study/iris/data\061\2\061_2_3.bmp,61 55 | 
D:/study/iris/data\072\2\072_2_3.bmp,72 56 | D:/study/iris/data\013\2\013_2_1.bmp,13 57 | D:/study/iris/data\084\2\084_2_3.bmp,84 58 | D:/study/iris/data\084\2\084_2_1.bmp,84 59 | D:/study/iris/data\076\2\076_2_4.bmp,76 60 | D:/study/iris/data\020\2\020_2_3.bmp,20 61 | D:/study/iris/data\009\2\009_2_4.bmp,9 62 | D:/study/iris/data\047\2\047_2_3.bmp,47 63 | D:/study/iris/data\027\2\027_2_4.bmp,27 64 | D:/study/iris/data\002\2\002_2_2.bmp,2 65 | D:/study/iris/data\076\2\076_2_3.bmp,76 66 | D:/study/iris/data\099\2\099_2_1.bmp,99 67 | D:/study/iris/data\036\2\036_2_4.bmp,36 68 | D:/study/iris/data\062\2\062_2_3.bmp,62 69 | D:/study/iris/data\105\2\105_2_3.bmp,105 70 | D:/study/iris/data\022\2\022_2_1.bmp,22 71 | D:/study/iris/data\053\2\053_2_3.bmp,53 72 | D:/study/iris/data\083\2\083_2_1.bmp,83 73 | D:/study/iris/data\064\2\064_2_3.bmp,64 74 | D:/study/iris/data\056\2\056_2_1.bmp,56 75 | D:/study/iris/data\054\2\054_2_4.bmp,54 76 | D:/study/iris/data\042\2\042_2_3.bmp,42 77 | D:/study/iris/data\023\2\023_2_1.bmp,23 78 | D:/study/iris/data\056\2\056_2_4.bmp,56 79 | D:/study/iris/data\049\2\049_2_1.bmp,49 80 | D:/study/iris/data\056\2\056_2_3.bmp,56 81 | D:/study/iris/data\008\2\008_2_2.bmp,8 82 | D:/study/iris/data\041\2\041_2_2.bmp,41 83 | D:/study/iris/data\044\2\044_2_1.bmp,44 84 | D:/study/iris/data\025\2\025_2_1.bmp,25 85 | D:/study/iris/data\029\2\029_2_3.bmp,29 86 | D:/study/iris/data\048\2\048_2_4.bmp,48 87 | D:/study/iris/data\025\2\025_2_4.bmp,25 88 | D:/study/iris/data\105\2\105_2_4.bmp,105 89 | D:/study/iris/data\059\2\059_2_2.bmp,59 90 | D:/study/iris/data\072\2\072_2_1.bmp,72 91 | D:/study/iris/data\076\2\076_2_1.bmp,76 92 | D:/study/iris/data\063\2\063_2_2.bmp,63 93 | D:/study/iris/data\061\2\061_2_4.bmp,61 94 | D:/study/iris/data\040\2\040_2_4.bmp,40 95 | D:/study/iris/data\012\2\012_2_3.bmp,12 96 | D:/study/iris/data\007\2\007_2_4.bmp,7 97 | D:/study/iris/data\093\2\093_2_2.bmp,93 98 | D:/study/iris/data\078\2\078_2_3.bmp,78 99 | D:/study/iris/data\008\2\008_2_1.bmp,8 100 | D:/study/iris/data\008\2\008_2_3.bmp,8 101 | D:/study/iris/data\102\2\102_2_3.bmp,102 102 | D:/study/iris/data\016\2\016_2_4.bmp,16 103 | D:/study/iris/data\094\2\094_2_3.bmp,94 104 | D:/study/iris/data\044\2\044_2_4.bmp,44 105 | D:/study/iris/data\058\2\058_2_4.bmp,58 106 | D:/study/iris/data\015\2\015_2_1.bmp,15 107 | D:/study/iris/data\041\2\041_2_4.bmp,41 108 | D:/study/iris/data\087\2\087_2_3.bmp,87 109 | D:/study/iris/data\021\2\021_2_3.bmp,21 110 | D:/study/iris/data\046\2\046_2_1.bmp,46 111 | D:/study/iris/data\102\2\102_2_4.bmp,102 112 | D:/study/iris/data\087\2\087_2_2.bmp,87 113 | D:/study/iris/data\066\2\066_2_2.bmp,66 114 | D:/study/iris/data\081\2\081_2_4.bmp,81 115 | D:/study/iris/data\022\2\022_2_3.bmp,22 116 | D:/study/iris/data\079\2\079_2_2.bmp,79 117 | D:/study/iris/data\024\2\024_2_4.bmp,24 118 | D:/study/iris/data\103\2\103_2_2.bmp,103 119 | D:/study/iris/data\040\2\040_2_3.bmp,40 120 | D:/study/iris/data\059\2\059_2_3.bmp,59 121 | D:/study/iris/data\034\2\034_2_3.bmp,34 122 | D:/study/iris/data\102\2\102_2_1.bmp,102 123 | D:/study/iris/data\083\2\083_2_4.bmp,83 124 | D:/study/iris/data\062\2\062_2_4.bmp,62 125 | D:/study/iris/data\051\2\051_2_2.bmp,51 126 | D:/study/iris/data\046\2\046_2_3.bmp,46 127 | D:/study/iris/data\013\2\013_2_4.bmp,13 128 | D:/study/iris/data\053\2\053_2_4.bmp,53 129 | D:/study/iris/data\003\2\003_2_1.bmp,3 130 | D:/study/iris/data\079\2\079_2_4.bmp,79 131 | D:/study/iris/data\106\2\106_2_1.bmp,106 132 | D:/study/iris/data\073\2\073_2_4.bmp,73 133 | 
D:/study/iris/data\045\2\045_2_4.bmp,45 134 | D:/study/iris/data\075\2\075_2_4.bmp,75 135 | D:/study/iris/data\086\2\086_2_3.bmp,86 136 | D:/study/iris/data\023\2\023_2_3.bmp,23 137 | D:/study/iris/data\096\2\096_2_3.bmp,96 138 | D:/study/iris/data\094\2\094_2_4.bmp,94 139 | D:/study/iris/data\099\2\099_2_3.bmp,99 140 | D:/study/iris/data\007\2\007_2_3.bmp,7 141 | D:/study/iris/data\096\2\096_2_1.bmp,96 142 | D:/study/iris/data\107\2\107_2_1.bmp,107 143 | D:/study/iris/data\020\2\020_2_4.bmp,20 144 | D:/study/iris/data\010\2\010_2_4.bmp,10 145 | D:/study/iris/data\017\2\017_2_4.bmp,17 146 | D:/study/iris/data\062\2\062_2_1.bmp,62 147 | D:/study/iris/data\085\2\085_2_4.bmp,85 148 | D:/study/iris/data\091\2\091_2_3.bmp,91 149 | D:/study/iris/data\009\2\009_2_2.bmp,9 150 | D:/study/iris/data\076\2\076_2_2.bmp,76 151 | D:/study/iris/data\074\2\074_2_3.bmp,74 152 | D:/study/iris/data\036\2\036_2_1.bmp,36 153 | D:/study/iris/data\084\2\084_2_2.bmp,84 154 | D:/study/iris/data\043\2\043_2_1.bmp,43 155 | D:/study/iris/data\066\2\066_2_4.bmp,66 156 | D:/study/iris/data\097\2\097_2_3.bmp,97 157 | D:/study/iris/data\095\2\095_2_3.bmp,95 158 | D:/study/iris/data\034\2\034_2_2.bmp,34 159 | D:/study/iris/data\020\2\020_2_1.bmp,20 160 | D:/study/iris/data\065\2\065_2_4.bmp,65 161 | D:/study/iris/data\095\2\095_2_1.bmp,95 162 | D:/study/iris/data\108\2\108_2_3.bmp,108 163 | D:/study/iris/data\097\2\097_2_4.bmp,97 164 | D:/study/iris/data\103\2\103_2_1.bmp,103 165 | D:/study/iris/data\052\2\052_2_4.bmp,52 166 | D:/study/iris/data\052\2\052_2_1.bmp,52 167 | D:/study/iris/data\075\2\075_2_3.bmp,75 168 | D:/study/iris/data\018\2\018_2_3.bmp,18 169 | D:/study/iris/data\065\2\065_2_3.bmp,65 170 | D:/study/iris/data\098\2\098_2_1.bmp,98 171 | D:/study/iris/data\064\2\064_2_1.bmp,64 172 | D:/study/iris/data\050\2\050_2_1.bmp,50 173 | D:/study/iris/data\080\2\080_2_3.bmp,80 174 | D:/study/iris/data\049\2\049_2_4.bmp,49 175 | D:/study/iris/data\039\2\039_2_1.bmp,39 176 | D:/study/iris/data\004\2\004_2_3.bmp,4 177 | D:/study/iris/data\013\2\013_2_3.bmp,13 178 | D:/study/iris/data\086\2\086_2_1.bmp,86 179 | D:/study/iris/data\067\2\067_2_2.bmp,67 180 | D:/study/iris/data\054\2\054_2_1.bmp,54 181 | D:/study/iris/data\038\2\038_2_1.bmp,38 182 | D:/study/iris/data\099\2\099_2_4.bmp,99 183 | D:/study/iris/data\018\2\018_2_2.bmp,18 184 | D:/study/iris/data\030\2\030_2_2.bmp,30 185 | D:/study/iris/data\103\2\103_2_4.bmp,103 186 | D:/study/iris/data\058\2\058_2_3.bmp,58 187 | D:/study/iris/data\006\2\006_2_3.bmp,6 188 | D:/study/iris/data\031\2\031_2_4.bmp,31 189 | D:/study/iris/data\071\2\071_2_3.bmp,71 190 | D:/study/iris/data\075\2\075_2_2.bmp,75 191 | D:/study/iris/data\063\2\063_2_3.bmp,63 192 | D:/study/iris/data\091\2\091_2_2.bmp,91 193 | D:/study/iris/data\077\2\077_2_1.bmp,77 194 | D:/study/iris/data\100\2\100_2_1.bmp,100 195 | D:/study/iris/data\103\2\103_2_3.bmp,103 196 | D:/study/iris/data\011\2\011_2_1.bmp,11 197 | D:/study/iris/data\011\2\011_2_2.bmp,11 198 | D:/study/iris/data\003\2\003_2_3.bmp,3 199 | D:/study/iris/data\038\2\038_2_4.bmp,38 200 | D:/study/iris/data\021\2\021_2_1.bmp,21 201 | D:/study/iris/data\085\2\085_2_1.bmp,85 202 | D:/study/iris/data\066\2\066_2_3.bmp,66 203 | D:/study/iris/data\070\2\070_2_4.bmp,70 204 | D:/study/iris/data\019\2\019_2_1.bmp,19 205 | D:/study/iris/data\072\2\072_2_4.bmp,72 206 | D:/study/iris/data\027\2\027_2_2.bmp,27 207 | D:/study/iris/data\091\2\091_2_4.bmp,91 208 | D:/study/iris/data\028\2\028_2_2.bmp,28 209 | D:/study/iris/data\013\2\013_2_2.bmp,13 210 | 
D:/study/iris/data\003\2\003_2_4.bmp,3 211 | D:/study/iris/data\071\2\071_2_4.bmp,71 212 | D:/study/iris/data\081\2\081_2_2.bmp,81 213 | D:/study/iris/data\067\2\067_2_3.bmp,67 214 | D:/study/iris/data\070\2\070_2_2.bmp,70 215 | D:/study/iris/data\059\2\059_2_4.bmp,59 216 | D:/study/iris/data\021\2\021_2_2.bmp,21 217 | D:/study/iris/data\027\2\027_2_1.bmp,27 218 | D:/study/iris/data\099\2\099_2_2.bmp,99 219 | D:/study/iris/data\106\2\106_2_4.bmp,106 220 | D:/study/iris/data\015\2\015_2_4.bmp,15 221 | D:/study/iris/data\012\2\012_2_4.bmp,12 222 | D:/study/iris/data\092\2\092_2_2.bmp,92 223 | D:/study/iris/data\098\2\098_2_4.bmp,98 224 | D:/study/iris/data\012\2\012_2_1.bmp,12 225 | D:/study/iris/data\019\2\019_2_4.bmp,19 226 | D:/study/iris/data\047\2\047_2_4.bmp,47 227 | D:/study/iris/data\048\2\048_2_1.bmp,48 228 | D:/study/iris/data\070\2\070_2_1.bmp,70 229 | D:/study/iris/data\082\2\082_2_4.bmp,82 230 | D:/study/iris/data\042\2\042_2_1.bmp,42 231 | D:/study/iris/data\065\2\065_2_1.bmp,65 232 | D:/study/iris/data\064\2\064_2_4.bmp,64 233 | D:/study/iris/data\060\2\060_2_4.bmp,60 234 | D:/study/iris/data\101\2\101_2_1.bmp,101 235 | D:/study/iris/data\031\2\031_2_1.bmp,31 236 | D:/study/iris/data\022\2\022_2_4.bmp,22 237 | D:/study/iris/data\023\2\023_2_4.bmp,23 238 | D:/study/iris/data\050\2\050_2_2.bmp,50 239 | D:/study/iris/data\025\2\025_2_2.bmp,25 240 | D:/study/iris/data\035\2\035_2_1.bmp,35 241 | D:/study/iris/data\048\2\048_2_3.bmp,48 242 | D:/study/iris/data\026\2\026_2_3.bmp,26 243 | D:/study/iris/data\050\2\050_2_4.bmp,50 244 | D:/study/iris/data\077\2\077_2_4.bmp,77 245 | D:/study/iris/data\030\2\030_2_3.bmp,30 246 | D:/study/iris/data\085\2\085_2_3.bmp,85 247 | D:/study/iris/data\058\2\058_2_2.bmp,58 248 | D:/study/iris/data\053\2\053_2_1.bmp,53 249 | D:/study/iris/data\094\2\094_2_2.bmp,94 250 | D:/study/iris/data\077\2\077_2_2.bmp,77 251 | D:/study/iris/data\070\2\070_2_3.bmp,70 252 | D:/study/iris/data\002\2\002_2_1.bmp,2 253 | D:/study/iris/data\088\2\088_2_1.bmp,88 254 | D:/study/iris/data\104\2\104_2_1.bmp,104 255 | D:/study/iris/data\095\2\095_2_2.bmp,95 256 | D:/study/iris/data\026\2\026_2_1.bmp,26 257 | D:/study/iris/data\005\2\005_2_1.bmp,5 258 | D:/study/iris/data\062\2\062_2_2.bmp,62 259 | D:/study/iris/data\038\2\038_2_3.bmp,38 260 | D:/study/iris/data\090\2\090_2_4.bmp,90 261 | D:/study/iris/data\094\2\094_2_1.bmp,94 262 | D:/study/iris/data\073\2\073_2_2.bmp,73 263 | D:/study/iris/data\052\2\052_2_3.bmp,52 264 | D:/study/iris/data\068\2\068_2_2.bmp,68 265 | D:/study/iris/data\069\2\069_2_3.bmp,69 266 | D:/study/iris/data\072\2\072_2_2.bmp,72 267 | D:/study/iris/data\095\2\095_2_4.bmp,95 268 | D:/study/iris/data\005\2\005_2_2.bmp,5 269 | D:/study/iris/data\088\2\088_2_2.bmp,88 270 | D:/study/iris/data\089\2\089_2_4.bmp,89 271 | D:/study/iris/data\090\2\090_2_1.bmp,90 272 | D:/study/iris/data\012\2\012_2_2.bmp,12 273 | D:/study/iris/data\107\2\107_2_2.bmp,107 274 | D:/study/iris/data\036\2\036_2_3.bmp,36 275 | D:/study/iris/data\003\2\003_2_2.bmp,3 276 | D:/study/iris/data\004\2\004_2_1.bmp,4 277 | D:/study/iris/data\051\2\051_2_3.bmp,51 278 | D:/study/iris/data\010\2\010_2_1.bmp,10 279 | D:/study/iris/data\106\2\106_2_2.bmp,106 280 | D:/study/iris/data\075\2\075_2_1.bmp,75 281 | D:/study/iris/data\078\2\078_2_4.bmp,78 282 | D:/study/iris/data\057\2\057_2_2.bmp,57 283 | D:/study/iris/data\032\2\032_2_4.bmp,32 284 | D:/study/iris/data\090\2\090_2_2.bmp,90 285 | D:/study/iris/data\016\2\016_2_3.bmp,16 286 | D:/study/iris/data\069\2\069_2_4.bmp,69 287 | 
D:/study/iris/data\086\2\086_2_4.bmp,86 288 | D:/study/iris/data\108\2\108_2_1.bmp,108 289 | D:/study/iris/data\096\2\096_2_4.bmp,96 290 | D:/study/iris/data\067\2\067_2_1.bmp,67 291 | D:/study/iris/data\105\2\105_2_2.bmp,105 292 | D:/study/iris/data\041\2\041_2_3.bmp,41 293 | D:/study/iris/data\107\2\107_2_4.bmp,107 294 | D:/study/iris/data\067\2\067_2_4.bmp,67 295 | D:/study/iris/data\068\2\068_2_1.bmp,68 296 | D:/study/iris/data\014\2\014_2_3.bmp,14 297 | D:/study/iris/data\051\2\051_2_4.bmp,51 298 | D:/study/iris/data\016\2\016_2_2.bmp,16 299 | D:/study/iris/data\045\2\045_2_1.bmp,45 300 | D:/study/iris/data\053\2\053_2_2.bmp,53 301 | D:/study/iris/data\096\2\096_2_2.bmp,96 302 | D:/study/iris/data\066\2\066_2_1.bmp,66 303 | D:/study/iris/data\083\2\083_2_2.bmp,83 304 | D:/study/iris/data\098\2\098_2_2.bmp,98 305 | D:/study/iris/data\017\2\017_2_3.bmp,17 306 | D:/study/iris/data\007\2\007_2_2.bmp,7 307 | D:/study/iris/data\030\2\030_2_4.bmp,30 308 | D:/study/iris/data\023\2\023_2_2.bmp,23 309 | D:/study/iris/data\060\2\060_2_3.bmp,60 310 | D:/study/iris/data\032\2\032_2_1.bmp,32 311 | D:/study/iris/data\035\2\035_2_2.bmp,35 312 | D:/study/iris/data\039\2\039_2_2.bmp,39 313 | D:/study/iris/data\039\2\039_2_4.bmp,39 314 | D:/study/iris/data\035\2\035_2_4.bmp,35 315 | D:/study/iris/data\073\2\073_2_3.bmp,73 316 | D:/study/iris/data\092\2\092_2_3.bmp,92 317 | D:/study/iris/data\038\2\038_2_2.bmp,38 318 | D:/study/iris/data\092\2\092_2_4.bmp,92 319 | D:/study/iris/data\071\2\071_2_2.bmp,71 320 | D:/study/iris/data\079\2\079_2_1.bmp,79 321 | D:/study/iris/data\010\2\010_2_2.bmp,10 322 | D:/study/iris/data\060\2\060_2_2.bmp,60 323 | D:/study/iris/data\028\2\028_2_4.bmp,28 324 | D:/study/iris/data\100\2\100_2_3.bmp,100 325 | D:/study/iris/data\014\2\014_2_1.bmp,14 326 | D:/study/iris/data\018\2\018_2_4.bmp,18 327 | D:/study/iris/data\030\2\030_2_1.bmp,30 328 | D:/study/iris/data\087\2\087_2_1.bmp,87 329 | D:/study/iris/data\085\2\085_2_2.bmp,85 330 | D:/study/iris/data\034\2\034_2_4.bmp,34 331 | D:/study/iris/data\024\2\024_2_1.bmp,24 332 | D:/study/iris/data\089\2\089_2_1.bmp,89 333 | D:/study/iris/data\008\2\008_2_4.bmp,8 334 | D:/study/iris/data\049\2\049_2_2.bmp,49 335 | D:/study/iris/data\098\2\098_2_3.bmp,98 336 | D:/study/iris/data\016\2\016_2_1.bmp,16 337 | D:/study/iris/data\100\2\100_2_4.bmp,100 338 | D:/study/iris/data\086\2\086_2_2.bmp,86 339 | D:/study/iris/data\031\2\031_2_2.bmp,31 340 | D:/study/iris/data\074\2\074_2_2.bmp,74 341 | D:/study/iris/data\042\2\042_2_4.bmp,42 342 | D:/study/iris/data\055\2\055_2_3.bmp,55 343 | D:/study/iris/data\074\2\074_2_4.bmp,74 344 | D:/study/iris/data\033\2\033_2_3.bmp,33 345 | D:/study/iris/data\028\2\028_2_3.bmp,28 346 | D:/study/iris/data\039\2\039_2_3.bmp,39 347 | D:/study/iris/data\007\2\007_2_1.bmp,7 348 | D:/study/iris/data\107\2\107_2_3.bmp,107 349 | D:/study/iris/data\091\2\091_2_1.bmp,91 350 | D:/study/iris/data\019\2\019_2_2.bmp,19 351 | D:/study/iris/data\032\2\032_2_3.bmp,32 352 | D:/study/iris/data\088\2\088_2_3.bmp,88 353 | D:/study/iris/data\073\2\073_2_1.bmp,73 354 | D:/study/iris/data\001\2\001_2_1.bmp,1 355 | D:/study/iris/data\068\2\068_2_3.bmp,68 356 | D:/study/iris/data\037\2\037_2_2.bmp,37 357 | D:/study/iris/data\037\2\037_2_3.bmp,37 358 | D:/study/iris/data\047\2\047_2_1.bmp,47 359 | D:/study/iris/data\024\2\024_2_2.bmp,24 360 | D:/study/iris/data\015\2\015_2_3.bmp,15 361 | D:/study/iris/data\059\2\059_2_1.bmp,59 362 | D:/study/iris/data\025\2\025_2_3.bmp,25 363 | D:/study/iris/data\051\2\051_2_1.bmp,51 364 | 
D:/study/iris/data\108\2\108_2_2.bmp,108 365 | D:/study/iris/data\001\2\001_2_3.bmp,1 366 | D:/study/iris/data\092\2\092_2_1.bmp,92 367 | D:/study/iris/data\026\2\026_2_4.bmp,26 368 | D:/study/iris/data\079\2\079_2_3.bmp,79 369 | D:/study/iris/data\004\2\004_2_2.bmp,4 370 | D:/study/iris/data\019\2\019_2_3.bmp,19 371 | D:/study/iris/data\015\2\015_2_2.bmp,15 372 | D:/study/iris/data\045\2\045_2_3.bmp,45 373 | D:/study/iris/data\083\2\083_2_3.bmp,83 374 | D:/study/iris/data\006\2\006_2_4.bmp,6 375 | D:/study/iris/data\037\2\037_2_1.bmp,37 376 | D:/study/iris/data\046\2\046_2_2.bmp,46 377 | D:/study/iris/data\106\2\106_2_3.bmp,106 378 | D:/study/iris/data\058\2\058_2_1.bmp,58 379 | D:/study/iris/data\104\2\104_2_2.bmp,104 380 | D:/study/iris/data\033\2\033_2_2.bmp,33 381 | D:/study/iris/data\097\2\097_2_2.bmp,97 382 | D:/study/iris/data\087\2\087_2_4.bmp,87 383 | D:/study/iris/data\032\2\032_2_2.bmp,32 384 | D:/study/iris/data\090\2\090_2_3.bmp,90 385 | D:/study/iris/data\052\2\052_2_2.bmp,52 386 | D:/study/iris/data\060\2\060_2_1.bmp,60 387 | D:/study/iris/data\055\2\055_2_1.bmp,55 388 | D:/study/iris/data\029\2\029_2_4.bmp,29 389 | D:/study/iris/data\020\2\020_2_2.bmp,20 390 | D:/study/iris/data\042\2\042_2_2.bmp,42 391 | D:/study/iris/data\088\2\088_2_4.bmp,88 392 | D:/study/iris/data\097\2\097_2_1.bmp,97 393 | D:/study/iris/data\102\2\102_2_2.bmp,102 394 | D:/study/iris/data\028\2\028_2_1.bmp,28 395 | D:/study/iris/data\005\2\005_2_3.bmp,5 396 | D:/study/iris/data\080\2\080_2_4.bmp,80 397 | D:/study/iris/data\069\2\069_2_1.bmp,69 398 | D:/study/iris/data\045\2\045_2_2.bmp,45 399 | D:/study/iris/data\009\2\009_2_1.bmp,9 400 | D:/study/iris/data\022\2\022_2_2.bmp,22 401 | D:/study/iris/data\084\2\084_2_4.bmp,84 402 | D:/study/iris/data\074\2\074_2_1.bmp,74 403 | D:/study/iris/data\050\2\050_2_3.bmp,50 404 | D:/study/iris/data\055\2\055_2_4.bmp,55 405 | D:/study/iris/data\009\2\009_2_3.bmp,9 406 | D:/study/iris/data\043\2\043_2_4.bmp,43 407 | D:/study/iris/data\064\2\064_2_2.bmp,64 408 | D:/study/iris/data\081\2\081_2_3.bmp,81 409 | D:/study/iris/data\081\2\081_2_1.bmp,81 410 | D:/study/iris/data\104\2\104_2_4.bmp,104 411 | D:/study/iris/data\100\2\100_2_2.bmp,100 412 | D:/study/iris/data\065\2\065_2_2.bmp,65 413 | D:/study/iris/data\054\2\054_2_2.bmp,54 414 | D:/study/iris/data\040\2\040_2_2.bmp,40 415 | D:/study/iris/data\010\2\010_2_3.bmp,10 416 | D:/study/iris/data\041\2\041_2_1.bmp,41 417 | D:/study/iris/data\082\2\082_2_3.bmp,82 418 | D:/study/iris/data\078\2\078_2_2.bmp,78 419 | D:/study/iris/data\031\2\031_2_3.bmp,31 420 | D:/study/iris/data\018\2\018_2_1.bmp,18 421 | D:/study/iris/data\101\2\101_2_2.bmp,101 422 | D:/study/iris/data\029\2\029_2_2.bmp,29 423 | D:/study/iris/data\002\2\002_2_3.bmp,2 424 | D:/study/iris/data\063\2\063_2_4.bmp,63 425 | D:/study/iris/data\104\2\104_2_3.bmp,104 426 | D:/study/iris/data\024\2\024_2_3.bmp,24 427 | D:/study/iris/data\080\2\080_2_1.bmp,80 428 | D:/study/iris/data\004\2\004_2_4.bmp,4 429 | D:/study/iris/data\061\2\061_2_2.bmp,61 430 | D:/study/iris/data\093\2\093_2_1.bmp,93 431 | D:/study/iris/data\011\2\011_2_4.bmp,11 432 | D:/study/iris/data\105\2\105_2_1.bmp,105 433 | D:/study/iris/data\069\2\069_2_2.bmp,69 434 | -------------------------------------------------------------------------------- /code/CNN_feature/1.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Generic training script that trains a model using a given dataset.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import tensorflow as tf 22 | 23 | from datasets import dataset_factory 24 | from deployment import model_deploy 25 | from nets import nets_factory 26 | from preprocessing import preprocessing_factory 27 | 28 | slim = tf.contrib.slim 29 | 30 | tf.app.flags.DEFINE_string( 31 | 'master', '', 'The address of the TensorFlow master to use.') 32 | 33 | tf.app.flags.DEFINE_string( 34 | 'train_dir', '/tmp/tfmodel/', 35 | 'Directory where checkpoints and event logs are written to.') 36 | 37 | tf.app.flags.DEFINE_integer('num_clones', 1, 38 | 'Number of model clones to deploy.') 39 | 40 | tf.app.flags.DEFINE_boolean('clone_on_cpu', False, 41 | 'Use CPUs to deploy clones.') 42 | 43 | tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.') 44 | 45 | tf.app.flags.DEFINE_integer( 46 | 'num_ps_tasks', 0, 47 | 'The number of parameter servers. If the value is 0, then the parameters ' 48 | 'are handled locally by the worker.') 49 | 50 | tf.app.flags.DEFINE_integer( 51 | 'num_readers', 4, 52 | 'The number of parallel readers that read data from the dataset.') 53 | 54 | tf.app.flags.DEFINE_integer( 55 | 'num_preprocessing_threads', 4, 56 | 'The number of threads used to create the batches.') 57 | 58 | tf.app.flags.DEFINE_integer( 59 | 'log_every_n_steps', 10, 60 | 'The frequency with which logs are print.') 61 | 62 | tf.app.flags.DEFINE_integer( 63 | 'save_summaries_secs', 600, 64 | 'The frequency with which summaries are saved, in seconds.') 65 | 66 | tf.app.flags.DEFINE_integer( 67 | 'save_interval_secs', 600, 68 | 'The frequency with which the model is saved, in seconds.') 69 | 70 | tf.app.flags.DEFINE_integer( 71 | 'task', 0, 'Task id of the replica running the training.') 72 | 73 | ###################### 74 | # Optimization Flags # 75 | ###################### 76 | 77 | tf.app.flags.DEFINE_float( 78 | 'weight_decay', 0.00004, 'The weight decay on the model weights.') 79 | 80 | tf.app.flags.DEFINE_string( 81 | 'optimizer', 'rmsprop', 82 | 'The name of the optimizer, one of "adadelta", "adagrad", "adam",' 83 | '"ftrl", "momentum", "sgd" or "rmsprop".') 84 | 85 | tf.app.flags.DEFINE_float( 86 | 'adadelta_rho', 0.95, 87 | 'The decay rate for adadelta.') 88 | 89 | tf.app.flags.DEFINE_float( 90 | 'adagrad_initial_accumulator_value', 0.1, 91 | 'Starting value for the AdaGrad accumulators.') 92 | 93 | tf.app.flags.DEFINE_float( 94 | 'adam_beta1', 0.9, 95 | 'The exponential decay rate for the 1st moment estimates.') 96 | 97 | tf.app.flags.DEFINE_float( 98 | 'adam_beta2', 0.999, 99 | 'The exponential decay rate for the 2nd moment estimates.') 100 | 101 | tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon 
term for the optimizer.') 102 | 103 | tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5, 104 | 'The learning rate power.') 105 | 106 | tf.app.flags.DEFINE_float( 107 | 'ftrl_initial_accumulator_value', 0.1, 108 | 'Starting value for the FTRL accumulators.') 109 | 110 | tf.app.flags.DEFINE_float( 111 | 'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.') 112 | 113 | tf.app.flags.DEFINE_float( 114 | 'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.') 115 | 116 | tf.app.flags.DEFINE_float( 117 | 'momentum', 0.9, 118 | 'The momentum for the MomentumOptimizer and RMSPropOptimizer.') 119 | 120 | tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.') 121 | 122 | ####################### 123 | # Learning Rate Flags # 124 | ####################### 125 | 126 | tf.app.flags.DEFINE_string( 127 | 'learning_rate_decay_type', 128 | 'exponential', 129 | 'Specifies how the learning rate is decayed. One of "fixed", "exponential",' 130 | ' or "polynomial"') 131 | 132 | tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.') 133 | 134 | tf.app.flags.DEFINE_float( 135 | 'end_learning_rate', 0.0001, 136 | 'The minimal end learning rate used by a polynomial decay learning rate.') 137 | 138 | tf.app.flags.DEFINE_float( 139 | 'label_smoothing', 0.0, 'The amount of label smoothing.') 140 | 141 | tf.app.flags.DEFINE_float( 142 | 'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.') 143 | 144 | tf.app.flags.DEFINE_float( 145 | 'num_epochs_per_decay', 2.0, 146 | 'Number of epochs after which learning rate decays.') 147 | 148 | tf.app.flags.DEFINE_bool( 149 | 'sync_replicas', False, 150 | 'Whether or not to synchronize the replicas during training.') 151 | 152 | tf.app.flags.DEFINE_integer( 153 | 'replicas_to_aggregate', 1, 154 | 'The Number of gradients to collect before updating params.') 155 | 156 | tf.app.flags.DEFINE_float( 157 | 'moving_average_decay', None, 158 | 'The decay to use for the moving average.' 159 | 'If left as None, then moving averages are not used.') 160 | 161 | ####################### 162 | # Dataset Flags # 163 | ####################### 164 | 165 | tf.app.flags.DEFINE_string( 166 | 'dataset_name', 'imagenet', 'The name of the dataset to load.') 167 | 168 | tf.app.flags.DEFINE_string( 169 | 'dataset_split_name', 'train', 'The name of the train/test split.') 170 | 171 | tf.app.flags.DEFINE_string( 172 | 'dataset_dir', None, 'The directory where the dataset files are stored.') 173 | 174 | tf.app.flags.DEFINE_integer( 175 | 'labels_offset', 0, 176 | 'An offset for the labels in the dataset. This flag is primarily used to ' 177 | 'evaluate the VGG and ResNet architectures which do not use a background ' 178 | 'class for the ImageNet dataset.') 179 | 180 | tf.app.flags.DEFINE_string( 181 | 'model_name', 'densenet121', 'The name of the architecture to train.') 182 | 183 | tf.app.flags.DEFINE_string( 184 | 'data_format', 'NHWC', 'The structure of the Tensor. NHWC or NCHW.') 185 | 186 | tf.app.flags.DEFINE_string( 187 | 'preprocessing_name', None, 'The name of the preprocessing to use. 
If left ' 188 | 'as `None`, then the model_name flag is used.') 189 | 190 | tf.app.flags.DEFINE_integer( 191 | 'batch_size', 32, 'The number of samples in each batch.') 192 | 193 | tf.app.flags.DEFINE_integer( 194 | 'train_image_size', None, 'Train image size') 195 | 196 | tf.app.flags.DEFINE_integer('max_number_of_steps', None, 197 | 'The maximum number of training steps.') 198 | 199 | ##################### 200 | # Fine-Tuning Flags # 201 | ##################### 202 | 203 | tf.app.flags.DEFINE_string( 204 | 'checkpoint_path', None, 205 | 'The path to a checkpoint from which to fine-tune.') 206 | 207 | tf.app.flags.DEFINE_string( 208 | 'checkpoint_exclude_scopes', None, 209 | 'Comma-separated list of scopes of variables to exclude when restoring ' 210 | 'from a checkpoint.') 211 | 212 | tf.app.flags.DEFINE_string( 213 | 'trainable_scopes', None, 214 | 'Comma-separated list of scopes to filter the set of variables to train.' 215 | 'By default, None would train all the variables.') 216 | 217 | tf.app.flags.DEFINE_boolean( 218 | 'ignore_missing_vars', False, 219 | 'When restoring a checkpoint would ignore missing variables.') 220 | 221 | FLAGS = tf.app.flags.FLAGS 222 | 223 | 224 | def _configure_learning_rate(num_samples_per_epoch, global_step): 225 | """Configures the learning rate. 226 | Args: 227 | num_samples_per_epoch: The number of samples in each epoch of training. 228 | global_step: The global_step tensor. 229 | Returns: 230 | A `Tensor` representing the learning rate. 231 | Raises: 232 | ValueError: if 233 | """ 234 | decay_steps = int(num_samples_per_epoch / FLAGS.batch_size * 235 | FLAGS.num_epochs_per_decay) 236 | if FLAGS.sync_replicas: 237 | decay_steps /= FLAGS.replicas_to_aggregate 238 | 239 | if FLAGS.learning_rate_decay_type == 'exponential': 240 | return tf.train.exponential_decay(FLAGS.learning_rate, 241 | global_step, 242 | decay_steps, 243 | FLAGS.learning_rate_decay_factor, 244 | staircase=True, 245 | name='exponential_decay_learning_rate') 246 | elif FLAGS.learning_rate_decay_type == 'fixed': 247 | return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate') 248 | elif FLAGS.learning_rate_decay_type == 'polynomial': 249 | return tf.train.polynomial_decay(FLAGS.learning_rate, 250 | global_step, 251 | decay_steps, 252 | FLAGS.end_learning_rate, 253 | power=1.0, 254 | cycle=False, 255 | name='polynomial_decay_learning_rate') 256 | else: 257 | raise ValueError('learning_rate_decay_type [%s] was not recognized', 258 | FLAGS.learning_rate_decay_type) 259 | 260 | 261 | def _configure_optimizer(learning_rate): 262 | """Configures the optimizer used for training. 263 | Args: 264 | learning_rate: A scalar or `Tensor` learning rate. 265 | Returns: 266 | An instance of an optimizer. 267 | Raises: 268 | ValueError: if FLAGS.optimizer is not recognized. 
269 | """ 270 | if FLAGS.optimizer == 'adadelta': 271 | optimizer = tf.train.AdadeltaOptimizer( 272 | learning_rate, 273 | rho=FLAGS.adadelta_rho, 274 | epsilon=FLAGS.opt_epsilon) 275 | elif FLAGS.optimizer == 'adagrad': 276 | optimizer = tf.train.AdagradOptimizer( 277 | learning_rate, 278 | initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value) 279 | elif FLAGS.optimizer == 'adam': 280 | optimizer = tf.train.AdamOptimizer( 281 | learning_rate, 282 | beta1=FLAGS.adam_beta1, 283 | beta2=FLAGS.adam_beta2, 284 | epsilon=FLAGS.opt_epsilon) 285 | elif FLAGS.optimizer == 'ftrl': 286 | optimizer = tf.train.FtrlOptimizer( 287 | learning_rate, 288 | learning_rate_power=FLAGS.ftrl_learning_rate_power, 289 | initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value, 290 | l1_regularization_strength=FLAGS.ftrl_l1, 291 | l2_regularization_strength=FLAGS.ftrl_l2) 292 | elif FLAGS.optimizer == 'momentum': 293 | optimizer = tf.train.MomentumOptimizer( 294 | learning_rate, 295 | momentum=FLAGS.momentum, 296 | name='Momentum') 297 | elif FLAGS.optimizer == 'rmsprop': 298 | optimizer = tf.train.RMSPropOptimizer( 299 | learning_rate, 300 | decay=FLAGS.rmsprop_decay, 301 | momentum=FLAGS.momentum, 302 | epsilon=FLAGS.opt_epsilon) 303 | elif FLAGS.optimizer == 'sgd': 304 | optimizer = tf.train.GradientDescentOptimizer(learning_rate) 305 | else: 306 | raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer) 307 | return optimizer 308 | 309 | def _get_init_fn(): 310 | """Returns a function run by the chief worker to warm-start the training. 311 | Note that the init_fn is only run when initializing the model during the very 312 | first global step. 313 | Returns: 314 | An init function run by the supervisor. 315 | """ 316 | if FLAGS.checkpoint_path is None: 317 | return None 318 | 319 | # Warn the user if a checkpoint exists in the train_dir. Then we'll be 320 | # ignoring the checkpoint anyway. 321 | if tf.train.latest_checkpoint(FLAGS.train_dir): 322 | tf.logging.info( 323 | 'Ignoring --checkpoint_path because a checkpoint already exists in %s' 324 | % FLAGS.train_dir) 325 | return None 326 | 327 | exclusions = [] 328 | if FLAGS.checkpoint_exclude_scopes: 329 | exclusions = [scope.strip() 330 | for scope in FLAGS.checkpoint_exclude_scopes.split(',')] 331 | 332 | # TODO(sguada) variables.filter_variables() 333 | variables_to_restore = [] 334 | for var in slim.get_model_variables(): 335 | excluded = False 336 | for exclusion in exclusions: 337 | if var.op.name.startswith(exclusion): 338 | excluded = True 339 | break 340 | if not excluded: 341 | variables_to_restore.append(var) 342 | 343 | if tf.gfile.IsDirectory(FLAGS.checkpoint_path): 344 | checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path) 345 | else: 346 | checkpoint_path = FLAGS.checkpoint_path 347 | 348 | tf.logging.info('Fine-tuning from %s' % checkpoint_path) 349 | 350 | return slim.assign_from_checkpoint_fn( 351 | checkpoint_path, 352 | variables_to_restore, 353 | ignore_missing_vars=FLAGS.ignore_missing_vars) 354 | 355 | 356 | def _get_variables_to_train(): 357 | """Returns a list of variables to train. 358 | Returns: 359 | A list of variables to train by the optimizer. 
360 | """ 361 | if FLAGS.trainable_scopes is None: 362 | return tf.trainable_variables() 363 | else: 364 | scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')] 365 | 366 | variables_to_train = [] 367 | for scope in scopes: 368 | variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope) 369 | variables_to_train.extend(variables) 370 | return variables_to_train 371 | 372 | 373 | def main(_): 374 | if not FLAGS.dataset_dir: 375 | raise ValueError('You must supply the dataset directory with --dataset_dir') 376 | 377 | tf.logging.set_verbosity(tf.logging.INFO) 378 | with tf.Graph().as_default(): 379 | ####################### 380 | # Config model_deploy # 381 | ####################### 382 | deploy_config = model_deploy.DeploymentConfig( 383 | num_clones=FLAGS.num_clones, 384 | clone_on_cpu=FLAGS.clone_on_cpu, 385 | replica_id=FLAGS.task, 386 | num_replicas=FLAGS.worker_replicas, 387 | num_ps_tasks=FLAGS.num_ps_tasks) 388 | 389 | # Create global_step 390 | with tf.device(deploy_config.variables_device()): 391 | global_step = slim.create_global_step() 392 | 393 | ###################### 394 | # Select the dataset # 395 | ###################### 396 | dataset = dataset_factory.get_dataset( 397 | FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir) 398 | 399 | ###################### 400 | # Select the network # 401 | ###################### 402 | network_fn = nets_factory.get_network_fn( 403 | FLAGS.model_name, 404 | num_classes=(dataset.num_classes - FLAGS.labels_offset), 405 | weight_decay=FLAGS.weight_decay, 406 | data_format=FLAGS.data_format, 407 | is_training=True) 408 | 409 | ##################################### 410 | # Select the preprocessing function # 411 | ##################################### 412 | preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name 413 | image_preprocessing_fn = preprocessing_factory.get_preprocessing( 414 | preprocessing_name, 415 | is_training=True) 416 | 417 | ############################################################## 418 | # Create a dataset provider that loads data from the dataset # 419 | ############################################################## 420 | with tf.device(deploy_config.inputs_device()): 421 | provider = slim.dataset_data_provider.DatasetDataProvider( 422 | dataset, 423 | num_readers=FLAGS.num_readers, 424 | common_queue_capacity=20 * FLAGS.batch_size, 425 | common_queue_min=10 * FLAGS.batch_size) 426 | [image, label] = provider.get(['image', 'label']) 427 | label -= FLAGS.labels_offset 428 | 429 | train_image_size = FLAGS.train_image_size or network_fn.default_image_size 430 | 431 | image = image_preprocessing_fn(image, train_image_size, train_image_size) 432 | 433 | images, labels = tf.train.batch( 434 | [image, label], 435 | batch_size=FLAGS.batch_size, 436 | num_threads=FLAGS.num_preprocessing_threads, 437 | capacity=5 * FLAGS.batch_size) 438 | labels = slim.one_hot_encoding( 439 | labels, dataset.num_classes - FLAGS.labels_offset) 440 | batch_queue = slim.prefetch_queue.prefetch_queue( 441 | [images, labels], capacity=2 * deploy_config.num_clones) 442 | 443 | #################### 444 | # Define the model # 445 | #################### 446 | def clone_fn(batch_queue): 447 | """Allows data parallelism by creating multiple clones of network_fn.""" 448 | with tf.device(deploy_config.inputs_device()): 449 | images, labels = batch_queue.dequeue() 450 | logits, end_points = network_fn(images) 451 | logits = tf.squeeze(logits) 452 | 453 | ############################# 454 | # Specify the loss 
function # 455 | ############################# 456 | if 'AuxLogits' in end_points: 457 | tf.losses.softmax_cross_entropy( 458 | logits=end_points['AuxLogits'], onehot_labels=labels, 459 | label_smoothing=FLAGS.label_smoothing, weights=0.4, scope='aux_loss') 460 | tf.losses.softmax_cross_entropy( 461 | logits=logits, onehot_labels=labels, 462 | label_smoothing=FLAGS.label_smoothing, weights=1.0) 463 | return end_points 464 | 465 | # Gather initial summaries. 466 | summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) 467 | 468 | clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue]) 469 | first_clone_scope = deploy_config.clone_scope(0) 470 | # Gather update_ops from the first clone. These contain, for example, 471 | # the updates for the batch_norm variables created by network_fn. 472 | update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) 473 | 474 | # Add summaries for end_points. 475 | end_points = clones[0].outputs 476 | for end_point in end_points: 477 | x = end_points[end_point] 478 | summaries.add(tf.summary.histogram('activations/' + end_point, x)) 479 | summaries.add(tf.summary.scalar('sparsity/' + end_point, 480 | tf.nn.zero_fraction(x))) 481 | 482 | # Add summaries for losses. 483 | for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope): 484 | summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss)) 485 | 486 | # Add summaries for variables. 487 | for variable in slim.get_model_variables(): 488 | summaries.add(tf.summary.histogram(variable.op.name, variable)) 489 | 490 | ################################# 491 | # Configure the moving averages # 492 | ################################# 493 | if FLAGS.moving_average_decay: 494 | moving_average_variables = slim.get_model_variables() 495 | variable_averages = tf.train.ExponentialMovingAverage( 496 | FLAGS.moving_average_decay, global_step) 497 | else: 498 | moving_average_variables, variable_averages = None, None 499 | 500 | ######################################### 501 | # Configure the optimization procedure. # 502 | ######################################### 503 | with tf.device(deploy_config.optimizer_device()): 504 | learning_rate = _configure_learning_rate(dataset.num_samples, global_step) 505 | optimizer = _configure_optimizer(learning_rate) 506 | summaries.add(tf.summary.scalar('learning_rate', learning_rate)) 507 | 508 | if FLAGS.sync_replicas: 509 | # If sync_replicas is enabled, the averaging will be done in the chief 510 | # queue runner. 511 | optimizer = tf.train.SyncReplicasOptimizer( 512 | opt=optimizer, 513 | replicas_to_aggregate=FLAGS.replicas_to_aggregate, 514 | variable_averages=variable_averages, 515 | variables_to_average=moving_average_variables, 516 | replica_id=tf.constant(FLAGS.task, tf.int32, shape=()), 517 | total_num_replicas=FLAGS.worker_replicas) 518 | elif FLAGS.moving_average_decay: 519 | # Update ops executed locally by trainer. 520 | update_ops.append(variable_averages.apply(moving_average_variables)) 521 | 522 | # Variables to train. 523 | variables_to_train = _get_variables_to_train() 524 | 525 | # and returns a train_tensor and summary_op 526 | total_loss, clones_gradients = model_deploy.optimize_clones( 527 | clones, 528 | optimizer, 529 | var_list=variables_to_train) 530 | # Add total_loss to summary. 531 | summaries.add(tf.summary.scalar('total_loss', total_loss)) 532 | 533 | # Create gradient updates. 
534 | grad_updates = optimizer.apply_gradients(clones_gradients, 535 | global_step=global_step) 536 | update_ops.append(grad_updates) 537 | 538 | update_op = tf.group(*update_ops) 539 | with tf.control_dependencies([update_op]): 540 | train_tensor = tf.identity(total_loss, name='train_op') 541 | 542 | # Add the summaries from the first clone. These contain the summaries 543 | # created by model_fn and either optimize_clones() or _gather_clone_loss(). 544 | summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, 545 | first_clone_scope)) 546 | 547 | # Merge all summaries together. 548 | summary_op = tf.summary.merge(list(summaries), name='summary_op') 549 | 550 | 551 | ########################### 552 | # Kicks off the training. # 553 | ########################### 554 | slim.learning.train( 555 | train_tensor, 556 | logdir=FLAGS.train_dir, 557 | master=FLAGS.master, 558 | is_chief=(FLAGS.task == 0), 559 | init_fn=_get_init_fn(), 560 | summary_op=summary_op, 561 | number_of_steps=FLAGS.max_number_of_steps, 562 | log_every_n_steps=FLAGS.log_every_n_steps, 563 | save_summaries_secs=FLAGS.save_summaries_secs, 564 | save_interval_secs=FLAGS.save_interval_secs, 565 | sync_optimizer=optimizer if FLAGS.sync_replicas else None) 566 | 567 | 568 | if __name__ == '__main__': 569 | tf.app.run() --------------------------------------------------------------------------------
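For reference, a minimal standalone sketch of the learning-rate schedule that `_configure_learning_rate` in `1.py` builds with `tf.train.exponential_decay(..., staircase=True)`. It uses the script's default flag values (learning_rate=0.01, learning_rate_decay_factor=0.94, batch_size=32, num_epochs_per_decay=2.0); the sample count in the usage example is hypothetical, not taken from this dataset.

    import math

    def staircase_exponential_lr(global_step,
                                 num_samples_per_epoch,
                                 initial_lr=0.01,            # --learning_rate default
                                 decay_factor=0.94,          # --learning_rate_decay_factor default
                                 batch_size=32,              # --batch_size default
                                 num_epochs_per_decay=2.0):  # --num_epochs_per_decay default
        """Closed form of tf.train.exponential_decay(..., staircase=True)
        as configured in _configure_learning_rate."""
        decay_steps = int(num_samples_per_epoch / batch_size * num_epochs_per_decay)
        return initial_lr * decay_factor ** math.floor(global_step / decay_steps)

    # Example with a hypothetical 1296 training images:
    # decay_steps = int(1296 / 32 * 2.0) = 81, so the rate is 0.01 for
    # steps 0-80, 0.0094 for steps 81-161, and so on.
    print(staircase_exponential_lr(100, num_samples_per_epoch=1296))  # -> 0.0094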