├── README.md
├── Source code
│   ├── cnnproject.py
│   └── svm.py
├── cnn
│   └── CNNplaceholder
├── haarcascade_frontalface_default.xml
├── knn
│   └── KNNplaceholder
├── main.py
└── svm
    └── SVMplaceholder
/README.md:
--------------------------------------------------------------------------------
# Age Estimation Project

This is our project for the Pattern Recognition CS-342 course.
Project contributors:
- **Ahmed Mohamadeen** [@ahmeed2m](https://git.io/ahmed)
- **Ali Elrafei** [@alielrafeiFCIH](https://github.com/alielrafeiFCIH)
- **Ali Khaled** [@ali-khaled-elsayed](https://github.com/ali-khaled-elsayed)
- **Aly Moataz** [@Aly-Moataz-Shorosh](https://github.com/Aly-Moataz-Shorosh)
- **Omar Farouk** [@Onsymers](https://github.com/Onsymers)
- **Marwan Bedeir** [@marwanBedeir](https://github.com/marwanBedeir)


## Brief
The problem is Age Estimation on the popular **[UTKFace](https://susanqq.github.io/UTKFace/)** data-set.

The project has a GUI that captures a picture from the camera, localizes a face in it, and feeds that face to our three implemented estimation methods, *KNN, SVM and CNN*, all trained on the previously mentioned data-set.
For now the project only runs on Linux, because the models' pickle files are only readable from a Linux environment.

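In rough terms a prediction runs like the sketch below (illustrative only; the real flow, including the GUI, lives in `main.py`, and `predict_age` is not a function in this repo):
```
import cv2

# face detector shipped with the repo
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def predict_age(image_path, model, scale):
    # `model` is an unpickled estimator; `scale` is its input size
    # (50 for KNN, 100 for SVM in main.py)
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    x, y, w, h = face_cascade.detectMultiScale(gray, 1.3, 5)[0]  # first face
    face = cv2.resize(gray[y:y + h, x:x + w], (scale, scale)).flatten()
    return model.predict([face])
```
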
### Requirements
Temporary: a Linux machine with Python 3 or later.
Python libraries:
```
tensorflow
opencv-contrib-python or opencv
Pillow
```
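These can typically be installed with pip, for example:
```
pip install tensorflow opencv-contrib-python Pillow
```
The source files also import numpy, scikit-learn and matplotlib, so you may need those as well.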
### How to run
Clone the project:
>git clone https://github.com/Ahmeed2m/Age-estimation-project.git

Download the model pickles and TensorFlow session from this link and put them in the matching folders (`knn/`, `svm/` and `cnn/`).

Open a terminal/cmd in the repo path and run:

>python ./main.py

### Project preview



### Accuracy details

The KNN and CNN models classify a face into one of five age groups (1-14, 15-25, 26-40, 41-60 and 61-100); the SVM reports a direct age estimate.

| Model | Accuracy |
|-------|----------|
| KNN   | 38%      |
| SVM   | 50%      |
| CNN   | 60%      |

--------------------------------------------------------------------------------
/Source code/cnnproject.py:
--------------------------------------------------------------------------------
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
import datetime
import cv2
import pickle as pk
import tensorflow as tf
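
# One-time preprocessing (commented out below): load the UTKFace images,
# grayscale them, resize to 40x40, one-hot the ages into five buckets, and
# dump the results to data.pickle / labels.pickle.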
# dir = "UTKFace/"
# filenames = os.listdir(dir)
# filenames = filenames[:23708]
# lst = [dir + f for f in filenames]
# # arr=np.array(lst)
# data = []
# age_label = []
# classes = []
# labels = []
# targetreg = []
# race_label = []
# gender_label = []
# print(filenames[0])
# print(lst[0])
#
# t = zip(lst, filenames, list(range(len(filenames))))
# # print(len(list(t)))
#
# a = datetime.datetime.now().replace(microsecond=0)
# for f, source, i in t:
#     img = cv2.imread(f)
#     # SCALING
#     scale = 40
#     dim = (scale, scale)
#     img = cv2.resize(img, dim)
#
#     # Grayscaling
#     img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
#     sp = source.split("_")
#     age = sp[0]
#     gender = sp[1]
#     race = sp[2]
#     flat = img.flatten()
#     if "jpg" in race:
#         print("race: " + race + "\nsource:" + source)
#         continue
#     row = np.array([[age, gender, race]])
#     age = int(age)
#     targetreg.append(age)
#     if age <= 14:
#         labels.append([1, 0, 0, 0, 0])
#     if (age > 14) and (age <= 25):
#         labels.append([0, 1, 0, 0, 0])
#     if (age > 25) and (age < 40):
#         labels.append([0, 0, 1, 0, 0])
#     if (age >= 40) and (age < 60):
#         labels.append([0, 0, 0, 1, 0])
#     if age >= 60:
#         labels.append([0, 0, 0, 0, 1])
#
#     gender_label.append(gender)
#     race_label.append(race)
#     age_label.append(age)
#     data.append(flat)
#
# b = datetime.datetime.now().replace(microsecond=0)
# print("Finished import in " + str(b - a))
# data = np.asarray(data)
# labels = np.asarray(labels)
# print(data.shape)

# pick_out = open('data.pickle', 'wb')
# pk.dump(data, pick_out)
# pick_out.close()
#
# pick_out = open('labels.pickle', 'wb')
# pk.dump(labels, pick_out)
# pick_out.close()

pick_in = open('data.pickle', 'rb')
data = pk.load(pick_in)
pick_in.close()

pick_in = open('labels.pickle', 'rb')
labels = pk.load(pick_in)
pick_in.close()

n_classes = 5
batch_size = 1000

# flattened 40x40 grayscale image -> 1600 features per sample
x = tf.placeholder('float', [None, 1600])
y = tf.placeholder('float')

keep_rate = 0.8
keep_prob = tf.placeholder(tf.float32)

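# 2-D convolution with stride 1; SAME padding keeps the spatial size unchanged.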
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def maxpool2d(x):
    # 2x2 window moved 2 pixels at a time: halves each spatial dimension
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.3, random_state=32)

def convolution_neural_network(x):
    weights = {'W_conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
               'W_conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
               'W_fc': tf.Variable(tf.random_normal([10 * 10 * 64, 1024])),
               'out': tf.Variable(tf.random_normal([1024, n_classes]))}

    # weights = {'W_conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    #            'W_conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    #            'W_conv3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
    #            'W_fc': tf.Variable(tf.random_normal([13 * 13 * 128, 128])),
    #            'out': tf.Variable(tf.random_normal([128, n_classes]))}

    biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
              'b_conv2': tf.Variable(tf.random_normal([64])),
              # 'b_conv3': tf.Variable(tf.random_normal([128])),
              'b_fc': tf.Variable(tf.random_normal([1024])),
              'out': tf.Variable(tf.random_normal([n_classes]))}

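    # Shape flow: 40x40x1 input -> conv1 + pool -> 20x20x32 -> conv2 + pool ->
    # 10x10x64, which is why the fully connected layer takes 10*10*64 inputs.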
    x = tf.reshape(x, shape=[-1, 40, 40, 1])

    conv1 = tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1'])
    conv1 = maxpool2d(conv1)

    conv2 = tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool2d(conv2)

    # conv3 = conv2d(conv2, weights['W_conv3']) + biases['b_conv3']
    # conv3 = maxpool2d(conv3)

    fc = tf.reshape(conv2, [-1, 10 * 10 * 64])

    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])

    output = tf.matmul(fc, weights['out']) + biases['out']
    return output


# NOTE: tf.train.Saver() can only be created once the graph variables exist,
# so each function below builds the network first and then creates its Saver.

def train_neural_network(x):
    prediction = convolution_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    saver = tf.train.Saver()

    hm_epochs = 15
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while i < len(X_train):
                start = i
                end = i + batch_size

                x_batch = X_train[start:end]
                y_batch = y_train[start:end]
                _, c = sess.run([optimizer, cost], feed_dict={x: x_batch, y: y_batch})

                epoch_loss += c
                i += batch_size

            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        acc = accuracy.eval({x: X_test, y: y_test})
        print('Test Accuracy:', acc)

        saver.save(sess, "cnnmodel.ckpt")
        pk_out = open('acc.pickle', 'wb')
        pk.dump(acc, pk_out)
        pk_out.close()


train_neural_network(x)


def Cnn_predict(image):
    prediction = convolution_neural_network(x)
    img = cv2.resize(image, (40, 40))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    data = []
    flat = img.flatten()
    data.append(flat)
    data = np.asarray(data)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        tf.train.Saver().restore(sess, "cnnmodel.ckpt")

        result = sess.run(tf.argmax(prediction.eval(feed_dict={x: data}), 1))
        print(result[0])
        return result[0]
--------------------------------------------------------------------------------
/Source code/svm.py:
--------------------------------------------------------------------------------
from sklearn import svm
import pickle
import cv2
import matplotlib.pyplot as plt

# X_train / y_train: the flattened grayscale training faces and their age
# labels, prepared elsewhere.
clf = svm.SVC(gamma=0.0000001, kernel='rbf', C=1000, verbose=True, max_iter=100,
              decision_function_shape='ovo')

clf.fit(X_train, y_train)

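# Assumed save step (not in the original fragment): persist the trained model
# under the same pickle filename that is reloaded below and in main.py.
pk_out = open("SVM_gamma=0.0000001,kernel='rbf',C=1000.pickle", "wb")
pickle.dump(clf, pk_out)
pk_out.close()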

# Reload the pickled model and predict the age of a sample UTKFace image.
pk_in = open("SVM_gamma=0.0000001,kernel='rbf',C=1000.pickle", "rb")
clf = pickle.load(pk_in)
pk_in.close()

img = plt.imread('1_0_0_20161219140627985.jpg.chip.jpg')
scale = 100
dim = (scale, scale)
img = cv2.resize(img, dim)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img.flatten()
array = [img]
age = clf.predict(array)
print(age)
--------------------------------------------------------------------------------
/cnn/CNNplaceholder:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/knn/KNNplaceholder:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.19
# in conjunction with Tcl version 8.6
# Apr 22, 2019 03:43:23 PM PDT platform: Windows NT

import sys
import cv2
from PIL import Image, ImageTk
import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
imageSource = ""
img_counter = 0
rio = ""  # filename of the most recent face ROI crop
knn = None
svm = None

try:
    import Tkinter as tk
except ImportError:
    import tkinter as tk

try:
    import ttk
    py3 = False
except ImportError:
    import tkinter.ttk as ttk
    py3 = True

n_classes = 5
batch_size = 1000

x = tf.placeholder('float', [None, 1600])
y = tf.placeholder('float')

keep_rate = 0.8
keep_prob = tf.placeholder(tf.float32)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def maxpool2d(x):
    # 2x2 window moved 2 pixels at a time: halves each spatial dimension
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


def convolution_neural_network(x):
    weights = {'W_conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
               'W_conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
               'W_fc': tf.Variable(tf.random_normal([10 * 10 * 64, 1024])),
               'out': tf.Variable(tf.random_normal([1024, n_classes]))}

    # weights = {'W_conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    #            'W_conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    #            'W_conv3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
    #            'W_fc': tf.Variable(tf.random_normal([13 * 13 * 128, 128])),
    #            'out': tf.Variable(tf.random_normal([128, n_classes]))}

    biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
              'b_conv2': tf.Variable(tf.random_normal([64])),
              # 'b_conv3': tf.Variable(tf.random_normal([128])),
              'b_fc': tf.Variable(tf.random_normal([1024])),
              'out': tf.Variable(tf.random_normal([n_classes]))}

    x = tf.reshape(x, shape=[-1, 40, 40, 1])

    conv1 = tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1'])
    conv1 = maxpool2d(conv1)

    conv2 = tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool2d(conv2)

    # conv3 = conv2d(conv2, weights['W_conv3']) + biases['b_conv3']
    # conv3 = maxpool2d(conv3)

    fc = tf.reshape(conv2, [-1, 10 * 10 * 64])

    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])

    output = tf.matmul(fc, weights['out']) + biases['out']
    return output


# saver = tf.train.Saver()  # created inline in Cnn_predict, after the graph is built


def Cnn_predict(image):
    # `image` is the path of the cropped face; load it directly as grayscale
    img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (40, 40))
    data = []
    flat = img.flatten()
    data.append(flat)
    data = np.asarray(data)

    prediction = convolution_neural_network(x)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        tf.train.Saver().restore(sess, "cnn/cnnmodel.ckpt")

        result = sess.run(tf.argmax(prediction.eval(feed_dict={x: data}), 1))
        print(result[0])
        return result[0]


def onclick(btnNum, Label1, group):
    global img_counter
    if btnNum == 1:
        temp = capture()
        if temp is not None:
            imageSource = temp
            img = cv2.imread(imageSource)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            print(faces)
            for (x, y, w, h) in faces:
                roi_gray = gray[y:y + h, x:x + w]
                # crop the face with a 30px margin and save it for prediction
                roi_color = img[y - 30:y + 30 + h, x - 30:x + 30 + w]
                img_name = "roi_{}.jpg".format(img_counter)
                global rio
                rio = img_name
                cv2.imwrite(img_name, roi_color)
                cv2.rectangle(img, (x - 10, y - 60), (x + w, y + 10 + h), (255, 0, 0), 2)

            img_name = "opencv_image_face_{}.jpg".format(img_counter)
            cv2.imwrite(img_name, img)

            # resize the annotated frame to fit the preview label
            dim = (320, 240)
            imageToShow = cv2.resize(img, dim)
            img_name = "opencv_image_face__rect_{}.jpg".format(img_counter)
            cv2.imwrite(img_name, imageToShow)

            imageToShow = Image.open(img_name)
            img2 = ImageTk.PhotoImage(imageToShow)

            Label1.configure(image=img2)
            Label1.image = img2
            img_counter += 1

    # elif btnNum == 2:

    elif btnNum == 3:
        if group == 0:
            # KNN: 50x50 grayscale input, flattened
            print(rio)
            img = cv2.imread(rio, cv2.IMREAD_GRAYSCALE)
            scale = 50
            dim = (scale, scale)
            img = cv2.resize(img, dim)
            img = img.flatten()
            img = np.asarray(img)
            global knn
            y_pred = knn.predict(img.reshape(1, -1))
            print(y_pred)
            if y_pred == 0:
                Label1.configure(text="Knn estimate is from Age 1 to 14")
            if y_pred == 1:
                Label1.configure(text="Knn estimate is from Age 15 to 25")
            if y_pred == 2:
                Label1.configure(text="Knn estimate is from Age 26 to 40")
            if y_pred == 3:
                Label1.configure(text="Knn estimate is from Age 41 to 60")
            if y_pred == 4:
                Label1.configure(text="Knn estimate is from Age 61 to 100")
        elif group == 1:
            # SVM: 100x100 grayscale input, flattened; predicts the age directly
            img = cv2.imread(rio, cv2.IMREAD_GRAYSCALE)
            scale = 100
            dim = (scale, scale)
            img = cv2.resize(img, dim)
            img = img.flatten()
            img = np.asarray(img)
            global svm
            y_pred = svm.predict(img.reshape(1, -1))
            print(y_pred)
            Label1.configure(text="SVM estimate is " + str(y_pred[0]))
        elif group == 2:
            # CNN: graph building, restore and argmax happen in Cnn_predict
            y_pred = Cnn_predict(rio)
            if y_pred == 0:
                Label1.configure(text="Cnn estimate is from Age 1 to 14")
            if y_pred == 1:
                Label1.configure(text="Cnn estimate is from Age 15 to 25")
            if y_pred == 2:
                Label1.configure(text="Cnn estimate is from Age 26 to 40")
            if y_pred == 3:
                Label1.configure(text="Cnn estimate is from Age 41 to 60")
            if y_pred == 4:
                Label1.configure(text="Cnn estimate is from Age 61 to 100")

'''
conda remove opencv
conda install -c menpo opencv
pip install --upgrade pip
pip install opencv-contrib-python
'''


def capture():
    cam = cv2.VideoCapture(0)

    cv2.namedWindow("age estimation camera")

    while True:
        ret, image = cam.read()
        if not ret:
            # camera frame could not be read; give up
            break
        cv2.imshow("age estimation camera", image)
        k = cv2.waitKey(1)
        global img_counter
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            cam.release()
            cv2.destroyAllWindows()
            return None
        elif k % 256 == 32:
            # SPACE pressed
            img_name = "opencv_image_{}.jpg".format(img_counter)
            cv2.imwrite(img_name, image)
            img_source = "{}".format(img_name)
            print(img_source)
            cam.release()
            cv2.destroyAllWindows()
            return img_source

    cam.release()
    cv2.destroyAllWindows()
    return None

def vp_start_gui():
    '''Starting point when module is the main routine.'''
    global knn
    knn = getKnn().load()
    global svm
    svm = getsvm().load()
    global val, w, root
    root = tk.Tk()
    top = Toplevel1(root)
    root.mainloop()

w = None
def create_Toplevel1(root, *args, **kwargs):
    '''Starting point when module is imported by another program.'''
    global w, w_win, rt
    rt = root
    w = tk.Toplevel(root)
    top = Toplevel1(w)
    return (w, top)

def destroy_Toplevel1():
    global w
    w.destroy()
    w = None

class Toplevel1:
    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        _bgcolor = '#d9d9d9'  # X11 color: 'gray85'
        _fgcolor = '#000000'  # X11 color: 'black'
        _compcolor = '#d9d9d9'  # X11 color: 'gray85'
        _ana1color = '#d9d9d9'  # X11 color: 'gray85'
        _ana2color = '#ececec'  # Closest X11 color: 'gray92'

        top.geometry("600x450+678+223")
        top.title("New Toplevel")
        top.configure(background="#d9d9d9")

        group = tk.IntVar()

        self.Label1 = tk.Label(top)
        self.Label1.place(relx=0.233, rely=0.044, height=261, width=454)
        self.Label1.configure(background="#d9d9d9")
        self.Label1.configure(disabledforeground="#a3a3a3")
        self.Label1.configure(foreground="#000000")
        self.Label1.configure(text='''IMPORT YOUR IMAGE''')
        self.Label1.configure(width=454)

        self.Label2 = tk.Label(top)
        self.Label2.place(relx=0.417, rely=0.778, height=51, width=254)
        self.Label2.configure(background="#d9d9d9")
        self.Label2.configure(disabledforeground="#a3a3a3")
        self.Label2.configure(foreground="#000000")
        self.Label2.configure(text='''YOUR PREDICT''')
        self.Label2.configure(width=254)

        # self.browse = tk.Button(top, command=lambda: onclick(2, self.Label1))
        # self.browse.place(relx=0.033, rely=0.022, height=34, width=107)
        # self.browse.configure(activebackground="#ececec")
        # self.browse.configure(activeforeground="#000000")
        # self.browse.configure(background="#d9d9d9")
        # self.browse.configure(disabledforeground="#a3a3a3")
        # self.browse.configure(foreground="#000000")
        # self.browse.configure(highlightbackground="#d9d9d9")
        # self.browse.configure(highlightcolor="black")
        # self.browse.configure(pady="0")
        # self.browse.configure(text='''Browse''')
        # self.browse.configure(width=107)

        self.capture = tk.Button(top, command=lambda: onclick(1, self.Label1, 0))
        self.capture.place(relx=0.033, rely=0.111, height=34, width=107)
        self.capture.configure(activebackground="#ececec")
        self.capture.configure(activeforeground="#000000")
        self.capture.configure(background="#d9d9d9")
        self.capture.configure(disabledforeground="#a3a3a3")
        self.capture.configure(foreground="#000000")
        self.capture.configure(highlightbackground="#d9d9d9")
        self.capture.configure(highlightcolor="black")
        self.capture.configure(pady="0")
        self.capture.configure(text='''Capture''')
        self.capture.configure(width=107)

        self.knn = tk.Radiobutton(top)
        self.knn.place(relx=0.05, rely=0.444, relheight=0.056, relwidth=0.08)
        self.knn.configure(activebackground="#ececec")
        self.knn.configure(activeforeground="#000000")
        self.knn.configure(background="#d9d9d9")
        self.knn.configure(disabledforeground="#a3a3a3")
        self.knn.configure(foreground="#000000")
        self.knn.configure(highlightbackground="#d9d9d9")
        self.knn.configure(highlightcolor="black")
        self.knn.configure(justify='left')
        self.knn.configure(text='''knn''')
        self.knn.configure(value="0")
        self.knn.configure(variable=group)

        self.svm = tk.Radiobutton(top)
        self.svm.place(relx=0.05, rely=0.511, relheight=0.056, relwidth=0.083)
        self.svm.configure(activebackground="#ececec")
        self.svm.configure(activeforeground="#000000")
        self.svm.configure(background="#d9d9d9")
        self.svm.configure(disabledforeground="#a3a3a3")
        self.svm.configure(foreground="#000000")
        self.svm.configure(highlightbackground="#d9d9d9")
        self.svm.configure(highlightcolor="black")
        self.svm.configure(justify='left')
        self.svm.configure(text='''svm''')
        self.svm.configure(value="1")
        self.svm.configure(variable=group)

        self.cnn = tk.Radiobutton(top)
        self.cnn.place(relx=0.05, rely=0.578, relheight=0.056, relwidth=0.08)
        self.cnn.configure(activebackground="#ececec")
        self.cnn.configure(activeforeground="#000000")
        self.cnn.configure(background="#d9d9d9")
        self.cnn.configure(disabledforeground="#a3a3a3")
        self.cnn.configure(foreground="#000000")
        self.cnn.configure(highlightbackground="#d9d9d9")
        self.cnn.configure(highlightcolor="black")
        self.cnn.configure(justify='left')
        self.cnn.configure(text='''cnn''')
        self.cnn.configure(value="2")
        self.cnn.configure(variable=group)

        self.predict = tk.Button(top, command=lambda: onclick(3, self.Label2, group.get()))
        self.predict.place(relx=0.033, rely=0.8, height=34, width=107)
        self.predict.configure(activebackground="#ececec")
        self.predict.configure(activeforeground="#000000")
        self.predict.configure(background="#d9d9d9")
        self.predict.configure(disabledforeground="#a3a3a3")
        self.predict.configure(foreground="#000000")
        self.predict.configure(highlightbackground="#d9d9d9")
        self.predict.configure(highlightcolor="black")
        self.predict.configure(pady="0")
        self.predict.configure(text='''Predict''')
        self.predict.configure(width=107)


def getKnn():
    # returns an Unpickler; the caller invokes .load() to get the model
    pk_in = open("knn/Knn_model.pickle", "rb")
    knn = pickle.Unpickler(pk_in)
    return knn


def getsvm():
    pk_in = open("svm/SVM_gamma=0.0000001,kernel='rbf',C=1000.pickle", "rb")
    svm = pickle.Unpickler(pk_in)
    return svm


if __name__ == '__main__':
    img_counter = 0
    vp_start_gui()
--------------------------------------------------------------------------------
/svm/SVMplaceholder:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------