├── .gitignore
├── README.md
├── data_handler.py
├── main.py
├── misc_py
├── pre.py
└── test.py
├── mtTkinter.py
├── neural_network_handler.py
├── resources
├── Perceptron-icon.icns
├── perceptron-header.png
└── settings.json
└── user_interface_handler.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | *.pyc
3 | .DS_Store
4 |
5 | /.eggs/
6 | /original_datasets/
7 | /processed_datasets/
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | NOTE: This is an incomplete project. I may decide to spend a lot more time on it if people persist giving it positive attention.
2 |
3 | # Perceptron
4 | A flexible artificial neural network builder to analyse performance and optimise the best model.
5 |
6 | Perceptron is software that helps researchers, students, and programmers to
7 | design, compare, and test artificial neural networks. As it stands, there are few visual
8 | tools that do this for free, and with simplicity.
9 | This software is largely for educational purposes, allowing people to experiment
10 | and understand how the different parameters within an ANN can result in
11 | different performances and better results. I have deliberately not used libraries like TensorFlow so people
12 | can see what goes on at a lower level, all within the code.
13 |
14 | 
15 |
16 |
17 |
18 | 
19 |
--------------------------------------------------------------------------------
/data_handler.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import re, numpy as np, random, math, time, decimal, os, json
3 | import FileDialog
4 |
5 |
class data_processor():
  """Loads, validates, transforms and writes datasets for the Perceptron UI.

  One instance is shared by the UI; scan state for the most recently viewed
  dataset is kept on the instance (class-level defaults below).
  """

  # Folder names for processed ("new") and raw ("old") dataset files.
  folders_for_data = {"new": "processed_datasets", "old": "original_datasets"}

  def __init__(self, ui):
    # ui: the user_interface instance used for console output and form values.
    self.user_interface = ui

  # Class-level defaults, rebound on the instance as scanning progresses.
  # NOTE(review): found_alphas is a mutable dict shared across instances
  # until the first rebind in find_alpha_classes.
  found_alphas = {}  # field position -> list of distinct alpha (string) values
  prev_file = ""  # last file scanned for alpha classes
  row_len = -1  # column count of the current dataset (-1 = not yet known)
  target_has_encoded_alphas = False
17 |
  def struct_dataset(self, for_viewer, return_str, prepro_vals):
    """Transform the raw dataset described by prepro_vals.

    for_viewer=True renders only the first rows as a preview string;
    otherwise the full processed dataset (JSON metadata header, "--"
    separator, then rows) is appended to <name>_new.txt, and any rows held
    back for testing go to <name>_testing.txt.

    Returns the preview string when return_str is truthy, else the list of
    processed rows.
    """
    # Update live viewer and if ready, update dataset
    new_dataset_str = ""
    new_dataset = []
    if (len(prepro_vals["row_separator_char"]) > 0 and
        os.path.isfile(self.folders_for_data["old"] + "/" +
                       prepro_vals["original_file"])):
      # NOTE(review): file handle is never closed.
      data_by_row = open(
          self.folders_for_data["old"] + "/" + prepro_vals["original_file"],
          'r').read().split(prepro_vals["row_separator_char"])
      # Metadata header written before the rows so loaders can decode them.
      dataset_meta = {}
      dataset_meta["target_info"] = [
          prepro_vals["target_type"], prepro_vals["bin_range"],
          prepro_vals["target_val_pos"]
      ]
      dataset_meta["minimisations"] = prepro_vals["minimisations"]
      dataset_meta["alphas"] = self.found_alphas
      dataset_meta["fields_to_ignore"] = prepro_vals["fields_to_ignore"]
      if not for_viewer:
        name_for_new = prepro_vals["original_file"][
            0:prepro_vals["original_file"].rfind(".")]
        # NOTE(review): opened in append mode and never closed; re-running
        # the processing grows the same file.
        new_txt_file = open(
            self.folders_for_data["new"] + "/" + name_for_new + "_new.txt", "a")
        # NOTE(review): mutates the caller's prepro_vals dict in place.
        if (prepro_vals["bin_range"] == None): prepro_vals["bin_range"] = ""
        dataset_meta_str = json.dumps(dataset_meta)
        new_txt_file.write(dataset_meta_str)
        new_txt_file.write("\n--\n")
        if prepro_vals["rows_for_testing"]:
          new_testing_file = open(
              self.folders_for_data["old"] + "/" + name_for_new +
              "_testing.txt", "a")
        self.user_interface.print_console("WRITING PROCESSED DATASET...")
      if (len(data_by_row) > 1):
        # Preview mode only renders the first 8 rows.
        if (for_viewer):
          end = 8
        else:
          end = len(data_by_row)
        start = 0

        # Drop alpha classes recorded for fields the user now ignores.
        if (len(self.found_alphas) > 0):
          for ig in prepro_vals["fields_to_ignore"]:
            if (ig in self.found_alphas.keys()):
              del self.found_alphas[ig]

        if prepro_vals["ignore_first_row"]:
          start += 1
        for row_i in range(start, end):
          if (row_i not in prepro_vals["rows_for_testing"]):
            row = data_by_row[row_i].split(",")
            if row_i == start: self.row_len = len(row)
            row = self.strip_row_list(row)
            new_row, new_target_list, sing_bin_target = self.change_data_to_processed(
                dataset_meta, row, prepro_vals["target_val_pos"])
            # Drop the leading "/" the joiner leaves on the target string.
            new_target_list = new_target_list[1:]
            encoded_row_str = ""
            if not for_viewer:
              new_row.append(new_target_list)
            elif self.user_interface.is_viewing_trans:
              # "Translated" preview: show each field as its one-hot vector.
              encoded_row = []
              # NOTE(review): temp_new_row aliases new_row, so the None
              # placeholders inserted below also mutate new_row.
              temp_new_row = new_row
              for to_ig in prepro_vals["fields_to_ignore"]:
                temp_new_row.insert(to_ig, None)
              for el_i in range(0, len(temp_new_row)):
                if temp_new_row[el_i]:

                  vec = self.alpha_class_to_binary_vector(
                      temp_new_row[el_i], self.found_alphas[el_i])
                  if (len(vec) > 0): encoded_row.append(vec)

              encoded_row_str = ','.join(str(e) for e in encoded_row)

            if (len(encoded_row_str) > 0):
              row_str = encoded_row_str
            else:
              row_str = ','.join(str(e) for e in new_row)

            # Targets count as "encoded alphas" when they are not purely
            # numeric after removing the "/" separators.
            self.target_has_encoded_alphas = new_target_list and not new_target_list.replace(
                "/", "").replace(".", "").isdigit()

            if (self.user_interface.is_viewing_trans):
              encoded_targets = []
              for targ_pos in prepro_vals["target_val_pos"]:
                vec = self.alpha_class_to_binary_vector(
                    row[targ_pos], self.found_alphas[targ_pos])
                if (len(vec) > 0):
                  encoded_targets.append(vec)

              new_target_list_for_dis = "[" + (','.join(
                  str(e) for e in encoded_targets)) + "]"
            else:
              new_target_list_for_dis = "[" + new_target_list.replace("/",
                                                                      ",") + "]"
            # Make target structure and turn into string for viewing
            if (new_target_list_for_dis != "[]"):
              new_target_list_for_dis = "with target(s): " + new_target_list_for_dis
            else:
              new_target_list_for_dis = ""
            if (for_viewer and sing_bin_target and
                str(sing_bin_target).isdigit()):
              range_ = prepro_vals['bin_range']
              target_vec_example = self.populate_binary_vector(
                  int(sing_bin_target), int(range_))
              target_vec_ex_str = ','.join(str(e) for e in target_vec_example)
              target_vec_ex_str = " (as binary vector: [" + target_vec_ex_str + "] )"
              new_target_list_for_dis += target_vec_ex_str

            new_dataset.append(new_row)
            vis_sep = "\n"
            if (for_viewer):
              if (len(new_target_list_for_dis) > 0):
                vis_sep = "\n *** " + new_target_list_for_dis + " *** \n\n"
              new_dataset_str += row_str + vis_sep + "\n"
            else:
              new_txt_file.write(row_str + vis_sep)
              if (len(data_by_row) > 20):
                if (row_i % (int(len(data_by_row) / 6)) == 0):
                  # NOTE(review): int() truncates the fraction before the
                  # multiply, so percentage is always 0 — and it is never
                  # used; the message below reports raw counts instead.
                  percentage = int(row_i / len(data_by_row)) * 100
                  msg = "Written " + str(row_i) + "/" + str(
                      len(data_by_row)) + " rows"
                  self.user_interface.print_console(msg)

          elif not for_viewer:
            # Row reserved for testing: write it (minus targets) plus the
            # expected targets to the _testing.txt file.
            new_testing_row = []  # NOTE(review): never used
            testing_row = data_by_row[row_i].split(",")
            targs_show = []
            for targs in prepro_vals["target_val_pos"]:
              targs_show.append(testing_row[targs])
              # NOTE(review): deleting while iterating positions shifts the
              # indices of later targets — wrong fields are removed when
              # more than one target position is configured.
              del testing_row[targs]
            new_testing_file.write((','.join(
                str(e) for e in testing_row)) + "\n" +
                                   "...With correct targets: " + (','.join(
                                       str(e) for e in targs_show)) + "\n\n")

        # First preview of a (new) file: scan it for alpha classes.
        if (for_viewer and (len(self.found_alphas) == 0 or
                            prepro_vals["original_file"] != self.prev_file)):
          self.find_alpha_classes(data_by_row, prepro_vals["fields_to_ignore"])
          self.target_has_encoded_alphas = False

      self.prev_file = prepro_vals["original_file"]
      if not for_viewer:
        self.user_interface.print_console(
            "Finished processing " + name_for_new + ".txt, Check the " +
            self.folders_for_data["new"] + " folder")
        self.user_interface.render_dataset_opts(True)
    if (return_str):
      return new_dataset_str
    else:
      return new_dataset
166 |
  def find_alpha_classes(self, data_by_row, f_ig):
    """Collect the distinct non-numeric (alpha) values per field position.

    Rebinds self.found_alphas to {field position: [alpha values]}.  f_ig
    holds field positions to skip.  Scanning aborts early when the first
    scanned rows yield no alpha values at all (purely numeric dataset).
    """
    self.found_alphas = {}
    has_found_alpha = False
    # Start finding all classification alphas per field
    for row_i in range(1, len(data_by_row)):
      row = data_by_row[row_i].split(",")
      row = self.strip_row_list(row)
      for el_i in range(0, len(row)):
        # NOTE(review): positions are compared as strings here but as ints
        # in struct_dataset's ignore handling — confirm f_ig's element type.
        if str(el_i) not in f_ig:
          if (row_i == 1):
            self.found_alphas[el_i] = []
          elif row_i == 2 and not has_found_alpha:
            break

          element = self.real_strip(row[el_i])
          # Record anything that is not numeric (after removing "." / "-").
          if (element not in self.found_alphas[el_i] and
              not str(element).replace(".", "").replace("-", "").isdigit()):
            self.found_alphas[el_i].append(element)
            has_found_alpha = True
      if not has_found_alpha:
        break
188 |
189 | def alpha_class_to_binary_vector(self, alpha_val, dataset_meta_alphas_list):
190 | class_range = len(dataset_meta_alphas_list)
191 | if class_range > 0:
192 | if alpha_val:
193 | if (str(alpha_val) not in dataset_meta_alphas_list):
194 | bin_vector = []
195 | else:
196 | target = dataset_meta_alphas_list.index(alpha_val)
197 | bin_vector = self.populate_binary_vector(target, class_range)
198 | else:
199 | bin_vector = []
200 | else:
201 | bin_vector = [float(alpha_val)]
202 |
203 | return bin_vector
204 |
  def validate_prepro(self):
    """Collect and validate the preprocessing form values from the UI.

    Returns a dict of parsed settings; prepro_vals["error"] is "" when all
    checks pass.  NOTE(review): each failing check overwrites `error`, so
    only the LAST failure is ever reported to the user.
    """

    prepro_vals = {}
    valid_for_viewer = True  # NOTE(review): assigned but never used
    error = ""
    # Raw values straight from the Tk variables/widgets.
    prepro_vals["original_file"] = self.user_interface.prepro[
        "original_file"].get()
    prepro_vals["row_separator_char"] = self.user_interface.prepro[
        "row_separator_char"].get()
    prepro_vals["ignore_first_row"] = self.user_interface.prepro[
        "ignore_first_row"].get()
    prepro_vals["fields_to_min"] = self.user_interface.prepro[
        "fields_to_min"].get()
    prepro_vals["fields_to_ignore"] = self.user_interface.prepro[
        "fields_to_ignore"].get()
    prepro_vals["target_val_pos"] = self.user_interface.prepro[
        "target_val_pos"].get()
    prepro_vals["target_type"] = self.user_interface.prepro["target_type"].get()
    prepro_vals["rows_for_testing"] = self.user_interface.prepro[
        "rows_for_testing"].get()
    # NOTE(review): this tests the widget object itself (always truthy once
    # created), not its value — presumably .get() was intended; confirm.
    if not self.user_interface.prepro["bin_range"]:
      prepro_vals["bin_range"] = None
    else:
      prepro_vals["bin_range"] = self.user_interface.prepro["bin_range"].get()
    prepro_vals["error"] = ""

    prepro_vals["ignore_first_row"] = prepro_vals["ignore_first_row"] == "Yes"

    if prepro_vals["bin_range"]:
      if not prepro_vals["bin_range"].isdigit():
        error = "Invalid binary range, must be integer"
      else:
        prepro_vals["bin_range"] = int(prepro_vals["bin_range"])

    if not os.path.isfile(self.folders_for_data["old"] + "/" +
                          prepro_vals["original_file"]):
      error = "File does not exist or is not in " + self.folders_for_data[
          "old"] + " folder"

    if (prepro_vals["target_type"] == "--select--"):
      error = "You must choose a target type"

    # Rows held back for testing: either a comma list of ints or "a-b".
    rows_for_testing_list = self.user_interface.map_to_int_if_valid(
        prepro_vals["rows_for_testing"])
    if (rows_for_testing_list != False):
      prepro_vals["rows_for_testing"] = rows_for_testing_list
    else:
      as_range = prepro_vals["rows_for_testing"].split("-")
      if (len(as_range) == 2):
        if (as_range[0].isdigit() and as_range[1].isdigit()):
          prepro_vals["rows_for_testing"] = range(
              int(as_range[0]), int(as_range[1]))
        else:
          error = "Invalid rows for testing"
          prepro_vals["rows_for_testing"] = None
      # NOTE(review): any other malformed input silently keeps the raw
      # string — presumably should also be flagged invalid.

    # Target positions: comma list of ints or the keywords first/last.
    # NOTE(review): self.row_len is -1 until struct_dataset has run, so
    # "last" resolves to -2 on the first validation — confirm intended.
    field_targ_try = prepro_vals["target_val_pos"].split(",")
    targ_kwords = {"first": 0, "last": self.row_len - 1}
    prepro_vals["target_val_pos"] = []
    for pos in field_targ_try:
      if (pos.isdigit() or pos in targ_kwords):
        if (pos in targ_kwords):
          pos = targ_kwords[pos]
        else:
          pos = int(pos)
        prepro_vals["target_val_pos"].append(pos)
      else:
        field_targ_try = False
        break
    if not field_targ_try:
      error = "Invalid target position(s)"
    else:
      if (prepro_vals["target_val_pos"] != False):
        if (len(prepro_vals["target_val_pos"]) > 1 and
            prepro_vals["target_type"] == "Binary"):
          error = "If you are using binary vectors, you can only have one target position"

    field_to_ig_try = self.user_interface.map_to_int_if_valid(
        prepro_vals["fields_to_ignore"])
    if not field_to_ig_try:
      error = "Invalid values to ignore"
    else:
      prepro_vals["fields_to_ignore"] = field_to_ig_try

    def validate_divider(val):
      # Fall back to a divisor of 1 for non-numeric or zero input.
      if (not val.replace(".", "").isdigit() or val.replace(".", "") == "0"):
        return 1
      else:
        return float(val)

    # Minimisation (normalisation) divisors: either one divisor for "all"
    # fields with exceptions, or one divisor per listed field position.
    prepro_vals["minimisations"] = {}
    prepro_vals["minimisations"]["all"] = None
    prepro_vals["minimisations"]["except"] = []
    field_min_try = self.user_interface.map_to_int_if_valid(
        prepro_vals["fields_to_min"])
    if not field_min_try:
      if (prepro_vals["fields_to_min"] == "all"):
        prepro_vals["minimisations"]["all"] = validate_divider(
            self.user_interface.min_fields[0].get())
        prepro_vals["minimisations"][
            "except"] = self.user_interface.map_to_int_if_valid(
                self.user_interface.min_fields[1].get())
      else:
        error = "Invalid alpha to num translation"
    else:
      prepro_vals["fields_to_min"] = field_min_try
      c = 0
      for min_field in prepro_vals["fields_to_min"]:
        min_vals = self.user_interface.min_fields
        divider = validate_divider(min_vals[c].get())
        prepro_vals["minimisations"][min_field] = divider
        c += 1

    prepro_vals["error"] = error
    return prepro_vals
320 |
321 | def image_dir_to_matrix_txt(self, dirname):
322 | new_txt_file = open("processed_datasets/" + dirname + "_new.txt", "a")
323 | image_file_names = os.listdir(dirname)
324 | for image_file_name in image_file_names:
325 | if (image_file_name[0:1] != "."):
326 | pre_file_type_loc = image_file_name.rfind(".")
327 | image_name_data = image_file_name[0:pre_file_type_loc]
328 | target_val = image_name_data.split(",")[1]
329 | image_matrix = cv2.imread(dirname + "/" + image_file_name)
330 | image_matrix = cv2.cvtColor(image_matrix, cv2.COLOR_BGR2GRAY)
331 | c = 0
332 | new_txt_file.write(target_val + "")
333 | for row_px in range(0, len(image_matrix)):
334 | for col_px in range(0, len(image_matrix[0])):
335 | new_txt_file.write(str(image_matrix[row_px][col_px]) + ",")
336 | c += 1
337 |
  def change_data_to_processed(self, dataset_meta, row, target_pos=None):
    """Apply ignore/minimise/target rules to one raw CSV row.

    Returns (new_row, new_target_list, sing_bin_target): new_target_list is
    a "/"-joined string with a leading "/" (the caller strips it);
    sing_bin_target holds the raw value destined for binary-vector encoding
    when exactly one target and a bin range are configured.
    """
    new_row = []
    sing_bin_target = None
    new_target_list = ""
    if target_pos:
      # One target plus a configured bin range => encodable as one vector.
      is_valid_bin_target = (dataset_meta["target_info"][1] and
                             len(target_pos) == 1)
    if len(row) > 1:
      for el_i in range(0, len(row)):
        new_el = row[el_i]
        # NOTE(review): the "or []" only applies when fields_to_ignore is
        # falsy; if it holds str positions while el_i is an int, nothing is
        # ever ignored here — confirm the element types.
        if el_i not in (dataset_meta["fields_to_ignore"] or []):
          if (new_el == str(new_el)):  # i.e. the element is a string
            new_el = self.real_strip(new_el)
          if (dataset_meta["minimisations"]["except"] and
              str(new_el).replace(".", "").isdigit()):
            # Global divisor mode: divide unless this field is excepted.
            if dataset_meta["minimisations"]["all"] and (
                el_i not in dataset_meta["minimisations"]["except"]):
              new_el = str(
                  float(new_el) / dataset_meta["minimisations"]["all"])
            if not dataset_meta["minimisations"]["all"]:
              # Per-field divisors are keyed by str after the JSON
              # round-trip, hence the str(el_i) probe.
              if (str(el_i) in dataset_meta["minimisations"]):
                el_i = str(el_i)
              # NOTE(review): el_i may now be a str, so the target_pos
              # membership test below can no longer match an int position
              # for minimised fields — looks like a bug; confirm.
              if (el_i in dataset_meta["minimisations"]):
                new_el = str(
                    float(new_el) / dataset_meta["minimisations"][el_i])

          if target_pos:
            if el_i in target_pos:
              if is_valid_bin_target:
                sing_bin_target = new_el
              new_target_list += "/" + str(new_el)
            else:
              new_row.append(new_el)
          else:
            new_row.append(new_el)

    return new_row, new_target_list, sing_bin_target
375 |
  def load_matrix_data(self, to_retrieve, file_name, user_interface):
    """Open a processed dataset file and prepare for populate_matrices().

    Reads the JSON metadata header (line 0), normalises its alpha map to
    int keys, and resets the matrices/targets accumulators.  to_retrieve
    may be an int or the string "all" (meaning every data row).
    """
    self.user_interface = user_interface
    self.to_retrieve = to_retrieve
    self.file_name = file_name
    self.user_interface.print_console("Loading " + str(self.to_retrieve) +
                                      " items from " + self.file_name +
                                      "... \n")
    # NOTE(review): the file handle is never closed.
    self.dataset = open(file_name, 'r').read().split("\n")
    self.dataset_meta = json.loads(self.dataset[0])
    self.dataset_meta["alphas"] = self.sort_dataset_meta_alphas(
        self.dataset_meta["alphas"])
    self.has_alphas = self.meta_has_alphas(self.dataset_meta)
    self.matrices = []
    self.targets = []
    # -2 accounts for the metadata header and "--" separator lines.
    self.max_data_amount = int(len(self.dataset)) - 2
    if (self.to_retrieve == "all"):
      self.to_retrieve = self.max_data_amount
393 |
394 | def real_strip(self, string, extra_chars=None):
395 | discount_chars = ("'", '"')
396 | if extra_chars:
397 | discount_chars = discount_chars + extra_chars
398 | string = string.strip()
399 | for char in discount_chars:
400 | if (len(string) >= 2):
401 | if (string[0] == char and string[-1] == char):
402 | string = string[1:-1]
403 | break
404 | return string
405 |
406 | def strip_row_list(self, row):
407 | if not self.real_strip(row[-1]):
408 | del row[-1]
409 | elif not self.real_strip(row[0]):
410 | del row[0]
411 | return row
412 |
413 | def sort_dataset_meta_alphas(self, dataset_meta_alphas):
414 | keys = []
415 | for field_pos in dataset_meta_alphas:
416 | keys.append(int(field_pos))
417 | keys.sort()
418 | new_meta = {}
419 | for key in keys:
420 | new_meta[key] = dataset_meta_alphas[str(key)]
421 | return new_meta
422 |
423 | def meta_has_alphas(self, meta):
424 | has_alphas = False
425 | for i in meta["alphas"]:
426 | if (len(meta["alphas"][i]) > 0):
427 | has_alphas = True
428 | break
429 |
430 | return has_alphas
431 |
  def populate_matrices(self):
    """Parse the loaded dataset rows into input arrays and target lists.

    Rows are comma-separated with the LAST field holding the "/"-separated
    targets.  Fills self.matrices / self.targets, printing progress and
    honouring the UI's cancel flag.
    """

    px_count = 0  # NOTE(review): never used
    done_msg = "Finished loading data \n "
    prev_pos_of_matrix = 0  # NOTE(review): never used
    # Data rows start at index 2 (after the JSON header and "--" separator).
    # NOTE(review): the range stops before to_retrieve - 1, so the last
    # requested row is skipped — looks like an off-by-one; confirm.
    for i in range(2, self.to_retrieve - 1):
      if self.user_interface.cancel_training:
        done_msg = "**CANCELLED** \n "
        break
      flat_single_item = self.dataset[i].split(",")
      if len(flat_single_item) > 0:
        target_string = flat_single_item[-1]
        target_vals = target_string.split("/")
        del flat_single_item[-1]
        if self.has_alphas:
          # Keep strings: alpha fields are one-hot encoded downstream.
          item_as_array = np.array(flat_single_item)
        else:
          item_as_array = np.asarray(flat_single_item, dtype=np.float32)
        self.matrices.append(item_as_array)
        self.targets.append(target_vals)
        # Progress report roughly every fifth of the load.
        if self.to_retrieve > 10:
          if i % (int(self.to_retrieve / 5)) == 0:
            self.user_interface.print_console("Loaded " + str(i) + "/" +
                                              str(self.to_retrieve))
    self.user_interface.print_console(done_msg)
457 |
458 | def prep_matrix_for_input(self, matrix):
459 | matrix_float = matrix.astype(np.float32)
460 | matrix_for_input = matrix_float / float(255)
461 | return matrix_for_input
462 |
463 | def get_avaliable_datasets(self, from_):
464 | # Search orginial_datasets folder for text files
465 | avaliable_txts = []
466 | for f in os.listdir(self.folders_for_data[from_]):
467 | if (f[-4:] == ".txt"):
468 | avaliable_txts.append(f)
469 | return avaliable_txts
470 |
471 | def populate_binary_vector(self, target, output_count):
472 | # Take index value, and construct binary vector where element of index is 1
473 | vector = []
474 | target = int(target)
475 | if (target < output_count):
476 | for i in range(0, int(output_count)):
477 | vector.append(0)
478 | vector[target] = 1
479 | return vector
480 | else:
481 | return 0
482 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | from mtTkinter import *
2 | from user_interface_handler import user_interface
3 | import sys
4 |
# Application entry point: build the Tk root, attach the UI, and block in
# the Tk event loop until the window is closed.
tk_main = Tk()
# NOTE(review): this rebinding shadows the imported user_interface class,
# making the class name unreachable in this module afterwards.
user_interface = user_interface(tk_main)
tk_main.mainloop()
8 |
9 |
--------------------------------------------------------------------------------
/misc_py/pre.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import cv2,numpy as np,os
3 | from pprint import pprint
4 | np.set_printoptions(threshold=np.nan)
class data_preprocessor_handler:
    """Scratch preprocessing experiments: image flattening, matrix viewing
    and text normalisation (superseded by data_handler.py)."""

    def image_dir_to_matrix_txt(self, dirname):
        """Append every non-hidden image in *dirname*, grayscaled and
        flattened, to <dirname>.txt (target taken from the filename)."""
        # NOTE(review): append mode and the handle is never closed.
        new_txt_file = open(dirname+".txt", "a")
        image_file_names = os.listdir(dirname)
        for image_file_name in image_file_names:
            if(image_file_name[0:1] != "."):
                pre_file_type_loc = image_file_name.rfind(".")
                image_name_data = image_file_name[0:pre_file_type_loc]
                # Filenames are expected as "<name>,<target>.<ext>".
                target_val = image_name_data.split(",")[1]
                image_matrix = cv2.imread(dirname+"/"+image_file_name)
                image_matrix = cv2.cvtColor(image_matrix, cv2.COLOR_BGR2GRAY)
                c = 0
                # NOTE(review): no separator between target and first pixel.
                new_txt_file.write(target_val+"")
                for row_px in range(0,len(image_matrix)):
                    for col_px in range(0,len(image_matrix[0])):
                        new_txt_file.write(str(image_matrix[row_px][col_px]) + ",")
                        c+=1

        # NOTE(review): NameError if the directory held no images (c unbound).
        print(c)

    def show_matrices_from_file(self,file):
        """Rebuild fixed-size (1x30) rows from a flat comma-separated file
        and print the recovered targets."""
        self.matrix_width = 1
        self.matrix_height = 30
        self.to_retrieve = 30
        self.input_total = self.matrix_width * self.matrix_height
        self.data_set = open(file, 'r').read().split(",")
        matrices = []
        targets = []
        px_count = 0  # NOTE(review): unused
        prev_pos_of_matrix = 0
        target_pos_in_row = -1  # target is the last element of each row
        for i in range(1,self.to_retrieve):
            # Each row occupies input_total values plus its target.
            pos_of_matrix = (i*(self.input_total))+i
            flat_single_item = self.data_set[prev_pos_of_matrix:pos_of_matrix]
            # print(flat_single_item)
            if(len(flat_single_item)>0):
                target_val = flat_single_item[target_pos_in_row]
                del flat_single_item[target_pos_in_row]
                item_as_array = np.asarray(flat_single_item)
                array_as_matrix = np.reshape(item_as_array,(self.matrix_width, self.matrix_height),order="A")
                matrices.append(array_as_matrix)
                targets.append(target_val)

                #cv2.imshow(str(i)+"...."+str(target_val),array_as_matrix)
            prev_pos_of_matrix = pos_of_matrix
        #print(matrices)
        print(targets)

    def normalise_text_file(self,text_file):
        """Divide numeric fields by 255 and append the rewritten rows
        (target kept last) to <text_file>_new.txt."""
        target_val_pos = 1
        elements_to_ignore = [target_val_pos,0]
        # NOTE(review): append mode and the handle is never closed.
        new_txt_file = open(text_file+"_new.txt", "a")
        # Dead draft kept as a string literal (auto-detecting the row
        # separator); it contains a syntax error if ever revived.
        '''p_schar_intervals = []
        text_file = open(file, 'r').read()
        for schars in poss_split_chars:
            p_schar_repeats.append(0)

        for char in text_file:
            for schar_c in range(0,len(poss_split_chars)):
                if(poss_split_chars[schar_c] != char):
                    p_schar_intervals[schar_c] += 1
                else:
                    if(p_schar_intervals[schar_c] in range(real_row_count-1,real_row_count+1):
                        break'''

        data_by_row = open(text_file, 'r').read().split("\n")
        for row in data_by_row:
            row = row.split(",")
            new_row = []
            r_count = 0
            for element in row:
                if(r_count not in elements_to_ignore):
                    if(element.strip().isdigit()):
                        element = float(element)/255
                    new_row.append(str(element))
                r_count += 1
            # NOTE(review): IndexError on rows with fewer than 2 elements
            # (e.g. a trailing blank line from the split above).
            new_row.append(row[target_val_pos])
            row_str = ','.join(str(e) for e in new_row)
            row_str += ","
            new_txt_file.write(row_str)

    def find(self,img):
        # NOTE(review): broken — "file" is read before assignment (the
        # parameter is img), so this raises UnboundLocalError if called.
        file = open(file, 'r').read()
        clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(1,1))
        img = clahe.apply(img)
        cv2.imshow('c',img)
93 |
94 |
95 |
def real_strip(string):
    """Strip whitespace, then remove one matching pair of surrounding
    quote characters.

    Fixes over the previous version: an empty (or all-whitespace) input
    raised IndexError on string[0]; a lone quote character was stripped to
    "" even though it is not a pair.  The len >= 2 guard matches the
    sibling implementation in data_handler.data_processor.real_strip.
    """
    discount_chars = ["'", '"']
    string = string.strip()
    for char in discount_chars:
        if(len(string) >= 2 and string[0] == char and string[-1] == char):
            string = string[1:-1]
            break
    return string
104 |
def main():
    # Smoke-test real_strip; the data_preprocessor_handler calls below are
    # kept as a scratchpad of earlier experiments.
    print(real_strip('"testsdgheh"'))
    #data_handler = data_preprocessor_handler()
    # data_handler.image_dir_to_matrix_txt("test-_imgs")
    # data_handler.normalise_text_file("digits.txt")
    #data_handler.format("train.txt")
    # data_handler.show_eq("2,2.png")
    # cv2.waitKey(0)
    #cv2.destroyWindows()
# NOTE(review): runs at import time.
main()
--------------------------------------------------------------------------------
/misc_py/test.py:
--------------------------------------------------------------------------------
import cv2,numpy as np,os
import sys
from pprint import pprint
# Print arrays in full.  Modern NumPy rejects np.nan as a threshold (it
# must be an int), so use the largest int instead of the old threshold=np.nan.
np.set_printoptions(threshold=sys.maxsize)
class data_preprocessor_handler:
    """Scratch experiments for image/CSV preprocessing (superseded by
    data_handler.py)."""

    def image_dir_to_matrix_txt(self, dirname):
        """Append every non-hidden image in *dirname*, grayscaled and
        flattened, to <dirname>.txt (target taken from the filename)."""
        # NOTE(review): append mode and the handle is never closed.
        new_txt_file = open(dirname+".txt", "a")
        image_file_names = os.listdir(dirname)
        for image_file_name in image_file_names:
            if(image_file_name[0:1] != "."):
                pre_file_type_loc = image_file_name.rfind(".")
                image_name_data = image_file_name[0:pre_file_type_loc]
                # Filenames are expected as "<name>,<target>.<ext>".
                target_val = image_name_data.split(",")[1]
                image_matrix = cv2.imread(dirname+"/"+image_file_name)
                image_matrix = cv2.cvtColor(image_matrix, cv2.COLOR_BGR2GRAY)
                c = 0
                # NOTE(review): no separator between target and first pixel.
                new_txt_file.write(target_val+"")
                for row_px in range(0,len(image_matrix)):
                    for col_px in range(0,len(image_matrix[0])):
                        new_txt_file.write(str(image_matrix[row_px][col_px]) + ",")
                        c+=1

        # NOTE(review): NameError if the directory held no images (c unbound).
        print(c)

    def show_matrices_from_file(self,file):
        """Rebuild 22x22 matrices from a flat comma-separated file and
        display them with OpenCV."""
        matrix_width = 22
        to_retreive = 5
        data_set = open(file, 'r').read().split(",")
        matrices = []
        targets = []
        px_count = 0
        # NOTE(review): hard-coded probe index; IndexError on smaller files.
        print(int(data_set[1569]))
        for i in range(to_retreive):
            matrix = np.zeros((matrix_width,matrix_width), dtype=np.uint8)
            for px_col in range(matrix_width):
                for px_row in range(matrix_width):
                    # Every (width*width + 1)-th value is a row's target.
                    if(px_count%((matrix_width*matrix_width)+1)==0):
                        targets.append(int(data_set[px_count]))
                    else:
                        matrix[px_col][px_row] = float(data_set[px_count])
                    px_count += 1
            matrices.append(matrix)
        print(targets)


        for i in range(0,len(matrices)):
            cv2.imshow(str(i)+","+str(targets[i]),matrices[i])


    def format(self,file):
        """Strip a raw export down to digits and dots with "," separators,
        appended to voice_new.txt.

        NOTE(review): the method name shadows the builtin format at the
        class level, and the conversion relies on CR ("\\r") line endings.
        """
        # NOTE(review): neither file handle is ever closed.
        new_txt_file = open("voice_new.txt", "a")
        file = open(file, 'r').read()
        for i in range(0,len(file)):
            w = ""
            if(file[i] == "\r" or file[i] == ","):
                w = ","
            elif(file[i].isdigit() == True or file[i] == "."):
                w = file[i]
            new_txt_file.write(w)

    def see_chars(self,file):
        """Print each comma-separated token after collapsing the first CRLF
        and first space (counts of 1 are deliberate)."""
        file = open(file, 'r').read().replace("\r\n", ",",1).replace(" ", "",1).split(",")
        for i in range(0,len(file)):
            print(file[i])
65 |
def main():
    # Scratch driver: currently rewrites voice.txt via format(); the other
    # experiments are left commented out.
    data_handler = data_preprocessor_handler()
    # data_handler.image_dir_to_matrix_txt("test_imgs")
    # data_handler.show_matrices_from_file("test_imgs.txt")
    data_handler.format("voice.txt")
    # cv2.waitKey(0)
    #cv2.destroyWindows()
# NOTE(review): runs at import time.
main()
--------------------------------------------------------------------------------
/mtTkinter.py:
--------------------------------------------------------------------------------
1 | """
2 | Thread-safe version of Tkinter
3 |
4 | Copyright (c) 2009, Allen B. Taylor
5 | Copyright (c) 2017, baldk
6 | Copyright (c) 2018, RedFantom
7 |
8 | This module is free software: you can redistribute it and/or modify
9 | it under the terms of the GNU Lesser Public License as published by
10 | the Free Software Foundation, either version 3 of the License, or
11 | (at your option) any later version.
12 |
13 | This program is distributed in the hope that it will be useful,
14 | but WITHOUT ANY WARRANTY; without even the implied warranty of
15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 | GNU Lesser Public License for more details.
17 |
18 | You should have received a copy of the GNU Lesser Public License
19 | along with this program. If not, see <https://www.gnu.org/licenses/>.
20 |
21 | Usage:
22 |
23 | import mtTkinter as Tkinter
24 | # Use "Tkinter." as usual.
25 |
26 | or
27 |
28 | from mtTkinter import *
29 | # Use Tkinter module definitions as usual.
30 |
31 | This module modifies the original Tkinter module in memory, making all
32 | functionality thread-safe. It does this by wrapping the Tk class' tk
33 | instance with an object that diverts calls through an event queue when
34 | the call is issued from a thread other than the thread in which the Tk
35 | instance was created. The events are processed in the creation thread
36 | via an 'after' event.
37 |
38 | Note that, because it modifies the original Tkinter module (in memory),
39 | other modules that use Tkinter (e.g., Pmw) reap the benefits automagically
40 | as long as mtTkinter is imported at some point before extra threads are
41 | created.
42 |
43 | Authors:
44 | Allen B. Taylor, a.b.taylor@gmail.com
45 | RedFantom, redfantom@outlook.com
46 | baldk, baldk@users.noreply.github.com
47 |
48 | Docstrings and line-comments wrapped to 80 characters, code wrapped to
49 | 100 characters.
50 | """
51 | import sys
52 | import threading
53 | if sys.version_info[0] == 2:
54 | # Python 2
55 | from Tkinter import *
56 | import Queue as queue
57 | else:
58 | # Python 3
59 | from tkinter import *
60 | import queue
61 |
62 |
63 | class _Tk(object):
64 | """Wrapper for underlying attribute tk of class Tk"""
65 |
66 | def __init__(self, tk, mt_debug=0, mt_check_period=10):
67 | """
68 | :param tk: Tkinter.Tk.tk Tk interpreter object
69 | :param mt_debug: Determines amount of debug output.
70 | 0 = No debug output (default)
71 | 1 = Minimal debug output
72 | ...
73 | 9 = Full debug output
74 | :param mt_check_period: Amount of time in milliseconds (default
75 | 10) between checks for out-of-thread events when things are
76 | otherwise idle. Decreasing this value can improve GUI
77 | responsiveness, but at the expense of consuming more CPU
78 | cycles.
79 |
80 | # TODO: Replace custom logging functionality with standard
81 | # TODO: logging.Logger for easier access and standardization
82 | """
83 | self._tk = tk
84 |
85 | # Create the incoming event queue
86 | self._event_queue = queue.Queue(1)
87 |
88 | # Identify the thread from which this object is being created
89 | # so we can tell later whether an event is coming from another
90 | # thread.
91 | self._creation_thread = threading.current_thread()
92 |
93 | # Create attributes for kwargs
94 | self._debug = mt_debug
95 | self._check_period = mt_check_period
96 | # Destroying flag to be set by the .destroy() hook
97 | self._destroying = False
98 |
99 | def __getattr__(self, name):
100 | """
101 | Diverts attribute accesses to a wrapper around the underlying tk
102 | object.
103 | """
104 | return _TkAttr(self, getattr(self._tk, name))
105 |
106 |
class _TkAttr(object):
    """Thread-safe callable attribute wrapper"""

    def __init__(self, tk, attr):
        # tk: the owning _Tk wrapper; attr: the real attribute of the
        # underlying tk interpreter object being proxied.
        self._tk = tk
        self._attr = attr

    def __call__(self, *args, **kwargs):
        """
        Thread-safe method invocation. Diverts out-of-thread calls
        through the event queue. Forwards all other method calls to the
        underlying tk object directly.
        """

        # Check if we're in the creation thread
        if threading.current_thread() == self._tk._creation_thread:
            # We're in the creation thread; just call the event directly
            if self._tk._debug >= 8 or \
                    self._tk._debug >= 3 and self._attr.__name__ == 'call' and \
                    len(args) >= 1 and args[0] == 'after':
                print('Calling event directly:', self._attr.__name__, args, kwargs)
            return self._attr(*args, **kwargs)
        else:
            if not self._tk._destroying:
                # We're in a different thread than the creation thread;
                # enqueue the event, and then wait for the response.
                response_queue = queue.Queue(1)
                if self._tk._debug >= 1:
                    print('Marshalling event:', self._attr.__name__, args, kwargs)
                # put() may block up to 1s: the event queue holds only one
                # event and is drained by _check_events on the GUI thread.
                self._tk._event_queue.put((self._attr, args, kwargs, response_queue), True, 1)
                is_exception, response = response_queue.get(True, None)

                # Handle the response, whether it's a normal return value or
                # an exception.
                if is_exception:
                    ex_type, ex_value, ex_tb = response
                    # NOTE(review): re-raising this way loses the original
                    # traceback (ex_tb becomes a constructor argument).
                    raise ex_type(ex_value, ex_tb)
                return response
            # Calls made while the widget is being destroyed are dropped
            # and implicitly return None.
145 |
146 |
def _Tk__init__(self, *args, **kwargs):
    """
    Hook for Tkinter.Tk.__init__ method
    :param self: Tk instance
    :param args, kwargs: Arguments for Tk initializer
    """
    # We support some new keyword arguments that the original __init__ method
    # doesn't expect, so separate those out before doing anything else.
    new_kwnames = ('mt_check_period', 'mt_debug')
    new_kwargs = {
        kw_name: kwargs.pop(kw_name) for kw_name in new_kwnames
        if kwargs.get(kw_name, None) is not None
    }

    # Call the original __init__ method, creating the internal tk member.
    # (The original is stashed under this name when the module replaces
    # Tk.__init__ with this hook.)
    self.__original__init__mtTkinter(*args, **kwargs)

    # Replace the internal tk member with a wrapper that handles calls from
    # other threads.
    self.tk = _Tk(self.tk, **new_kwargs)

    # Set up the first event to check for out-of-thread events.
    self.after_idle(_check_events, self)
170 |
171 |
# Define a hook for class Tk's destroy method.
def _Tk_destroy(self):
    """Mark the wrapper as shutting down, then run the original destroy.

    Setting _destroying first makes _TkAttr.__call__ drop any further
    cross-thread calls instead of enqueueing them into a dying pump.
    """
    self.tk._destroying = True
    self.__original__destroy()
176 |
177 |
def _check_events(tk):
    """Checks events in the queue on a given Tk instance.

    Runs on the GUI thread. Drains all pending cross-thread call requests,
    executes each, and ships the result (or the exception info) back via
    the per-call response queue. Always reschedules itself: immediately
    (after_idle) if work was done, else after tk.tk._check_period ms.
    """

    used = False
    try:
        # Process all enqueued events, then exit.
        while True:
            try:
                # Get an event request from the queue.
                method, args, kwargs, response_queue = tk.tk._event_queue.get_nowait()
            except queue.Empty:
                # No more events to process.
                break
            else:
                # Call the event with the given arguments, and then return
                # the result back to the caller via the response queue.
                used = True
                if tk.tk._debug >= 2:
                    print('Calling event from main thread:', method.__name__, args, kwargs)
                try:
                    response_queue.put((False, method(*args, **kwargs)))
                except SystemExit:
                    raise # Raises original SystemExit
                except Exception:
                    # Calling the event caused an exception; return the
                    # exception back to the caller so that it can be raised
                    # in the caller's thread.
                    from sys import exc_info # Python 2 requirement
                    ex_type, ex_value, ex_tb = exc_info()
                    response_queue.put((True, (ex_type, ex_value, ex_tb)))
    finally:
        # Schedule to check again. If we just processed an event, check
        # immediately; if we didn't, check later.
        if used:
            tk.after_idle(_check_events, tk)
        else:
            tk.after(tk.tk._check_period, _check_events, tk)
215 |
216 |
"""Perform in-memory modification of Tkinter module"""
# Monkey-patch at import time: any Tk instance created after this module
# is imported gets the thread-safe wrapper automatically.

# Replace Tk's original __init__ with the hook (original kept under a
# mangled name so the hook can delegate to it).
Tk.__original__init__mtTkinter = Tk.__init__
Tk.__init__ = _Tk__init__

# Replace Tk's original destroy with the hook.
Tk.__original__destroy = Tk.destroy
Tk.destroy = _Tk_destroy
225 |
--------------------------------------------------------------------------------
/neural_network_handler.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import cv2, re, numpy as np, random, math, time, thread, decimal, tkMessageBox, tkSimpleDialog, timeit
3 |
4 |
class neural_network:
  """Fully-connected feed-forward neural network trained by backpropagation.

  The network is configured through initilize_nn() (spelling preserved for
  existing callers) rather than __init__, so the UI layer can drive
  repeated training runs. Progress, results and cancellation all flow
  through the user_interface object supplied by the caller.
  """

  # Construct object to develop specific network structure
  def initilize_nn(self, hidden_layers, input_count, output_count, matrix_data,
                   matrix_targets, biases_for_non_input_layers,
                   learning_constant, testing_mode, weight_range, epochs,
                   data_to_test, dataset_meta, data_total, has_alphas,
                   user_interface):
    """Store hyperparameters and build the neuron/weight structures.

    hidden_layers: list of neuron counts, one entry per hidden layer.
    data_to_test: percentage of the dataset reserved for success
      measurement (converted to an absolute row count below).
    biases_for_non_input_layers: one bias input value per non-input
      layer; 0 disables the bias for that layer.
    weight_range: one value (fixed bound) or [low, high] for the random
      initial weights.
    Does nothing if the user has already cancelled training.
    """
    self.user_interface = user_interface
    if not self.user_interface.cancel_training:
      # Set all values from request if not cancelled
      self.user_interface.print_console(
          "\n\n\n--------------------------- \n Constructing neural network \n\n"
      )
      self.all_weights = []
      self.nn_neurons = []
      self.biases_weights = []
      self.epochs = epochs
      # Convert the test percentage into an absolute number of rows.
      divider_to_test = float(data_to_test) / 100.0
      self.test_data_amount = int(round(divider_to_test * data_total))
      self.dataset_meta = dataset_meta
      self.has_alphas = has_alphas
      self.matrix_data = matrix_data
      self.hidden_layers = hidden_layers
      self.matrix_targets = matrix_targets
      self.learning_constant = learning_constant
      self.output_count = output_count
      self.input_count = input_count
      self.testing_mode = testing_mode
      self.biases_for_non_input_layers = biases_for_non_input_layers
      self.weight_range = weight_range
      self.success_records = []
      # Small datasets skip some per-epoch UI label updates (see train()).
      self.is_small_data = len(self.matrix_targets) <= 1000

      self.populate_nn_neurons()
      self.populate_all_weights()

  # Design neuron structure based on requested amounts
  def populate_nn_neurons(self):
    """Allocate zeroed activation vectors: input, hidden layers, output."""
    nn_inputs = np.zeros(self.input_count)
    nn_outputs = np.zeros(self.output_count)  # Start will zero values
    self.nn_neurons.append(nn_inputs)
    for i in self.hidden_layers:
      hidden_layer = np.zeros(i)
      self.nn_neurons.append(hidden_layer)
    self.nn_neurons.append(nn_outputs)

  def populate_all_weights(self):
    """Create random weights for every non-input layer, plus bias weights.

    all_weights[i] is a list of weight vectors, one per neuron in layer
    i+1, each sized to the previous layer. biases_weights[i] is either an
    empty list (bias disabled) or a weight vector for layer i+1.
    """
    for neuron_layer in range(1, len(
        self.nn_neurons)):  # For all neuron layers, process weight values
      layer_length = len(self.nn_neurons[neuron_layer])
      weight_layer = []

      for single_neuron in range(0, layer_length):
        prev_layer_count = len(self.nn_neurons[neuron_layer - 1])
        neuron_weights = self.initilize_weights(
            prev_layer_count)  # Produce weight values for parent neuron
        # NOTE(review): weights_change_record_neuron is never used after
        # this point - looks like a leftover from a momentum feature.
        weights_change_record_neuron = np.zeros(prev_layer_count)

        weight_layer.append(neuron_weights)

      self.all_weights.append(weight_layer)

    # Do the same for bias weights
    for layer_count in range(0, len(self.biases_for_non_input_layers)):
      single_bias_weights = []
      # NOTE(review): single_bias_weights_change is also unused below.
      single_bias_weights_change = []
      if (self.biases_for_non_input_layers[layer_count] != 0):
        bias_input_count = len(self.nn_neurons[layer_count + 1])
        single_bias_weights = self.initilize_weights(bias_input_count)
        single_bias_weights_change = np.zeros(bias_input_count)
      self.biases_weights.append(single_bias_weights)

  def initilize_weights(
      self, size):  # Get weight values as random values within bounds
    """Return `size` uniform random weights drawn from self.weight_range.

    A single-element range degenerates to a constant (low == high).
    """
    if (len(self.weight_range) == 1):
      upper_bound = self.weight_range[0]
      lower_bound = upper_bound
    else:
      upper_bound = self.weight_range[1]
      lower_bound = self.weight_range[0]

    return np.random.uniform(low=lower_bound, high=upper_bound, size=(size))

  def feed_forward(self, matrix):
    """Propagate one data row through the network (sigmoid activations).

    Each layer's activations are the sigmoid of (weights . previous layer
    + bias contribution, when that layer's bias is enabled).
    """

    self.populate_input_layer(matrix)  # Send single data row to network

    for after_input_layer in range(1, len(self.nn_neurons)):
      hidden_neuron_sums = np.dot(
          np.asarray(self.all_weights[after_input_layer - 1]),
          self.nn_neurons[after_input_layer - 1])
      # Empty list means the bias for this layer was disabled (value 0).
      if len(self.biases_weights[after_input_layer - 1]) > 0:
        bias_vals = (self.biases_for_non_input_layers[after_input_layer - 1] *
                     self.biases_weights[after_input_layer - 1])
        hidden_neuron_sums += bias_vals
      self.nn_neurons[after_input_layer] = self.activate_threshold(
          hidden_neuron_sums, "sigmoid")

  def populate_input_layer(
      self, data):  # Put data row on to input layer ready for feed forward
    """Load one data row into the input layer.

    Categorical (alpha) datasets are expanded into concatenated one-hot
    binary vectors via the data processor; numeric rows are used as-is.
    """
    if (self.has_alphas):
      encoded_input = []
      item_i = 0
      for item_pos in self.dataset_meta["alphas"]:
        if (int(item_pos) not in self.dataset_meta["target_info"][2]
           ):  #If the value is not a target value, add to input
          # Process each bit of data, and construct vector if values are classified
          bin_vec = self.user_interface.data_processor.alpha_class_to_binary_vector(
              data[item_i], self.dataset_meta["alphas"][item_pos])
          encoded_input += bin_vec
        item_i += 1

    else:
      encoded_input = data
    self.nn_neurons[0] = encoded_input

  # Class-level counters shared across back_propagate() calls; reset per
  # epoch in train().
  testing_output_mode = False
  test_counter = 0          # rows processed in the current epoch
  correct_count = 0         # successful predictions within the test partition
  error_by_1000 = 0         # errors accumulated per 1000-row window
  error_by_1000_counter = 1 # position within the current 1000-row window
  output_error_total = 0

  def construct_target_for_bp(self, target_val):
    """Convert a raw target row into a binary target vector.

    Numeric "Binary" targets use populate_binary_vector; otherwise each
    alpha target value becomes a one-hot segment, concatenated in order.
    """
    # Construct binary vector if numeric classification
    if (self.dataset_meta["target_info"][0] == "Binary" and
        str(target_val[0]).isdigit()):
      target_vector = self.user_interface.data_processor.populate_binary_vector(
          target_val[0], self.output_count)
    else:
      # Construct binary vector if alpha classification
      target_vector = []
      t_i = 0
      for t_val in target_val:
        t_pos = self.dataset_meta["target_info"][2][t_i]
        bin_vec = self.user_interface.data_processor.alpha_class_to_binary_vector(
            t_val, self.dataset_meta["alphas"][t_pos])
        target_vector += bin_vec
        t_i += 1
    return target_vector

  def back_propagate(self, target_val, repeat_count):
    """Score the latest feed_forward() output, then update all weights.

    target_val: raw target row (converted below).
    repeat_count: current epoch number (unused in the body - kept for the
      caller's signature).
    Also maintains the rolling success/error counters used by the graphs.
    """

    # Ready target values to be compared to output in conforming structure
    target_vector = self.construct_target_for_bp(target_val)

    # Determine how success must be judged
    if (len(self.nn_neurons[-1]) > 1):
      outputs_as_list = self.nn_neurons[-1].tolist()
      # Judge by one-hot encoding output value being index of highest target value
      success_condition = (outputs_as_list.index(
          max(outputs_as_list)) == target_vector.index(max(target_vector)))
    else:
      # Judge by accuracy of real value
      # NOTE(review): target_vector is a list here, so this compares a
      # rounded scalar against a list - single-output success looks like
      # it can never be True; confirm intended.
      success_condition = (round(self.nn_neurons[-1][0]) == target_vector)

    # Measure/track success for graphs (only rows in the test partition at
    # the end of the epoch are counted).
    if (self.test_counter >= len(self.matrix_data) - self.test_data_amount):
      if success_condition:
        self.correct_count += 1
      if not success_condition:
        self.error_by_1000 += 1

    if self.error_by_1000_counter % 1000 == 0:
      # Feed error data to graph
      self.user_interface.animate_graph_figures(0, self.error_by_1000 / 10)
      self.error_by_1000 = 0
      self.error_by_1000_counter = 0

    # The backpropagation. Start at output layer, and work backwards...
    for weight_layer_count in range(len(self.all_weights) - 1, -1, -1):

      # Get neuron values of given layer, and add dimension for conforming with activated_to_sum_step
      weight_neuron_vals = np.expand_dims(
          self.nn_neurons[weight_layer_count + 1], axis=1)
      # NOTE(review): target_vector gets an extra axis every iteration but
      # is only read on the first (output-layer) pass, so later expansions
      # are harmless dead work.
      target_vector = np.expand_dims(target_vector, axis=1)

      # Derivative of the sigmoid expressed via the activated values.
      activated_to_sum_step = weight_neuron_vals * (1 - weight_neuron_vals)

      # If output layer (first step of BP), compare to target value
      if (weight_layer_count == len(self.all_weights) - 1):
        back_prop_cost_to_sum = (
            weight_neuron_vals - target_vector) * activated_to_sum_step
      else:  # Otherwise, compare to previous propagated layer values
        trans_prev_weights = np.asarray(
            self.all_weights[weight_layer_count + 1]).transpose()
        back_prop_cost_to_sum = np.dot(
            trans_prev_weights, back_prop_cost_to_sum) * activated_to_sum_step

      # If biases being used, BP them too.
      if len(self.biases_weights[weight_layer_count]) > 0:
        current_bias_weight_vals = self.biases_weights[weight_layer_count]
        final_bias_change = self.learning_constant * back_prop_cost_to_sum.flatten(
        )
        self.biases_weights[
            weight_layer_count] = current_bias_weight_vals - final_bias_change

      # Get neuron values on layer ahead and BP to the weights
      input_neuron_vals = np.expand_dims(
          self.nn_neurons[weight_layer_count], axis=1)
      full_back_prop_sum_to_input = np.dot(back_prop_cost_to_sum,
                                           input_neuron_vals.transpose())

      # Update weight values using learning rate
      current_weight_vals = self.all_weights[weight_layer_count]
      new_weight_vals = current_weight_vals - (
          self.learning_constant * full_back_prop_sum_to_input)
      self.all_weights[weight_layer_count] = new_weight_vals

    self.test_counter += 1
    self.error_by_1000_counter += 1

  def train(self):
    """Run the configured number of epochs over the dataset.

    Per epoch: feed-forward + backpropagate every row, measure success on
    the reserved test partition, and push timing/success figures to the
    UI. Honors user_interface.cancel_training between rows and epochs,
    then prints a summary to the console.
    """
    if not self.user_interface.cancel_training:
      success_list = []
      hidden_layer_str = ""
      for layerc in self.hidden_layers:  # Construct a list of hidden layer values for console history
        hidden_layer_str += str(layerc) + ","
      hidden_layer_str = hidden_layer_str[0:-1]
      cancel_training = False
      # Output main neural network hyperparameters for console history r
      self.user_interface.print_console(" **TRAINING** \n")
      self.user_interface.print_console("With learning rate: " +
                                        str(self.learning_constant))
      self.user_interface.print_console("With hidden layers: " +
                                        str(hidden_layer_str))
      self.user_interface.print_console("With test amount by epoch size: " +
                                        str(self.test_data_amount) + "/" +
                                        str(len(self.matrix_targets)))
      self.user_interface.print_console("With epoch count: " + str(self.epochs))

      if self.testing_mode:
        self.repeat_count = 5000

      epoch_times = []
      # Iterate over dataset for each epoch
      for epoch in range(1, self.epochs + 1):
        pre_epoch_time = time.time()  # Get inital time for epoch time tracking
        matrix_count = 0
        for matrix in self.matrix_data:
          if self.user_interface.cancel_training:
            # Cancel training if requested
            break
          target_vals = self.matrix_targets[matrix_count]
          self.feed_forward(
              matrix)  # Send data to network and initiate the feed forward
          self.back_propagate(target_vals, epoch)  # After outputs produced, BP.
          matrix_count += 1
        if self.user_interface.cancel_training:
          break

        # NOTE(review): divides by test_data_amount - a 0% test partition
        # would raise ZeroDivisionError here; confirm the UI prevents it.
        success_p = (float(self.correct_count) / float(
            self.test_data_amount)) * 100  # Measure success for one epoch

        #Send success data to UI for graph
        self.user_interface.animate_graph_figures(1, success_p)
        e_note_str = " (ep. " + str(epoch) + ")"
        success_list.append(success_p)

        #Output epoch time and latest success values on UI
        if not self.is_small_data:
          self.user_interface.update_canvas_info_label(
              "Latest Success",
              str(round(success_p, 2)) + "%" + e_note_str)

        # Reset the per-epoch counters shared with back_propagate().
        self.test_counter = 0
        self.correct_count = 0
        post_epoch_time = time.time() - pre_epoch_time
        if not self.is_small_data:
          self.user_interface.update_canvas_info_label(
              "Epoch Duration",
              str(round(post_epoch_time, 2)) + "s " + e_note_str)
        epoch_times.append(post_epoch_time)

      #Complete training, cancel it, output results.
      if len(success_list) > 0:
        av_success = sum(success_list) / len(success_list)
        highest_success = max(success_list)
        av_epoch_time = round(sum(epoch_times) / len(epoch_times), 5)
      else:
        av_success = "N/A"
        highest_success = "N/A"
        av_epoch_time = "N/A"
      training_done_msg = "**FINISHED**"
      if self.user_interface.cancel_training:
        training_done_msg = "**CANCELLED**"
      else:
        self.user_interface.cancel_learning()
      self.user_interface.print_console(training_done_msg)
      self.user_interface.print_console("AVERAGE SUCCESS: " + str(av_success) +
                                        "%")
      self.user_interface.print_console("HIGHEST SUCCESS: " +
                                        str(highest_success) + "%")
      self.user_interface.print_console("TOTAL TIME: " + str(sum(epoch_times)) +
                                        "s")
      self.user_interface.print_console("AVERAGE EPOCH TIME: " +
                                        str(av_epoch_time) + "s")

  def activate_threshold(self, value, type):
    """Apply the named activation to `value` (scalar or ndarray).

    "step": hard threshold at 0.5; "sigmoid": logistic function.
    Implicitly returns None for any other name (no callers in this file
    pass one).
    """
    if (type == "step"):
      if (value >= 0.5):
        return 1
      else:
        return 0
    elif (type == "sigmoid"):
      return 1 / (1 + np.exp(-value))
313 |
--------------------------------------------------------------------------------
/resources/Perceptron-icon.icns:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/casparwylie/Perceptron/7b9bf461deda9888334450717367e2506d627871/resources/Perceptron-icon.icns
--------------------------------------------------------------------------------
/resources/perceptron-header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/casparwylie/Perceptron/7b9bf461deda9888334450717367e2506d627871/resources/perceptron-header.png
--------------------------------------------------------------------------------
/resources/settings.json:
--------------------------------------------------------------------------------
1 | {"stnadates": {"weight_range": "-1,1", "learning_rate": "0.5", "output_count": "5", "matrix_dims": "20", "data_to_retrieve": "all", "epochs": "100", "hidden_layer": "10,20", "test_data_partition": "10", "dataset_name": "--select--", "bias_vals": "1,1,0"}, "or_example": {"weight_range": "-1,1", "test_input_val": "", "learning_rate": "0.5", "output_count": "1", "matrix_dims": "2", "data_to_retrieve": "all", "epochs": "2000", "hidden_layer": "4", "test_data_partition": "1", "dataset_name": "or_data.txt", "bias_vals": "1,1"}, "mnist_digit": {"weight_range": "-1,1", "test_input_val": "", "learning_rate": "0.5", "output_count": "10", "matrix_dims": "28,28", "data_to_retrieve": "all", "epochs": "10", "hidden_layer": "40", "test_data_partition": "10000", "dataset_name": "train_new.txt", "bias_vals": "1,1"}}
--------------------------------------------------------------------------------
/user_interface_handler.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import cv2, re, numpy as np, random, math, time, thread, decimal, tkMessageBox, tkSimpleDialog, matplotlib, os, json
3 | from pprint import pprint
4 | import FileDialog
5 | matplotlib.use('TkAgg')
6 | from mtTkinter import *
7 | from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
8 | import matplotlib.pyplot as plt
9 | import matplotlib.animation as animation
10 | from matplotlib import rcParams
11 | from PIL import Image, ImageTk
12 | from neural_network_handler import neural_network
13 | from data_handler import data_processor
14 |
15 |
class user_interface:
  """Main Tk window: builds all frames/widgets, hosts the matplotlib
  graphs, and bridges between the data processor and the neural network.
  """

  # Fixed window dimensions (min and max sizes are pinned to these).
  frame_height = 800
  frame_width = 1200
20 |
  def __init__(self, tk_main):
    """Configure the root window, create the data processor, render UI.

    tk_main: the Tk root (patched by mtTkinter for thread-safe calls).
    """
    # Set up main UI constants
    self.tk_main = tk_main
    self.main_bg = "#81899f"
    self.ui_frame = Frame(self.tk_main)
    self.ui_frame.configure(background=self.main_bg)
    self.ui_frame.pack()
    self.tk_main.title("Perceptron")
    self.tk_main.configure(background=self.main_bg)
    # Pin the window to a fixed size (min == max).
    self.tk_main.minsize(width=self.frame_width, height=self.frame_height)
    self.tk_main.maxsize(width=self.frame_width, height=self.frame_height)
    self.font_face = "Arial"
    self.main_font_size = 13
    # Route the window-close button through quit_all for a clean exit.
    self.tk_main.protocol('WM_DELETE_WINDOW', self.quit_all)
    self.canvas_height = 500
    self.canvas_width = 950
    self.cancel_training = False
    self.new_line_count = 0
    self.canvas_labels = []
    self.settings_file_name = "resources/settings.json"
    self.can_clear_graph = False
    self.opt_bgcolor = "#424e6f"
    self.data_processor = data_processor(self)
    self.render_ui_frames()
    self.render_ui_widgets()
46 |
47 | def quit_all(self):
48 | # Quit program
49 | self.tk_main.destroy()
50 | sys.exit()
51 |
  def render_ui_frames(self):
    """Lay out the main frames: options column, console, NN canvas, graphs."""

    # Render all UI frames, set up dimensions and positions
    self.learn_options_frame = Frame(self.ui_frame, background=self.main_bg)
    self.learn_options_frame.pack(fill=BOTH, side=LEFT)

    self.c_scrollbar = Scrollbar(self.tk_main)
    self.c_scrollbar.pack(side=RIGHT, fill=Y)

    self.lower_frame = Frame(self.ui_frame, background=self.main_bg)
    self.lower_frame.pack(side=BOTTOM, fill=BOTH)
    # Read-only console; re-enabled temporarily when text is printed.
    self.console_list_box = Text(
        self.lower_frame,
        height=16,
        width=34,
        borderwidth=0,
        highlightthickness=0,
        bg="#212737",
        fg="white",
        font=("courier", 9))
    self.console_list_box.pack(ipady=20, ipadx=10, side=LEFT, fill=Y)
    self.console_list_box.config(yscrollcommand=self.c_scrollbar.set)
    self.console_list_box.configure(state="disabled")
    self.console_list_box.configure(wrap=WORD)
    self.c_scrollbar.config(command=self.console_list_box.yview)
    self.tk_nn_visual_canvas = Canvas(
        self.ui_frame,
        width=self.canvas_width,
        height=self.canvas_height,
        background="#424e6f",
        highlightthickness=0)
    self.tk_nn_visual_canvas.pack(side=RIGHT)

    # Placeholder lists indexed by graph number in render_graph().
    # NOTE(review): relies on Python 2's range() returning a real list
    # (index assignment below) - would break on Python 3.
    self.g_figures = range(2)
    self.g_axis = range(2)
    self.g_lines = [[], []]
    self.g_canvas = range(2)

    rcParams.update({'figure.autolayout': True})
    # Render graphs and list of all future line colours for different network representations
    self.line_colors = [
        "orange", "blue", "green", "red", "cyan", "pink", "gray", "yellow",
        "lime", "brown", "black", "purple", "gold"
    ]
    self.render_graph("% Of Error (from 1000 feedforwards)",
                      "1000 forward feeds", "%", 0, "r")
    self.render_graph("% Of Success (from test data each Epoch)", "Epoch", "%",
                      1, "b")
    self.prepare_new_line_graph()  # Ready the first lines on graph
101 |
  def render_graph(self, title, xlabel, ylabel, line_num, col):
    """Create one matplotlib figure embedded in the lower frame.

    line_num: index into the g_figures/g_axis/g_canvas placeholder lists.
    col: unused here - line colours come from self.line_colors instead.
    Axes start hidden; animate_graph_figures() reveals them on first data.
    """
    # Render a graph, set up plotting with design and axis
    self.g_figures[line_num] = plt.figure(facecolor=self.main_bg)
    # NOTE(review): `axisbg` is the old matplotlib (<2.0) spelling of
    # `facecolor` for subplots - pinned to an old matplotlib version.
    self.g_axis[line_num] = self.g_figures[line_num].add_subplot(
        111, axisbg="#b3b8c5")
    self.g_axis[line_num].set_ylabel(ylabel)
    self.g_axis[line_num].set_xlabel(xlabel)
    self.g_figures[line_num].text(
        0.5, 0.97, title, horizontalalignment='center', fontsize=9)
    self.g_axis[line_num].get_yaxis().set_visible(False)
    self.g_axis[line_num].get_xaxis().set_visible(False)
    self.g_canvas[line_num] = FigureCanvasTkAgg(
        self.g_figures[line_num], master=self.lower_frame)
    self.g_canvas[line_num].get_tk_widget().config(width=310, height=280)
    self.g_canvas[line_num].get_tk_widget().pack(side=LEFT, fill=X)
117 |
  def render_canvas_info_labels(self):
    """Create the info labels (success/duration/size) with StringVars.

    update_canvas_info_label() later rewrites the StringVars in place.
    """
    # Add labels from labels list just under the text inputs, for general information
    self.canvas_info_labels = {}
    self.canvas_info_label_vals = {}
    self.canvas_label_names = [
        "Latest Success", "Epoch Duration", "Dataset Size"
    ]
    # NOTE(review): label_y is incremented but never read - packing
    # controls the layout, so this looks like leftover absolute-position
    # code.
    label_y = 30
    for label_name in self.canvas_label_names:
      self.canvas_info_label_vals[label_name] = StringVar()
      self.canvas_info_label_vals[label_name].set(label_name + ": N/A")
      self.canvas_info_labels[label_name] = Label(
          self.mid_labels_frame,
          textvariable=self.canvas_info_label_vals[label_name],
          font=(self.font_face, self.main_font_size),
          bg=self.main_bg)
      self.canvas_info_labels[label_name].pack(side=BOTTOM)
      label_y += 20
136 |
137 | def update_canvas_info_label(self, label_name, val):
138 | self.canvas_info_label_vals[label_name].set(label_name + ": " + str(val))
139 |
  # Rolling state shared by the graph callbacks below.
  # NOTE(review): prev_line_1_data is never read in this file - confirm
  # whether it is still needed.
  prev_line_1_data = 0.0
  # Whether each graph's axes have been made visible yet (revealed on the
  # first data point in animate_graph_figures).
  axis_g_showing = [False, False]
  # Annotation artists added to graph 1; removed again in clear_graphs().
  all_g1_annotations = []
143 |
  def animate_graph_figures(self, line, data):
    """Append one data point to graph `line` (0=error, 1=success) and redraw.

    Called from the training thread via the mtTkinter-marshalled widgets.
    x values are simply the running index of appended points.
    """
    # Update / animate graphs when new data recieved from neural network training
    if not self.axis_g_showing[line]:
      # First data point: reveal the axes hidden by render_graph().
      self.g_axis[line].get_yaxis().set_visible(True)
      self.g_axis[line].get_xaxis().set_visible(True)
      self.axis_g_showing[line] = True

    # Extend the newest line on this graph with the new sample.
    ydata = self.g_lines[line][-1].get_ydata()
    ydata = np.append(ydata, data)
    self.g_lines[line][-1].set_ydata(ydata)
    self.g_lines[line][-1].set_xdata(range(len(ydata)))
    self.g_axis[line].relim()
    self.g_axis[line].autoscale_view()

    self.g_figures[line].canvas.draw()
159 |
160 | def clear_graphs(self): # Reset graphs, reset colours, etc
161 | if self.can_clear_graph:
162 | for ann in range(len(self.all_g1_annotations)):
163 | self.all_g1_annotations[ann].remove()
164 | self.all_g1_annotations = []
165 | for i in range(2):
166 | for line in range(len(self.g_lines[i])):
167 | self.g_lines[i][line].remove()
168 | self.g_lines[i] = []
169 | self.g_figures[i].canvas.draw()
170 | self.new_line_count = 0
171 | self.can_clear_graph = False
172 | self.prepare_new_line_graph()
173 |
  def prepare_new_line_graph(
      self):
    """Add one new empty line to each graph in the next cycle colour.

    new_line_count indexes self.line_colors and wraps around when the
    palette is exhausted, so successive training runs get distinct
    colours.
    """
    if (self.new_line_count >= len(self.line_colors)):
      self.new_line_count = 0
    for line in range(2):
      new_line, = self.g_axis[line].plot([], [],
                                         self.line_colors[self.new_line_count])
      self.g_lines[line].append(new_line)
    self.new_line_count += 1
184 |
  def show_alert(self, header, msg):
    """Show a modal info dialog with the given title and message."""
    tkMessageBox.showinfo(header, msg)
187 |
  # Set default values for hyperparameters to demonstrate visual aid.
  # These seed the input fields and the initial network visualization
  # before the user types anything.
  input_text_length = 8             # width (chars) of the Entry widgets
  default_hidden_layers_str = "6,9"
  default_bias_str = "1,1,1"
  default_input_dims = "8"
  default_data_set_str = ".txt"
  default_output_count = "2"
195 |
  # Event handler when hyparameter changed. On key change, update visual NN aid
  def render_nn_vis_trigger(self, event=None):
    """Re-render the network visualization from the current field values.

    event is None on the initial render (uses the class defaults);
    otherwise values are read from the input fields. Invalid or partial
    input is silently ignored - the visualization just isn't updated.
    """

    if not event:  # If first render, use default values
      hidden_str = self.default_hidden_layers_str
      bias_str = self.default_bias_str
      input_dims = self.default_input_dims
      output_count = int(self.default_output_count)
    else:  # Otherwise, update based on text inputs
      hidden_str = self.input_fields["hidden_layer"].get()
      bias_str = self.input_fields["bias_vals"].get()
      input_dims = self.input_fields["matrix_dims"].get()
      output_count_str = self.input_fields["output_count"].get()
      if (output_count_str.isdigit()):
        output_count = int(output_count_str)
      else:
        output_count = -1  # sentinel: blocks the render below
    # If values valid to reflect on visual aid, call renderer
    if (self.check_str_list_valid(hidden_str + bias_str) and
        hidden_str != "" and bias_str != "" and input_dims != ""):
      # Tolerate a trailing comma while the user is still typing.
      if (hidden_str[-1] == ","):
        hidden_str = hidden_str[0:-1]
      if (bias_str[-1] == ","):
        bias_str = bias_str[0:-1]
      if (input_dims[-1] == ","):
        input_dims = input_dims[0:-1]
      if self.check_str_list_valid(input_dims) and output_count > 0:
        # One value = flat input count; two values = matrix height,width.
        input_dims = input_dims.split(",")
        inputs_total = int(input_dims[0])
        if (len(input_dims) == 2):
          inputs_total = inputs_total * int(input_dims[1])
        layers = [inputs_total]
        hidden_layers = hidden_str.split(",")
        layers.extend(hidden_layers)
        biases = bias_str.split(",")
        layers.append(output_count)
        # NOTE(review): map() returning a list (and len() working on it)
        # is Python 2 behaviour - would need list(map(...)) on Python 3.
        layers = map(int, layers)
        biases = map(int, biases)
        if (len(layers) > 0 and len(biases) > 0):
          # Update visual aid
          self.render_neural_net_visualization(layers, biases)
237 |
238 | def render_dataset_opts(self, exists=False):
239 | # Find avaliable datasets from processed datasets folder, and use as options
240 | avaliable_datasets = ["--select--"]
241 | for file in self.data_processor.get_avaliable_datasets("new"):
242 | avaliable_datasets.append(file)
243 | if not exists:
244 | self.input_fields["dataset_name"] = self.render_input_field(
245 | 0,
246 | "Dataset File",
247 | "Chose avaliable text file",
248 | 5,
249 | self.learn_options_frame,
250 | drop=avaliable_datasets,
251 | command=self.update_expected_nn_io_fields)
252 | else:
253 | for field in self.all_drop_frames["Dataset File: "].winfo_children():
254 | field.destroy()
255 | self.input_fields["dataset_name"] = StringVar(self.tk_main)
256 | self.input_fields["dataset_name"]
257 | # Render the drop down menu
258 | opt = OptionMenu(
259 | self.all_drop_frames["Dataset File: "],
260 | self.input_fields["dataset_name"],
261 | command=self.update_expected_nn_io_fields,
262 | *avaliable_datasets)
263 | self.style_drop_manual(opt)
264 | opt.config(width=15)
265 | opt.pack(padx=3)
266 | self.input_fields["dataset_name"].set('--select--')
267 |
  def render_ui_widgets(self):
    """Build every widget: header image, input fields, buttons, labels.

    Field order here determines the on-screen order; the keys of
    self.input_fields are the names the training code reads values from.
    """

    # Initial network visualization from the default hyperparameters.
    self.render_nn_vis_trigger()

    # Define all frames and widgets
    icon = ImageTk.PhotoImage(
        Image.open("resources/perceptron-header.png").resize((230, 100),
                                                             Image.ANTIALIAS))
    self.icon_view = Label(
        self.learn_options_frame,
        image=icon,
        highlightthickness=0,
        bg=self.main_bg)
    # Keep a reference on the widget so the PhotoImage isn't garbage
    # collected (Tkinter holds only a weak link to it).
    self.icon_view.image = icon
    self.icon_view.pack()

    self.choose_settings_frame = Frame(self.learn_options_frame)
    self.choose_settings_frame.pack(pady=(20, 0))
    self.render_settings_opts()

    # Registries shared with render_input_field()/render_dataset_opts().
    self.input_fields = {}
    self.input_labels = {}
    self.input_descs = {}
    self.widget_frames = {}
    self.input_descs_vis = {}
    self.all_drop_frames = {}
    self.all_drops = {}

    self.open_prepro_window = self.render_option(
        "DATA PREPROCESSOR",
        self.preprocess_data_render,
        self.learn_options_frame,
        width=18)

    # Define all input fields
    self.render_dataset_opts()
    self.input_fields["data_to_retrieve"] = self.render_input_field(
        "all", "Data To Use",
        "Enter 'all' or number of items to use from dataset",
        self.input_text_length, self.learn_options_frame)
    self.input_fields["matrix_dims"] = self.render_input_field(
        self.default_input_dims,
        "Input Count",
        "Enter single value or enter height, width of matrix with comma",
        self.input_text_length,
        self.learn_options_frame,
        command=self.render_nn_vis_trigger)
    self.input_fields["output_count"] = self.render_input_field(
        self.default_output_count,
        "Output Count",
        "Enter output quantity",
        self.input_text_length,
        self.learn_options_frame,
        command=self.render_nn_vis_trigger)
    self.input_fields["hidden_layer"] = self.render_input_field(
        self.default_hidden_layers_str,
        "Hidden Layers",
        "Enter comma seperated list of hidden layer sizes",
        self.input_text_length,
        self.learn_options_frame,
        command=self.render_nn_vis_trigger)
    self.input_fields["bias_vals"] = self.render_input_field(
        self.default_bias_str,
        "Bias Values",
        "List must match hidden layer count plus output, but enter 0 for no bias",
        self.input_text_length,
        self.learn_options_frame,
        command=self.render_nn_vis_trigger)
    self.input_fields["learning_rate"] = self.render_input_field(
        "0.5", "Learning Rate", "Enter decimal or integer",
        self.input_text_length, self.learn_options_frame)
    self.input_fields["weight_range"] = self.render_input_field(
        "-1,1", "Weight Ranges",
        "Enter one value (or two for a range) for initial weight values",
        self.input_text_length, self.learn_options_frame)
    self.input_fields["epochs"] = self.render_input_field(
        "100", "Epochs", "Total number of iterations through all data loaded",
        self.input_text_length, self.learn_options_frame)
    self.input_fields["test_data_partition"] = self.render_input_field(
        "10", "Data for Testing (%)",
        "Amount of data to partition from dataset for result testing (as a percentage)",
        self.input_text_length, self.learn_options_frame)

    # Set frame positions
    self.mid_labels_frame = Frame(self.learn_options_frame, bg=self.main_bg)
    self.mid_labels_frame.pack(expand=True, fill=BOTH)
    self.render_canvas_info_labels()
    self.lower_sect = Frame(self.learn_options_frame, background=self.main_bg)
    self.lower_sect.pack(expand=True, fill=BOTH)
    self.opt_cols = Frame(self.lower_sect, bg="red")
    self.opt_cols.pack(side=TOP, expand=True)
    self.left_opt_col = Frame(self.opt_cols, background=self.main_bg)
    self.left_opt_col.pack(side=LEFT)
    self.right_opt_col = Frame(self.opt_cols, background=self.main_bg)
    self.right_opt_col.pack(side=RIGHT)

    # Defines all buttons. Stop/export start disabled and are enabled by
    # the training lifecycle elsewhere.
    self.start_learning_opt = self.render_option(
        "Start Learning", self.start_learning_ui_request, self.left_opt_col)
    self.cancel_learning_opt = self.render_option(
        "Stop Learning", self.cancel_learning, self.left_opt_col)
    self.cancel_learning_opt.config(state="disabled")
    self.clear_graphs_opt = self.render_option(
        "Clear Graphs", self.clear_graphs, self.right_opt_col)
    self.save_settings_opt = self.render_option(
        "Save Settings", self.save_settings, self.right_opt_col)
    self.save_nn_opt = self.render_option("Export Trained NN", self.save_nn,
                                          self.right_opt_col)
    self.save_nn_opt.config(state="disabled")
    self.test_input_opt = self.render_option("Test With Input", self.test_input,
                                             self.left_opt_col)

    self.print_console(
        "Welcome to Perceptron. To get started, preprocess a dataset and then design a neural network for it to use. For more information, see the README file. Click this console to scroll it."
    )
383 |
384 | def render_input_field(self,
385 | default_value,
386 | label_text,
387 | desc_text,
388 | width,
389 | parent_frame,
390 | command=None,
391 | drop=None):
392 | # Render input field with frame and label
393 | label_text = label_text + ": "
394 | self.widget_frames[label_text] = Frame(
395 | parent_frame, background=self.main_bg)
396 | self.widget_frames[label_text].pack(fill=X, expand=False)
397 |
398 | desc_frame = Frame(
399 | self.widget_frames[label_text],
400 | width=50,
401 | height=0,
402 | background=self.main_bg)
403 | desc_frame.pack(fill=None, side=BOTTOM, expand=False)
404 |
405 | if drop:
406 | self.all_drop_frames[label_text] = Frame(
407 | self.widget_frames[label_text], bg=self.main_bg)
408 | self.all_drop_frames[label_text].pack(side=RIGHT)
409 | input_widget_val = StringVar(self.tk_main)
410 | self.all_drops[label_text] = OptionMenu(
411 | self.all_drop_frames[label_text],
412 | input_widget_val,
413 | command=command,
414 | *drop)
415 | input_widget = self.all_drops[label_text]
416 | self.style_drop_manual(input_widget)
417 | input_widget.config(width=15)
418 | input_widget_val.set(drop[default_value])
419 | else:
420 | input_widget = Entry(
421 | self.widget_frames[label_text],
422 | width=38 - len(label_text),
423 | bg="#545f7d",
424 | font=(self.font_face, 11))
425 | input_widget.insert(0, str(default_value))
426 | if command:
427 | input_widget.bind("", command)
428 |
429 | input_widget.pack(side=RIGHT, padx=3, ipady=3)
430 |
431 | if drop:
432 | input_widget = input_widget_val
433 |
434 | self.input_labels[label_text] = Label(
435 | self.widget_frames[label_text],
436 | text=label_text,
437 | background=self.main_bg,
438 | font=(self.font_face, self.main_font_size))
439 | self.input_labels[label_text].pack(side=LEFT)
440 | self.widget_frames[label_text].bind("", self.toggle_desc_label)
441 | self.widget_frames[label_text].bind("", self.toggle_desc_label)
442 | self.input_descs[label_text] = Label(
443 | desc_frame,
444 | text="*" + desc_text,
445 | background=self.main_bg,
446 | font=(self.font_face, 1),
447 | fg=self.main_bg,
448 | wraplength=210)
449 | self.input_descs[label_text].pack(side=BOTTOM)
450 | self.input_descs_vis[label_text] = 0
451 | return input_widget
452 |
453 | def render_option(self,
454 | text,
455 | command,
456 | parent_frame,
457 | side=None,
458 | anchor=None,
459 | width=None,
460 | bg=None):
461 | if not width:
462 | width = 14
463 | if not bg:
464 | bg = self.opt_bgcolor
465 | option = Button(
466 | parent_frame,
467 | text=text,
468 | command=command,
469 | relief=FLAT,
470 | width=width,
471 | bg=bg,
472 | bd=3,
473 | foreground="white")
474 | option.pack(side=side, anchor=anchor, padx=3, pady=3)
475 | return option
476 |
477 | def update_expected_nn_io_fields(self, event=None):
478 | # Search dataset, get length data and predicted layer dimensions
479 | if (self.input_fields["dataset_name"].get() != "--select--"):
480 | dataset = open(
481 | self.data_processor.folders_for_data["new"] + "/" +
482 | self.input_fields["dataset_name"].get(), 'r').read().split("\n")
483 | self.dataset_row_count = len(dataset) - 1
484 | self.update_canvas_info_label("Dataset Size", self.dataset_row_count)
485 | dataset_meta = json.loads(dataset[0])
486 | dataset_meta["alphas"] = self.data_processor.sort_dataset_meta_alphas(
487 | dataset_meta["alphas"])
488 | sample_dataset_row = dataset[3].split(",")
489 | self.expected_input_count = 0
490 | for item_i in dataset_meta["alphas"]:
491 | if int(item_i) not in dataset_meta["target_info"][2]:
492 | if dataset_meta["alphas"][item_i] != None:
493 | item_class_len = len(dataset_meta["alphas"][item_i])
494 | if not item_class_len:
495 | item_class_len = 1
496 | else:
497 | item_class_len = 0
498 | self.expected_input_count += item_class_len
499 |
500 | self.expected_hidden_count = int(
501 | round(math.sqrt(int(round(self.expected_input_count)))) + 10)
502 | if (dataset_meta["target_info"][0] != "Binary"):
503 | self.expected_output_count = 0
504 | for t_pos in dataset_meta["target_info"][2]:
505 | t_alpha_pos_len = len(dataset_meta["alphas"][t_pos])
506 | if not t_alpha_pos_len:
507 | t_alpha_pos_len = 1
508 | self.expected_output_count += t_alpha_pos_len
509 | else:
510 | self.expected_output_count = int(dataset_meta["target_info"][1])
511 | self.input_fields["output_count"].delete(0, END)
512 | self.input_fields["output_count"].insert(0, self.expected_output_count)
513 | self.input_fields["matrix_dims"].delete(0, END)
514 | self.input_fields["matrix_dims"].insert(0, self.expected_input_count)
515 | self.input_fields["hidden_layer"].delete(0, END)
516 | self.input_fields["hidden_layer"].insert(0, self.expected_hidden_count)
517 | self.input_fields["bias_vals"].delete(0, END)
518 | self.input_fields["bias_vals"].insert(0, "0,0")
519 | self.render_nn_vis_trigger(event=True)
520 |
521 | def toggle_desc_label(self, event):
522 | label_text = event.widget.winfo_children()[2].cget("text")
523 | if (self.input_descs_vis[label_text] % 2 == 0):
524 | self.input_descs[label_text].configure(fg="#60606b")
525 | self.input_descs[label_text].configure(font=(self.font_face, 10))
526 | else:
527 | self.input_descs[label_text].configure(fg=self.main_bg)
528 | self.input_descs[label_text].configure(font=(self.font_face, 1))
529 | self.input_descs_vis[label_text] += 1
530 |
531 | def save_nn(self):
532 | nn_name = tkSimpleDialog.askstring("Saving Neural Network",
533 | "Neural Net Name: ")
534 | if (nn_name):
535 | weight_layers = self.neural_network.all_weights
536 | weights_as_list = []
537 | for layer in weight_layers:
538 | l_layer = []
539 | for w_group in layer:
540 | l_group = []
541 | for w in w_group:
542 | l_group.append(w)
543 | l_layer.append(l_group)
544 | weights_as_list.append(l_layer)
545 |
546 | weights_as_json = json.dumps(weights_as_list)
547 |
548 | new_file = open("saved/nn_" + nn_name + ".txt", "a")
549 | new_file.write(weights_as_json)
550 |
551 | def load_settings(self, value):
552 | # Load JSON settings and put values into input fields
553 | setting_to_load = self.saved_settings_text.get()
554 | if (setting_to_load != self.saved_settings_dis_text):
555 | settings_file_json = json.loads(open(self.settings_file_name, "r").read())
556 | spec_settings = settings_file_json[setting_to_load]
557 | for input_field in self.input_fields:
558 | self.input_fields[input_field].delete(0, END)
559 | self.input_fields[input_field].insert(0, spec_settings[input_field])
560 | self.render_nn_vis_trigger(True)
561 |
562 | def style_drop_manual(self, drop):
563 | drop.config(bg=self.opt_bgcolor)
564 | drop["menu"].config(bg=self.opt_bgcolor)
565 | drop.config(foreground="white")
566 | drop.config(highlightthickness=0)
567 | drop["menu"].config(foreground="white")
568 | drop.config(relief=FLAT)
569 |
570 | def render_settings_opts(self):
571 | self.saved_settings_dis_text = "--Saved Settings--"
572 | settings_str = open(self.settings_file_name, "r").read()
573 | saved_settings = [self.saved_settings_dis_text]
574 | if (len(settings_str) > 0):
575 | settings_file_json = json.loads(settings_str)
576 | for setting in settings_file_json:
577 | saved_settings.append(setting)
578 | self.saved_settings_text = StringVar(self.tk_main)
579 | self.saved_settings_opts = OptionMenu(
580 | self.choose_settings_frame,
581 | self.saved_settings_text,
582 | command=self.load_settings,
583 | *saved_settings)
584 | self.saved_settings_opts.config(width=16)
585 | self.style_drop_manual(self.saved_settings_opts)
586 | self.saved_settings_opts.pack()
587 | self.saved_settings_text.set(saved_settings[0])
588 |
589 | def save_settings(self):
590 | # Take values from input fields and construct JSON object to save
591 | # Also give setting state a name for future ident.
592 | settings_name = tkSimpleDialog.askstring("Saving Settings",
593 | "Setting's Name: ")
594 | if settings_name:
595 | if len(settings_name) > 1:
596 | settings_file_read = open(self.settings_file_name, "r")
597 | settings_str = settings_file_read.read()
598 | if len(settings_str) == 0:
599 | settings_str = "{}"
600 | all_settings_as_json = json.loads(settings_str)
601 | input_values = {}
602 | for input_field in self.input_fields:
603 | input_values[input_field] = self.input_fields[input_field].get()
604 | all_settings_as_json[settings_name] = input_values
605 | all_settings_as_str = json.dumps(all_settings_as_json)
606 | settings_file_write = open(self.settings_file_name, "w")
607 | settings_file_write.write(all_settings_as_str)
608 | settings_file_read.close()
609 | settings_file_write.close()
610 | self.saved_settings_opts.destroy()
611 | self.render_settings_opts()
612 |
613 | def check_str_list_valid(self, string):
614 | valid_str_entry = True
615 | for char in string:
616 | if char != "," and not char.isdigit():
617 | valid_str_entry = False
618 | break
619 |
620 | return valid_str_entry
621 |
622 | def map_to_int_if_valid(self, string):
623 | if not self.check_str_list_valid(string):
624 | result = False
625 | elif string == "":
626 | result = []
627 | else:
628 | string = self.data_processor.real_strip(string, [","])
629 | result = self.data_processor.strip_row_list(string.split(","))
630 | result = map(int, result)
631 | return result
632 |
  # Last digit guessed by the camera demo; -1 means "no guess yet".
  # NOTE(review): not referenced within this chunk — confirm it is still used.
  prev_guess = -1
634 |
  def render_camera(self):
    """Open a live-camera window that feeds a screen region into the network.

    Creates a main window showing the grayscale camera feed with a marked
    region of interest (ROI), and a mini window showing the thresholded ROI
    plus the network's current digit guess. A self-rescheduling callback
    refreshes both every ~10 ms.
    NOTE(review): requires cv2, PIL (Image/ImageTk), and a working camera;
    a trained self.neural_network is assumed when the ROI border is blank.
    """
    camera_window = Toplevel(self.tk_main)
    image_frame = Frame(camera_window, width=600, height=500)
    image_frame.pack()
    capture_frame = cv2.VideoCapture(0)
    label_for_cam = Label(image_frame)
    label_for_cam.pack()

    # Secondary window: what the network actually "sees" (the ROI).
    mini_cam_window = Toplevel(self.tk_main, width=300, height=300)
    imagemini_frame = Frame(mini_cam_window, width=600, height=500)
    imagemini_frame.pack()
    label_for_minicam = Label(imagemini_frame)
    label_for_minicam.pack()

    guess_str_val = StringVar()
    label_guess = Label(
        mini_cam_window,
        text="",
        font=(self.font_face, 20),
        textvariable=guess_str_val)
    label_guess.pack()

    def render_cam_frame():
      # Grab a frame, grayscale it, and cut out a 50x50 ROI.
      _, cv_frame = capture_frame.read()
      cv_frame = cv2.cvtColor(cv_frame, cv2.COLOR_BGR2GRAY)
      roi_size = 50
      roi_point_1 = (400, 200)
      roi_point_2 = (roi_point_1[0] + roi_size, roi_point_1[1] + roi_size)
      roi_matrix = cv_frame[roi_point_1[1]:roi_point_2[1], roi_point_1[0]:
                            roi_point_2[0]]
      # Binarise and invert so drawn strokes become white-on-black.
      _, roi_matrix = cv2.threshold(roi_matrix, 100, 255, cv2.THRESH_BINARY_INV)

      img_miniframe = Image.fromarray(roi_matrix)
      tk_miniframe = ImageTk.PhotoImage(image=img_miniframe)
      # Keep a reference on the label to stop Tk garbage-collecting the image.
      label_for_minicam.imgtk = tk_miniframe
      label_for_minicam.configure(image=tk_miniframe)

      # Downscale to the 28x28 MNIST input size and normalise.
      roi_matrix = cv2.resize(roi_matrix, (28, 28))
      matrix_float = self.data_processor.prep_matrix_for_input(roi_matrix)
      # Sum the one-pixel border of the ROI; only classify when the border
      # is empty (the digit is fully inside the box).
      outline_vals = [
          matrix_float[0, :-1], matrix_float[:-1, -1], matrix_float[-1, ::-1],
          matrix_float[-2:0:-1, 0]
      ]
      outline_sum = np.concatenate(outline_vals).sum()
      if (int(outline_sum) == 0):
        self.neural_network.feed_forward(matrix_float.flatten())
        output_neurons = self.neural_network.nn_neurons[-1].tolist()
        max_val = max(output_neurons)
        # Only display confident guesses.
        if (max_val > 0.9):
          guess_val = output_neurons.index(max_val)
          guess_str_val.set(guess_val)

      # Draw the ROI rectangle on the main feed.
      cv2.rectangle(
          cv_frame,
          roi_point_1,
          roi_point_2, (255),
          thickness=3,
          lineType=8,
          shift=0)

      img_frame = Image.fromarray(cv_frame)
      tk_frame = ImageTk.PhotoImage(image=img_frame)
      label_for_cam.imgtk = tk_frame
      label_for_cam.configure(image=tk_frame)
      # Re-schedule ourselves; this is the render loop.
      label_for_cam.after(10, render_cam_frame)

    render_cam_frame()
702 |
  def preprocess_data_render(self):
    """Build the dataset-preprocessing window.

    Left side: a form of preprocessing options (file, delimiters, fields to
    ignore/minimise, target configuration). Right side: a read-only Text
    viewer showing sample rows of the processed output, refreshed live as
    options change.
    """
    self.preprocess_window = Toplevel(self.ui_frame, width=300, height=400)
    self.preprocess_form = Frame(
        self.preprocess_window, background=self.main_bg)
    self.preprocess_form.pack(side=LEFT, fill=BOTH)

    # Right-hand panel: scrollable, read-only preview of processed samples.
    self.preprocess_inter_viewer = Frame(
        self.preprocess_window, bg=self.main_bg)
    self.preprocess_inter_viewer.pack(side=RIGHT, fill=BOTH)
    self.inter_viewer_header = Label(
        self.preprocess_inter_viewer,
        text="Samples of processed data...",
        font=(self.font_face, self.main_font_size),
        bg=self.main_bg)
    self.inter_viewer_header.pack()
    self.v_scrollbar = Scrollbar(self.tk_main)
    self.v_scrollbar.pack(side=RIGHT, fill=Y)
    self.inter_viewer_box = Text(
        self.preprocess_inter_viewer,
        bg="#b3b8c5",
        height=16,
        width=100,
        borderwidth=0,
        highlightthickness=0,
        font=("courier bold", 10))
    self.inter_viewer_box.pack(padx=3, ipady=20, ipadx=10, side=RIGHT, fill=Y)
    self.inter_viewer_box.config(yscrollcommand=self.v_scrollbar.set)
    self.inter_viewer_box.configure(state="disabled")
    self.v_scrollbar.config(command=self.inter_viewer_box.yview)

    self.AtN_tran_fields = []
    # Maps option name -> the Entry widget or StringVar holding its value.
    self.prepro = {}

    # Drop-down of raw files available in the original-datasets folder.
    avaliable_datasets = ["--select--"]
    for file in self.data_processor.get_avaliable_datasets("old"):
      avaliable_datasets.append(file)
    self.prepro["original_file"] = self.render_input_field(
        0,
        "File To Process",
        "Chose avaliable text file",
        5,
        self.preprocess_form,
        drop=avaliable_datasets,
        command=self.update_prepro_viewer_for_struct)

    self.prepro["row_separator_char"] = self.render_input_field(
        "\n",
        "Row Delimiter",
        "Enter the character that separates each row, default is a '\\n' ",
        self.input_text_length,
        self.preprocess_form,
        command=self.update_prepro_viewer_for_struct)
    ig_first_row_opts = ["No", "Yes"]
    self.prepro["ignore_first_row"] = self.render_input_field(
        0,
        "Ignore First Row",
        "If first row are labels/column names, remove them.",
        5,
        self.preprocess_form,
        drop=ig_first_row_opts,
        command=self.update_prepro_viewer_for_struct)
    self.prepro["fields_to_ignore"] = self.render_input_field(
        "",
        "Fields to Ignore",
        "Enter position of fields to be removed/ignored (where first field is 0) ",
        self.input_text_length,
        self.preprocess_form,
        command=self.update_prepro_viewer_for_struct)

    # Typing here triggers add_min_field, which renders the divider inputs.
    self.prepro["fields_to_min"] = self.render_input_field(
        "",
        "Fields For Minimisation",
        "Enter position of fields that need minimising (where first field is 0)",
        self.input_text_length,
        self.preprocess_form,
        command=self.add_min_field)
    self.prepro_mins_frame = Frame(self.preprocess_form, bg=self.main_bg)
    self.prepro_mins_frame.pack(fill=BOTH)

    self.alpha_trans_opt = self.render_option(
        "See Alpha Classes",
        self.render_trans_alpha_window,
        self.preprocess_form,
        width=20)
    #self.alpha_trans_opt.configure(state="disabled")

    # Target (label) configuration: which fields, and how they are encoded.
    target_types = ["--select--", "Binary", "Real", "Alpha Encoded"]
    self.prepro["target_val_pos"] = self.render_input_field(
        "",
        "Target position(s)",
        "Enter position of fields that are target values",
        self.input_text_length,
        self.preprocess_form,
        command=self.update_prepro_viewer_for_struct)
    self.prepro["target_type"] = self.render_input_field(
        0,
        "Target Value Type",
        "Choose binary or numeric",
        5,
        self.preprocess_form,
        drop=target_types,
        command=self.prepro_vb_change)

    # Populated by prepro_vb_change only when the target type is "Binary".
    self.prepro_vb_frame = Frame(self.preprocess_form, bg=self.main_bg)
    self.prepro_vb_frame.pack(fill=BOTH)
    self.prepro["bin_range"] = None

    self.prepro["rows_for_testing"] = self.render_input_field(
        "", "Disclude Rows For Testing",
        "Enter list or range of rows that should be unseen by the neural net, for testing later.",
        self.input_text_length, self.preprocess_form)

    self.prepro_opt = self.render_option("PROCESS", self.start_preprocess,
                                         self.preprocess_form)
    self.reset_opt = self.render_option("RESET", self.reset_prepro,
                                        self.preprocess_form)
819 |
  def reset_prepro(self):
    """Discard the preprocessing window and rebuild it with default values."""
    self.preprocess_window.destroy()
    self.preprocess_data_render()
823 |
  # True while the "Alpha Classes Found" window is open.
  is_viewing_trans = False
825 |
826 | def render_trans_alpha_window(self, event=None):
827 | self.is_viewing_trans = True
828 | self.prepro_transAN_frame = Toplevel(
829 | self.ui_frame, width=200, height=200, bg=self.main_bg)
830 | self.prepro_transAN_frame.protocol('WM_DELETE_WINDOW',
831 | self.unset_alpha_fields)
832 | has_found_alphas = False
833 | self.update_prepro_viewer_for_struct()
834 | Label(
835 | self.prepro_transAN_frame,
836 | text="Alpha Classes Found...",
837 | font=(self.font_face, self.main_font_size + 2),
838 | bg=self.main_bg).pack()
839 | if (len(self.data_processor.found_alphas) > 0):
840 | for field in self.data_processor.found_alphas:
841 | if (len(self.data_processor.found_alphas[field]) > 0):
842 | has_found_alphas = True
843 | alphas_found_as_str = ','.join(
844 | str(e) for e in self.data_processor.found_alphas[field])
845 | label_txt = "field_" + str(field) + ": " + alphas_found_as_str
846 | Label(
847 | self.prepro_transAN_frame,
848 | text=label_txt,
849 | font=(self.font_face, self.main_font_size),
850 | bg=self.main_bg).pack()
851 | else:
852 | Label(
853 | self.prepro_transAN_frame,
854 | text="No alphas found",
855 | font=(self.font_face, self.main_font_size),
856 | bg=self.main_bg).pack()
857 |
858 | def unset_alpha_fields(self):
859 | self.prepro_transAN_frame.destroy()
860 | self.is_viewing_trans = False
861 | self.update_prepro_viewer_for_struct()
862 |
  def update_prepro_viewer_for_struct(self, event=None):
    """Re-run preview structuring on the raw file and refresh the sample view.

    Only acts when the selected original-data file exists. Also locks the
    target-type drop-down whenever the structurer reports that target
    values were alpha-encoded.
    """
    if (os.path.isfile(self.data_processor.folders_for_data["old"] + "/" +
                       self.prepro["original_file"].get())):
      # The Entry shows a literal backslash-n; convert it to a real newline.
      if (self.prepro["row_separator_char"].get() == "\\n"):
        self.prepro["row_separator_char"].delete(0, END)
        self.prepro["row_separator_char"].insert(0, "\n")

      prepro_vals = self.data_processor.validate_prepro()
      # (True, True) — NOTE(review): presumably "preview mode" flags; the
      # non-preview call in start_preprocess passes (False, False). Confirm.
      struct_str = self.data_processor.struct_dataset(True, True, prepro_vals)
      self.update_viewer_text(struct_str)

      if (self.data_processor.target_has_encoded_alphas):
        self.prepro["target_type"].set("Alpha Encoded")
        self.all_drops["Target Value Type: "].configure(state="disabled")
      else:
        self.all_drops["Target Value Type: "].configure(state="normal")
879 |
  def update_viewer_text(self, text):
    """Replace the contents of the read-only sample viewer with *text*."""
    # A disabled Text widget rejects edits, so enable, edit, then re-disable.
    self.inter_viewer_box.configure(state="normal")
    self.inter_viewer_box.delete(1.0, END)
    self.inter_viewer_box.insert(INSERT, text)
    self.inter_viewer_box.configure(state="disabled")
885 |
  def add_min_field(self, event=None):
    """Render divider inputs for the 'Fields For Minimisation' entry.

    Typing "all" renders one global divider plus an exception list; a valid
    comma-separated list of field positions renders one divider input per
    field. The created widgets are collected in self.min_fields.
    """
    min_val = self.prepro["fields_to_min"].get()
    if (min_val == "all"):
      self.min_fields = []
      self.clear_frame(self.prepro_mins_frame)
      min_field_val = self.render_input_field(
          "",
          "Minimise All Fields By",
          "Enter the divider value",
          self.input_text_length,
          self.prepro_mins_frame,
          command=self.update_prepro_viewer_for_struct)
      min_field_except = self.render_input_field(
          "",
          "...Except Fields",
          "Enter the field positions that shouldn't be divided, or leave blank",
          self.input_text_length,
          self.prepro_mins_frame,
          command=self.update_prepro_viewer_for_struct)
      self.min_fields.append(min_field_val)
      self.min_fields.append(min_field_except)
    elif (self.check_str_list_valid(min_val)):
      # map_to_int_if_valid returns False for invalid input, else a list
      # (possibly empty) of field positions.
      min_fields_list = self.map_to_int_if_valid(min_val)
      if (min_fields_list != False):
        self.min_fields = []
        self.clear_frame(self.prepro_mins_frame)
        for field in min_fields_list:
          min_field = self.render_input_field(
              "",
              "Minimise field_" + str(field) + " By",
              "Enter the divider",
              self.input_text_length,
              self.prepro_mins_frame,
              command=self.update_prepro_viewer_for_struct)
          self.min_fields.append(min_field)
921 |
922 | def clear_frame(self, frame):
923 | for field in frame.winfo_children():
924 | field.destroy()
925 |
926 | def prepro_vb_change(self, value):
927 | self.clear_frame(self.prepro_vb_frame)
928 | if (value == "Binary"):
929 | self.prepro["bin_range"] = self.render_input_field(
930 | "",
931 | "Binary Vector Range",
932 | "The binary vector range (number of classes)",
933 | self.input_text_length,
934 | self.prepro_vb_frame,
935 | command=self.update_prepro_viewer_for_struct)
936 | else:
937 | self.prepro["bin_range"] = None
938 |
939 | def start_preprocess(self):
940 | poss_errors = self.data_processor.validate_prepro()["error"]
941 | if (poss_errors == ""):
942 | prepro_vals = self.data_processor.validate_prepro()
943 | self.preprocess_window.destroy()
944 | thread.start_new_thread(self.data_processor.struct_dataset,
945 | (False, False, prepro_vals))
946 | else:
947 | tkMessageBox.showinfo("Error", poss_errors)
948 |
949 | def test_input(self):
950 | # Allow for different forms of input
951 | input_str = tkSimpleDialog.askstring(
952 | "Enter Input",
953 | "Enter the name of an image file, text file, 'MNIST' for the demo, or enter row data manually: "
954 | )
955 | if input_str:
956 | file_type_pos = input_str.rfind(".")
957 | valid_files = ["png", "jpg", "txt"]
958 | file_type_str = ""
959 | if input_str == "MNIST":
960 | self.render_camera()
961 | else:
962 | if file_type_pos != -1:
963 | file_type_str = input_str[file_type_pos + 1:]
964 |
965 | if (file_type_str not in valid_files or file_type_str == "txt"):
966 | if (file_type_str == "txt"):
967 | input_str = open(input_str, 'r').read()
968 | input_data = input_str.split(",")
969 | new_data = []
970 | for target_d in self.dataset_meta["target_info"][2]:
971 | input_data.insert(int(target_d), None)
972 | processed_row, t, s = self.data_processor.change_data_to_processed(
973 | self.dataset_meta, input_data)
974 | el_i = 0
975 | for el in processed_row:
976 | if not el:
977 | del processed_row[el_i]
978 | el_i += 1
979 | matrix_ready = np.asarray(processed_row, dtype=np.float32)
980 | elif (file_type_str in valid_files):
981 | image_matrix = cv2.imread(file_name)
982 | image_matrix = cv2.cvtColor(image_matrix, cv2.COLOR_BGR2GRAY)
983 | image_matrix = cv2.resize(image_matrix,
984 | (self.matrix_dims[0], self.matrix_dims[1]))
985 | matrix_ready = self.matrix_data_loader.prep_matrix_for_input(
986 | image_matrix)
987 | else:
988 | output_pos_result = -1
989 | self.print_console("**ERROR: invalid test input")
990 |
991 | # Having taken different forms of input, construct conforming matrix for input layer
992 | self.neural_network.feed_forward(matrix_ready)
993 |
994 | output_neurons = self.neural_network.nn_neurons[-1].tolist()
995 | output_pos_result = output_neurons.index(max(output_neurons))
996 | if (len(output_neurons) > 1):
997 | if (self.dataset_meta["target_info"][0] == "Alpha Encoded"):
998 | c = 0
999 | for targ_pos in self.dataset_meta["target_info"][2]:
1000 | alphas = self.dataset_meta["alphas"][targ_pos]
1001 | output_pos_result = alphas[output_pos_result]
1002 | c += 1
1003 | elif (self.dataset_meta["target_info"][0] == "Binary"):
1004 | output_pos_result = output_neurons.index(max(output_neurons))
1005 |
1006 | else:
1007 | output_pos_result = output_neurons
1008 |
1009 | if (output_pos_result != -1):
1010 | self.print_console("**OUTPUT RESULT: " + str(output_pos_result))
1011 |
  def cancel_learning(self):
    """Flag the training thread to stop and restore the button states."""
    # Polled by the training loop running on the worker thread.
    self.cancel_training = True
    self.prepare_new_line_graph()
    self.start_learning_opt.config(state="normal")
    self.cancel_learning_opt.config(state="disabled")
    self.save_nn_opt.config(state="normal")
    # Manual testing only makes sense once a network with inputs exists.
    if (self.input_neuron_count > 0):
      self.test_input_opt.config(state="normal")
1020 |
1021 | def print_console(self, text):
1022 | self.console_list_box.configure(state="normal")
1023 | if (text == " **TRAINING** \n"):
1024 | text += ">>With graph line color: " + self.line_colors[self.new_line_count
1025 | - 1]
1026 | self.console_list_box.insert(END, ">>" + text + "\n")
1027 | self.console_list_box.see(END)
1028 | self.console_list_box.configure(state="disabled")
1029 |
1030 | def check_all_fields_valid(self):
1031 | # Validate all inputs to check correect ints or list
1032 | hidden_str = self.input_fields["hidden_layer"].get()
1033 | bias_str = self.input_fields["bias_vals"].get()
1034 | error = ""
1035 | valid_values = {}
1036 | if not self.check_str_list_valid(hidden_str + bias_str
1037 | ) or not hidden_str or not bias_str:
1038 | error = "You hidden layers or bias values are invalid"
1039 | else:
1040 | valid_values['hidden_layers'] = map(int, hidden_str.split(","))
1041 | valid_values['biases_for_non_input_layers'] = map(int,
1042 | bias_str.split(","))
1043 |
1044 | if (len(valid_values['hidden_layers']) + 1 != len(
1045 | valid_values['biases_for_non_input_layers'])):
1046 | error = "Bias count must be equal to " + str(
1047 | len(valid_values['hidden_layers']) +
1048 | 1) + " (the total layer count expect input)"
1049 |
1050 | learning_constant = self.input_fields["learning_rate"].get()
1051 | if not learning_constant.replace(".", "", 1).isdigit():
1052 | error = "Invalid learning constant"
1053 | else:
1054 | valid_values['learning_constant'] = float(learning_constant)
1055 | valid_values['data_file_name'] = self.input_fields["dataset_name"].get()
1056 | matrix_dims_str = self.input_fields["matrix_dims"].get()
1057 | weight_range_str = self.input_fields["weight_range"].get()
1058 | to_retrieve = self.input_fields["data_to_retrieve"].get()
1059 | output_count = self.input_fields["output_count"].get()
1060 | epochs = self.input_fields["epochs"].get()
1061 | data_to_test_count = self.input_fields["test_data_partition"].get()
1062 |
1063 | if matrix_dims_str.isdigit():
1064 | valid_values['matrix_dims'] = int(matrix_dims_str)
1065 | else:
1066 | error = "Invalid input count"
1067 |
1068 | weight_range_str_test = weight_range_str.replace(".", "")
1069 | weight_range_str_test = weight_range_str_test.replace("-", "")
1070 | if self.check_str_list_valid(weight_range_str_test):
1071 | valid_values['weight_range'] = map(float, weight_range_str.split(","))
1072 | else:
1073 | error = "Invalid weight ranges"
1074 | if to_retrieve.isdigit() and to_retrieve != 'all':
1075 | if to_retrieve != 'all':
1076 | valid_values['to_retrieve'] = int(to_retrieve)
1077 | else:
1078 | valid_values['to_retrieve'] = to_retrieve
1079 | else:
1080 | error = "Invalid matrices to use entry"
1081 |
1082 | valid_values["data_file_name"] = self.data_processor.folders_for_data[
1083 | "new"] + "/" + valid_values["data_file_name"]
1084 | if not os.path.isfile(valid_values["data_file_name"]):
1085 | error = "File does not exist in processed_datasets"
1086 |
1087 | if not output_count.isdigit():
1088 | error = "Invalid output count"
1089 | else:
1090 | valid_values['output_count'] = int(output_count)
1091 | if not epochs.isdigit():
1092 | error = "Invalid epochs entry"
1093 | else:
1094 | valid_values['epochs'] = int(epochs)
1095 | if not data_to_test_count.isdigit():
1096 | error = "Invalid data to test entry"
1097 | else:
1098 | valid_values['data_to_test'] = int(data_to_test_count)
1099 | if (valid_values['data_to_test'] > 100):
1100 | error = "Data to test should be under 100%"
1101 |
1102 | valid_values['success'] = True
1103 |
1104 | if not error:
1105 | return valid_values
1106 | else:
1107 | response = {}
1108 | response['success'] = False
1109 | response['error'] = error
1110 | return response
1111 |
1112 | def start_learning_ui_request(self):
1113 | self.cancel_training = False
1114 | self.can_clear_graph = True
1115 | self.field_result = self.check_all_fields_valid()
1116 | if self.field_result['success']:
1117 | self.start_learning_opt.config(state="disabled")
1118 | self.cancel_learning_opt.config(state="normal")
1119 | self.save_nn_opt.config(state="disabled")
1120 | thread.start_new_thread(self.start_learning_in_thread, ())
1121 | else:
1122 | tkMessageBox.showinfo("Error", self.field_result['error'])
1123 |
  # Cached training state shared across runs so the dataset is only
  # reloaded when the file name or row-retrieval count actually changes
  # (see start_learning_in_thread).
  matrix_data = []
  matrix_targets = []
  curr_dataset_name = ""
  input_neuron_count = 0
  prev_to_retrieve_val = ""
1129 |
  # Start new thread for training, to prevent crash on main thread.
  def start_learning_in_thread(self):
    """Load (or reuse) the dataset matrices and run neural-network training.

    Runs on a worker thread. Reads the validated field values stored by
    start_learning_ui_request in self.field_result. The matrices and
    network are rebuilt only when the dataset file or retrieval count
    differ from the previous run.
    """
    field_result = self.field_result
    testing_mode = False
    if (field_result['to_retrieve'] != self.prev_to_retrieve_val or
        field_result['data_file_name'] != self.curr_dataset_name):

      # Initiate hyperparameters for neural network
      self.curr_dataset_name = field_result['data_file_name']
      self.matrix_dims = field_result['matrix_dims']
      self.data_processor.load_matrix_data(field_result['to_retrieve'],
                                           field_result['data_file_name'], self)
      self.data_processor.populate_matrices()
      # NOTE(review): this sets prev_to_retrieve, but the staleness check
      # above compares prev_to_retrieve_val (set at the bottom) — confirm
      # this attribute is actually used anywhere.
      self.prev_to_retrieve = self.data_processor.to_retrieve
      self.input_neuron_count = field_result['matrix_dims']
      self.matrix_data = self.data_processor.matrices
      self.matrix_targets = self.data_processor.targets
      self.dataset_meta = self.data_processor.dataset_meta
      self.has_alphas = self.data_processor.has_alphas

      # Initiate neural network!
      self.neural_network = neural_network()
      self.neural_network.initilize_nn(
          field_result['hidden_layers'], self.input_neuron_count,
          field_result['output_count'], self.matrix_data, self.matrix_targets,
          field_result['biases_for_non_input_layers'],
          field_result['learning_constant'], testing_mode,
          field_result['weight_range'], field_result['epochs'],
          field_result['data_to_test'], self.dataset_meta, self.dataset_row_count,
          self.has_alphas, self)
    self.prev_to_retrieve_val = field_result['to_retrieve']
    self.neural_network.train()
1162 |
1163 | def render_neural_net_visualization(self, layers, biases):
1164 | self.tk_nn_visual_canvas.delete("all")
1165 | for old_labels in self.canvas_labels:
1166 | old_labels.destroy()
1167 |
1168 | example_p_limit_count = 20 #zero for all
1169 | highest_layer_count = max(layers)
1170 | if (highest_layer_count > example_p_limit_count):
1171 | highest_layer_count = example_p_limit_count
1172 |
1173 | highest_layer_height = 0
1174 |
1175 | if (len(layers) - 1 != len(biases)):
1176 | diff_b_layers = len(layers) - 1 - len(biases)
1177 | if (diff_b_layers < 0):
1178 | biases = biases[0:diff_b_layers]
1179 | else:
1180 | for i in range(diff_b_layers):
1181 | biases.append(0)
1182 |
1183 | neuron_padding = 5
1184 | neuron_radius = int(
1185 | (((self.canvas_height / highest_layer_count) / 2) - neuron_padding))
1186 | if (neuron_radius > 15): neuron_radius = 15
1187 | neuron_x = neuron_radius + 20
1188 | neuron_dist_x = (self.canvas_width / (len(layers) - 1)) - neuron_x * 2
1189 | neuron_hidden_c = "#db7070"
1190 | neuron_outter_c = "#8bd78f"
1191 | line_color = "#a0a6b7"
1192 |
1193 | bias_pos_diff_x = 50
1194 | bias_pos_diff_y = 50
1195 | bias_color = "#837FD3"
1196 | bias_pos_y = neuron_radius * 2
1197 |
1198 | def get_layer_height_px(layer_count):
1199 | return (layer_count * (neuron_radius * 2 + neuron_padding))
1200 |
1201 | # Check neuron layers for maximum lengths and heights
1202 | for neuron_layer in range(0, len(layers)):
1203 | length_of_layer = layers[neuron_layer]
1204 | if (example_p_limit_count > 0 and
1205 | example_p_limit_count < length_of_layer):
1206 | length_of_layer = example_p_limit_count
1207 | curr_layer_height = get_layer_height_px(length_of_layer)
1208 | if (curr_layer_height > highest_layer_height):
1209 | highest_layer_height = curr_layer_height
1210 |
1211 | # Render actual neurons and labels
1212 | for neuron_layer in range(0, len(layers)):
1213 | length_of_layer = layers[neuron_layer]
1214 | if (example_p_limit_count > 0 and
1215 | example_p_limit_count < length_of_layer):
1216 | length_of_layer = example_p_limit_count
1217 |
1218 | neuron_ystart = (
1219 | self.canvas_height - get_layer_height_px(length_of_layer)) / 2
1220 | neuron_y = neuron_ystart
1221 | layer_has_bias = ((neuron_layer > 0) and (biases[neuron_layer - 1] != 0))
1222 | if layer_has_bias:
1223 | bias_y_pos = 20
1224 | bias_x_pos = neuron_x - bias_pos_diff_x
1225 | bias_oval = self.tk_nn_visual_canvas.create_oval(
1226 | bias_x_pos - neuron_radius,
1227 | bias_y_pos - neuron_radius,
1228 | bias_x_pos + neuron_radius,
1229 | bias_y_pos + neuron_radius,
1230 | fill=bias_color,
1231 | outline=bias_color)
1232 | self.tk_nn_visual_canvas.tag_raise(bias_oval)
1233 |
1234 | neuron_color = neuron_hidden_c
1235 | if (neuron_layer == 0 or neuron_layer == len(layers) - 1):
1236 | neuron_color = neuron_outter_c
1237 | for single_neuron in range(0, length_of_layer):
1238 | if (single_neuron == 0):
1239 | real_layer_count = layers[neuron_layer]
1240 | extra_str_label = ""
1241 | if (real_layer_count > length_of_layer):
1242 | extra_str_label = "^\n^\n"
1243 | self.canvas_labels.append(
1244 | Label(
1245 | self.tk_nn_visual_canvas,
1246 | text=extra_str_label + str(real_layer_count)))
1247 | self.canvas_labels[-1].place(
1248 | x=neuron_x - (neuron_radius * 2),
1249 | y=neuron_y - (neuron_radius * 3))
1250 |
1251 | neuron_oval = self.tk_nn_visual_canvas.create_oval(
1252 | neuron_x - neuron_radius,
1253 | neuron_y - neuron_radius,
1254 | neuron_x + neuron_radius,
1255 | neuron_y + neuron_radius,
1256 | fill=neuron_color,
1257 | outline=neuron_color)
1258 | self.tk_nn_visual_canvas.tag_raise(neuron_oval)
1259 |
1260 | if layer_has_bias:
1261 | bias_connector = self.tk_nn_visual_canvas.create_line(
1262 | neuron_x, neuron_y, bias_x_pos, bias_y_pos, fill=line_color)
1263 | self.tk_nn_visual_canvas.tag_lower(bias_connector)
1264 |
1265 | neuron_dist_y = (neuron_radius * 2) + neuron_padding
1266 | if (neuron_layer < len(layers) - 1):
1267 | length_of_next_layer = layers[neuron_layer + 1]
1268 | if (example_p_limit_count > 0 and
1269 | example_p_limit_count < length_of_next_layer):
1270 | length_of_next_layer = example_p_limit_count
1271 | neuron_y_for_line = (self.canvas_height - (length_of_next_layer) *
1272 | (neuron_radius * 2 + neuron_padding)) / 2
1273 |
1274 | for neuron_weights in range(0, length_of_next_layer):
1275 | neuron_connector = self.tk_nn_visual_canvas.create_line(
1276 | neuron_x,
1277 | neuron_y,
1278 | neuron_x + neuron_dist_x,
1279 | neuron_y_for_line,
1280 | fill=line_color)
1281 | self.tk_nn_visual_canvas.tag_lower(neuron_connector)
1282 |
1283 | neuron_y_for_line += neuron_dist_y
1284 |
1285 | neuron_y += neuron_dist_y
1286 | neuron_x += neuron_dist_x
1287 |
--------------------------------------------------------------------------------