├── AutoEncoder
│   ├── .ipynb_checkpoints
│   │   └── AutoEncoder-checkpoint.ipynb
│   └── AutoEncoder.ipynb
├── CNN
│   ├── .ipynb_checkpoints
│   │   ├── CNN_text-checkpoint.ipynb
│   │   └── Tradition_cnn_image-checkpoint.ipynb
│   ├── CNN_text.ipynb
│   └── Tradition_cnn_image.ipynb
├── EDEN
│   ├── .ipynb_checkpoints
│   │   └── EDEN-DEBUG-checkpoint.ipynb
│   └── EDEN-DEBUG.ipynb
├── GAN
│   ├── .ipynb_checkpoints
│   │   └── GAN-checkpoint.ipynb
│   └── GAN.ipynb
├── LSTM
│   └── LSTM_poem_robot.ipynb
├── MLP
│   ├── .ipynb_checkpoints
│   │   ├── MLP-checkpoint.ipynb
│   │   └── MLP_Text-checkpoint.ipynb
│   ├── MLP.ipynb
│   └── MLP_Text.ipynb
├── Neat
│   ├── .ipynb_checkpoints
│   │   └── xor-checkpoint.ipynb
│   ├── Digraph.gv
│   ├── Digraph.gv.svg
│   ├── __pycache__
│   │   ├── visualize.cpython-36.pyc
│   │   └── visualize.cpython-37.pyc
│   ├── avg_fitness.svg
│   ├── config-feedforward
│   ├── neat-checkpoint-104
│   ├── neat-checkpoint-109
│   ├── neat-checkpoint-114
│   ├── neat-checkpoint-119
│   ├── neat-checkpoint-124
│   ├── neat-checkpoint-14
│   ├── neat-checkpoint-19
│   ├── neat-checkpoint-24
│   ├── neat-checkpoint-29
│   ├── neat-checkpoint-34
│   ├── neat-checkpoint-39
│   ├── neat-checkpoint-4
│   ├── neat-checkpoint-44
│   ├── neat-checkpoint-49
│   ├── neat-checkpoint-54
│   ├── neat-checkpoint-59
│   ├── neat-checkpoint-64
│   ├── neat-checkpoint-69
│   ├── neat-checkpoint-74
│   ├── neat-checkpoint-79
│   ├── neat-checkpoint-84
│   ├── neat-checkpoint-89
│   ├── neat-checkpoint-9
│   ├── neat-checkpoint-94
│   ├── neat-checkpoint-99
│   ├── speciation.svg
│   ├── visualize.py
│   ├── xor.ipynb
│   └── xor.py
├── Python_Basis
│   ├── .ipynb_checkpoints
│   │   ├── Basis-checkpoint.ipynb
│   │   ├── Basis_Advance-checkpoint.ipynb
│   │   └── Basis_high_ranking-checkpoint.ipynb
│   ├── Basis.ipynb
│   ├── Basis_Advance.ipynb
│   ├── Basis_high_ranking.ipynb
│   ├── __pycache__
│   │   └── test.cpython-37.pyc
│   ├── test.py
│   ├── text.txt
│   └── tx.txt
└── Transfer_learning
    ├── .ipynb_checkpoints
    │   └── Transfer_learning_cnn_image-checkpoint.ipynb
    ├── Transfer_learning_cnn_image.ipynb
    └── Trasnfer_learning_cnn_regression.ipynb
/EDEN/EDEN-DEBUG.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stderr",
10 | "output_type": "stream",
11 | "text": [
12 | "C:\\Users\\SZU\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
13 | " from ._conv import register_converters as _register_converters\n",
14 | "Using TensorFlow backend.\n"
15 | ]
16 | },
17 | {
18 | "name": "stdout",
19 | "output_type": "stream",
20 | "text": [
21 | "Load data.....\n"
22 | ]
23 | }
24 | ],
25 | "source": [
26 | "import os\n",
27 | "import sys\n",
28 | "import numpy as np\n",
29 | "from sklearn.model_selection import train_test_split\n",
30 | "from keras.preprocessing import image\n",
31 | "print('Load data.....')\n",
32 | "parapath = './dataset/Parasitized/'\n",
33 | "uninpath = './dataset/Uninfected/'\n",
34 | "parastized = os.listdir(parapath)\n",
35 | "uninfected = os.listdir(uninpath)\n",
36 | "data = []\n",
37 | "label = []\n",
38 | "for para in parastized:\n",
39 | " try:\n",
40 | " img = image.load_img(parapath+para,target_size=(128,128))\n",
41 | " x = image.img_to_array(img)\n",
42 | " data.append(x)\n",
43 | " label.append(1)\n",
44 |     "    except Exception:\n",
45 | " print(\"Can't add \"+para+\" in the dataset\")\n",
46 | "for unin in uninfected:\n",
47 | " try:\n",
48 | " img = image.load_img(uninpath+unin,target_size=(128,128))\n",
49 |     "        data.append(image.img_to_array(img))  # convert this image; the original appended the stale x from the loop above\n",
50 | " label.append(0)\n",
51 |     "    except Exception:\n",
52 | " print(\"Can't add \"+unin+\" in the dataset\") \n",
53 | "data = np.array(data)\n",
54 | "label = np.array(label)\n",
55 | "data = data/255\n",
56 | "x_train, x_test, y_train, y_test = train_test_split(data,label,test_size = 0.2,random_state=0)"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": 14,
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "#!/usr/bin/env python\n",
66 | "# -*- coding: utf-8 -*-\n",
67 | "\"\"\"\n",
68 | "@Author:ChileWang\n",
69 | "@Created On 2019-06-21\n",
70 | "@Coding Environment: Anaconda Python 3.7\n",
71 | "\"\"\"\n",
72 | "import random\n",
73 | "import matplotlib.pyplot as plt\n",
74 | "import pandas as pd\n",
75 | "import keras\n",
76 | "from keras.preprocessing.image import ImageDataGenerator\n",
77 | "from keras.models import Sequential, Model\n",
78 | "from keras.layers import Dense, Dropout, Activation, Flatten\n",
79 | "from keras.layers import Conv2D, MaxPooling2D\n",
80 | "import os\n",
81 | "from keras.layers import LeakyReLU\n",
82 | "from keras.layers import PReLU\n",
83 | "import operator\n",
84 | "from keras.models import load_model\n",
85 | "import copy\n",
86 | "from keras.utils import multi_gpu_model\n",
87 | "from keras.layers.normalization import BatchNormalization\n",
88 |     "# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"  # uncomment to hide all GPUs and force CPU-only execution\n",
89 | "# Save model and weights\n",
90 | "save_dir = os.path.join(os.getcwd(), 'saved_models_cnn')\n",
91 | "if not os.path.isdir(save_dir):\n",
92 | " os.makedirs(save_dir)\n",
93 | "\n",
94 | "class GeneticAlgorithm:\n",
95 |     "    # ----------- initial definitions ---------------------\n",
96 |     "    # parameters required by the genetic algorithm\n",
97 |     "    # JCL = 0.9  # crossover rate\n",
98 |     "    # BYL = 0.09  # mutation rate\n",
99 | "\n",
100 | " def __init__(self, rows, times, num_classes, kill_num):\n",
101 |     "        self.rows = rows  # number of chromosomes (population size)\n",
102 |     "        self.times = times  # number of generations\n",
103 |     "        self.accuracy = 0  # model accuracy\n",
104 |     "        self.layer_list = ['Conv2D', 'Dense']  # layer types available to the algorithm\n",
105 |     "        self.cnn_activation_function = ['linear', 'leaky relu', 'prelu', 'relu']  # activations for conv layers\n",
106 |     "        self.dense_activation_function = ['linear', 'sigmoid', 'softmax', 'relu']  # activations for hidden dense layers\n",
107 |     "        self.last_dense_activation_function = ['sigmoid', 'softmax']  # activations for the final dense layer\n",
108 |     "        self.unit_num_list = [64, 128, 256]  # candidate unit counts\n",
109 |     "        self.filter_num_list = [4, 8, 16]  # candidate filter counts\n",
110 |     "        self.pooling_size = range(2, 4)  # candidate max-pooling sizes\n",
111 |     "        self.filter_size = range(2, 4)  # candidate kernel sizes\n",
112 |     "        self.layer_num_list = range(2, 4)  # candidate hidden-layer counts\n",
113 |     "        self.max_size = 10  # maximum number of layers\n",
114 |     "        self.threshold = 3  # layer-count threshold\n",
115 |     "        self.batch_size = 512\n",
116 |     "        self.num_classes = num_classes  # number of classes (binary here)\n",
117 |     "        self.kill_num = kill_num  # networks culled per generation\n",
118 | "\n",
119 |     "    # ------------- run the genetic algorithm ---------------------\n",
120 | " def run(self):\n",
121 | "\n",
122 |     "        print(\"Start evolving\")\n",
123 |     "        # initialize the population\n",
124 | " lines = [self.create_network(self.create_chromosome()) for i in range(self.rows)]\n",
125 | "\n",
126 |     "        # initialize population fitness\n",
127 | " fit = [0 for i in range(self.rows)]\n",
128 | "\n",
129 | " epochs = 1\n",
130 |     "        # compute the fitness of each chromosome (network)\n",
131 | " for i in range(0, self.rows):\n",
132 | " lines[i]['is_saved'] = False\n",
133 | " lines[i]['model_name'] = 'model_%s' % str(i)\n",
134 | " lines[i] = self.cal_fitness(lines[i], epochs)\n",
135 | " fit[i] = lines[i]['fitness']\n",
136 | "\n",
137 |     "        # generation loop\n",
138 | " t = 0\n",
139 | " while t < self.times:\n",
140 |     "            print('Generation:', t)\n",
141 | " random_fit = [0 for i in range(self.rows)]\n",
142 | " total_fit = 0\n",
143 | " tmp_fit = 0\n",
144 | "\n",
145 |     "            # reproduction\n",
146 |     "            # roulette-wheel selection of a parent\n",
147 |     "            # total fitness of the current population\n",
148 | " for i in range(self.rows):\n",
149 | " total_fit += fit[i]\n",
150 |     "            # build the cumulative selection distribution from each fitness share\n",
151 |     "            for i in range(self.rows):\n",
152 |     "                tmp_fit += fit[i] / total_fit\n",
153 |     "                random_fit[i] = tmp_fit  # cumulative probability, reaches 1.0 at the end\n",
154 | " r = int(self.random_select(random_fit))\n",
155 | " line = lines[r]\n",
156 | "\n",
157 |     "            # no crossover here: mutate directly and pass on to the next generation\n",
158 |     "            # gene mutation produces two offspring\n",
159 |     "            print('******* Mutation ******')\n",
160 | " offspring1 = self.mutation(line, 'offspring1')\n",
161 | " offspring2 = self.mutation(offspring1, 'offspring2')\n",
162 | " best_chromosome = self.get_best_chromosome(line, offspring1, offspring2, epochs)\n",
163 |     "            # replace the original parent\n",
164 | " father_model_name = lines[r]['model_name']\n",
165 | " lines[r] = best_chromosome\n",
166 |     "            print('Saving the best mutated individual....')\n",
167 |     "            # save the model\n",
168 | " model_path = os.path.join(save_dir, father_model_name)\n",
169 |     "            lines[r]['model_path'] = model_path  # path of this model\n",
170 |     "            lines[r]['is_saved'] = True  # saved flag\n",
171 | " best_chromosome_model = lines[r]['model'] \n",
172 | " best_chromosome_model.save(model_path)\n",
173 | " \n",
174 | " epochs += 1\n",
175 |     "            # cull the worst self.kill_num networks\n",
176 | " kill_index = 1\n",
177 |     "            sorted_lines = sorted(lines, key=operator.itemgetter('fitness'))  # sort by fitness, ascending\n",
178 | " if len(sorted_lines) > self.kill_num:\n",
179 |     "                # in the first generation, cull networks with fitness below 0.55\n",
180 | " for i in range(len(sorted_lines)):\n",
181 | " if sorted_lines[i]['fitness'] < 0.55:\n",
182 | " kill_index = i\n",
183 | " else:\n",
184 | " break\n",
185 | " if t == 0:\n",
186 | " new_lines = sorted_lines[kill_index:]\n",
187 | " self.rows -= kill_index\n",
188 | " else:\n",
189 | " new_lines = sorted_lines[self.kill_num:]\n",
190 | " self.rows -= self.kill_num\n",
191 |     "            lines = new_lines  # update the population\n",
192 |     "            next_fit = [line['fitness'] for line in lines]  # update the fitness list\n",
193 | " fit = next_fit \n",
194 | " # self.rows -= self.kill_num\n",
195 | " print('..........Population size:%d .........' % self.rows)\n",
196 | " \n",
197 |     "            # run one more epoch and recompute the population's fitness\n",
198 |     "            # compute the fitness of each chromosome (network)\n",
199 | " for i in range(0, self.rows):\n",
200 | " lines[i] = self.cal_fitness(lines[i], 1)\n",
201 | " fit[i] = lines[i]['fitness']\n",
202 | " print('***************************************************')\n",
203 | " print()\n",
204 |     "            t += 1  # next generation\n",
205 | "\n",
206 |     "        # pick the individual with the highest fitness\n",
207 | " m = fit[0]\n",
208 | " ml = 0\n",
209 | " for i in range(self.rows):\n",
210 | " if m < fit[i]:\n",
211 | " m = fit[i]\n",
212 | " ml = i\n",
213 | "\n",
214 |     "        print(\"Evolution finished\")\n",
215 |     "        # report the results:\n",
216 | " excellent_chromosome = self.cal_fitness(lines[ml], 0)\n",
217 | " print('The best network:')\n",
218 | " print(excellent_chromosome['model'].summary())\n",
219 | " print('Fitness: ', excellent_chromosome['fitness'])\n",
220 | " print('Accuracy', excellent_chromosome['accuracy'])\n",
221 | " \n",
222 | " best_model_save_dir = os.path.join(os.getcwd(), 'best_model')\n",
223 | " if not os.path.isdir(best_model_save_dir):\n",
224 | " os.makedirs(best_model_save_dir)\n",
225 | " best_model_path = os.path.join(best_model_save_dir, 'excellent_model')\n",
226 | " excellent_chromosome['model'].save(best_model_path)\n",
227 | " print(excellent_chromosome['layer_list'])\n",
228 | " with open('best_network_layer_list.txt', 'w') as fw:\n",
229 | " for layer in excellent_chromosome['layer_list']:\n",
230 | " fw.write(str(layer) + '\\n')\n",
231 | " \n",
232 | "\n",
233 |     "    # ----------------- end of the genetic run --------------------\n",
234 |     "\n",
235 |     "    # ----------------- helper functions --------------------\n",
236 | " def create_network(self, chromsome_dict):\n",
237 | " \"\"\"\n",
238 |     "        :param chromsome_dict: chromosome attribute dict\n",
239 | " :return:\n",
240 | " \"\"\"\n",
241 | " layer_list = chromsome_dict['layer_list']\n",
242 |     "        layer_num = chromsome_dict['layer_num'] + 2  # including the input and output layers\n",
243 |     "        unit_num_sum = 0  # running total of dense units\n",
244 | " model = Sequential()\n",
245 | " for i in range(len(layer_list) - 1):\n",
246 | " if i == 0:\n",
247 | " model.add(Conv2D(layer_list[i]['conv_kernel_num'],\n",
248 | " layer_list[i]['conv_kernel_size'],\n",
249 | " padding=layer_list[i]['padding'],\n",
250 | " input_shape=layer_list[i]['input_shape'],\n",
251 | " kernel_initializer='he_normal'\n",
252 | " )\n",
253 | " )\n",
254 | " model = self.add_activation_funtion(model, layer_list[i]['layer_activation_function'])\n",
255 | "\n",
256 | " else:\n",
257 | " if layer_list[i]['layer_name'] == 'Conv2D':\n",
258 | " model.add(Conv2D(layer_list[i]['conv_kernel_num'],\n",
259 | " layer_list[i]['conv_kernel_size'],\n",
260 | " padding=layer_list[i]['padding'],\n",
261 | " )\n",
262 | " )\n",
263 | " model = self.add_activation_funtion(model, layer_list[i]['layer_activation_function'])\n",
264 | "\n",
265 |     "                    if layer_list[i]['pooling_choice']:  # optionally add a pooling layer\n",
266 | " try:\n",
267 | " model.add(MaxPooling2D(pool_size=layer_list[i]['pool_size'], dim_ordering=\"tf\"))\n",
268 | " except Exception as error:\n",
269 |     "                        print('Pooling window larger than the input; falling back to pool_size=(1, 1)')\n",
270 | " model.add(MaxPooling2D(pool_size=(1, 1), strides = (2, 2)))\n",
271 | " layer_num += 1\n",
272 | " model.add(BatchNormalization())\n",
273 | "\n",
274 |     "                    # dropout layer\n",
275 | " model.add(Dropout(layer_list[i]['dropout_rate']))\n",
276 | "\n",
277 |     "                else:  # dense layer\n",
278 | " unit_num_sum += layer_list[i]['unit_num']\n",
279 | " model.add(Dense(layer_list[i]['unit_num'],\n",
280 | " )\n",
281 | " )\n",
282 | " model = self.add_activation_funtion(model, layer_list[i]['layer_activation_function'])\n",
283 |     "                    # dropout layer\n",
284 | " model.add(Dropout(layer_list[i]['dropout_rate']))\n",
285 | "\n",
286 |     "        # output head\n",
287 | " model.add(Flatten())\n",
288 | "\n",
289 | " if layer_list[-1]['layer_activation_function'] == 'sigmoid':\n",
290 | " model.add(Dense(1))\n",
291 | " unit_num_sum += 1\n",
292 | " else:\n",
293 | " model.add(Dense(self.num_classes))\n",
294 | " unit_num_sum += self.num_classes\n",
295 | " model = self.add_activation_funtion(model, layer_list[-1]['layer_activation_function'])\n",
296 | " chromsome_dict['model'] = model\n",
297 |     "        chromsome_dict['punish_factor'] = (1 / layer_num) + (1 / unit_num_sum)  # penalty factor\n",
298 | " return chromsome_dict\n",
299 | "\n",
300 | " def add_activation_funtion(self, model, activation_name):\n",
301 | " \"\"\"\n",
302 |     "        Attach the named activation function to the model.\n",
303 | " :param model:\n",
304 | " :param activation_name:\n",
305 | " :return:\n",
306 | " \"\"\"\n",
307 | " if activation_name == 'leaky relu':\n",
308 | " model.add(LeakyReLU())\n",
309 | " elif activation_name == 'prelu':\n",
310 | " model.add(PReLU())\n",
311 | " else:\n",
312 | " model.add(Activation(activation_name))\n",
313 | " return model\n",
314 | "\n",
315 | " def create_chromosome(self):\n",
316 | " \"\"\"\n",
317 |     "        Create a chromosome.\n",
318 | " \"\"\"\n",
319 |     "        chromsome_dict = dict()  # a dict holds all attributes of the chromosome\n",
320 |     "        chromsome_dict['learning_rate'] = self.random_learning_rate()  # learning rate\n",
321 |     "        layer_num = random.choice(self.layer_num_list)  # number of hidden layers, excluding input and output\n",
322 | " chromsome_dict['layer_num'] = layer_num\n",
323 | "\n",
324 |     "        layer_list = list()  # ordered list of layers\n",
325 |     "        # the first layer must be convolutional\n",
326 | " layer_list.append({'layer_name': 'Conv2D',\n",
327 | " 'conv_kernel_num': 32,\n",
328 | " 'conv_kernel_size': (3, 3),\n",
329 | " 'padding': 'same',\n",
330 | " 'input_shape': (128, 128, 3),\n",
331 | " 'layer_activation_function': random.choice(self.cnn_activation_function)}\n",
332 | " )\n",
333 |     "        # attributes of each layer\n",
334 | " for i in range(layer_num):\n",
335 |     "            # choose the layer type\n",
336 | "# layer_name = self.layer_list[random.randint(0, 1)]\n",
337 | " layer_name = 'Conv2D'\n",
338 |     "            if i == 0:  # the first hidden layer gets dropout_rate 0, i.e. no dropout\n",
339 | " layer_dict = self.create_layer(layer_name)\n",
340 | " layer_dict['dropout_rate'] = 0\n",
341 | "\n",
342 | " else:\n",
343 | " layer_dict = self.create_layer(layer_name)\n",
344 |     "            layer_list.append(layer_dict)  # append to the layer list\n",
345 | "\n",
346 |     "        # the last layer must be a dense layer\n",
347 | " layer_list.append({'layer_name': 'Dense',\n",
348 | " 'layer_activation_function': random.choice(self.last_dense_activation_function)\n",
349 | " }\n",
350 | " )\n",
351 | "\n",
352 |     "        # attach the ordered layer list to the chromosome\n",
353 | " chromsome_dict['layer_list'] = layer_list\n",
354 | "\n",
355 | " return chromsome_dict\n",
356 | "\n",
357 | " def create_layer(self, layer_name):\n",
358 | " \"\"\"\n",
359 |     "        Create the attribute dict for a single layer.\n",
360 | " \"\"\"\n",
361 | " layer_dict = dict()\n",
362 | " layer_dict['layer_name'] = layer_name\n",
363 | " if layer_name == 'Conv2D':\n",
364 |     "            # activation function\n",
365 | " layer_activation_function = random.choice(self.cnn_activation_function)\n",
366 |     "            # number and size of the kernels\n",
367 | " conv_kernel_num = random.choice(self.filter_num_list)\n",
368 | " random_size = random.choice(self.filter_size)\n",
369 | " conv_kernel_size = (random_size, random_size)\n",
370 |     "            # whether to add a pooling layer\n",
371 | " pooling_choice = [True, False]\n",
372 | " if pooling_choice[random.randint(0, 1)]:\n",
373 | " layer_dict['pooling_choice'] = True\n",
374 | " random_size = random.choice(self.pooling_size)\n",
375 | " pool_size = (random_size, random_size)\n",
376 | " layer_dict['pool_size'] = pool_size\n",
377 | " else:\n",
378 | " layer_dict['pooling_choice'] = False\n",
379 | "\n",
380 | " layer_dict['layer_activation_function'] = layer_activation_function\n",
381 | " layer_dict['conv_kernel_num'] = conv_kernel_num\n",
382 | " layer_dict['conv_kernel_size'] = conv_kernel_size\n",
383 | " layer_dict['padding'] = 'same'\n",
384 | "\n",
385 |     "        else:  # dense layer\n",
386 |     "            # activation function\n",
387 | " layer_activation_function = random.choice(self.dense_activation_function)\n",
388 |     "            # number of units\n",
389 | " unit_num = random.choice(self.unit_num_list)\n",
390 | " layer_dict['layer_activation_function'] = layer_activation_function\n",
391 | " layer_dict['unit_num'] = unit_num\n",
392 | " layer_dict['dropout_rate'] = round(random.uniform(0, 1), 3)\n",
393 | " return layer_dict\n",
394 | "\n",
395 | " def cal_fitness(self, line, epochs):\n",
396 | " \"\"\"\n",
397 |     "        :param line: chromosome (network)\n",
398 |     "        :param epochs: number of training epochs\n",
399 | " :return:\n",
400 | " \"\"\"\n",
401 | " if epochs == 0:\n",
402 | " return line\n",
403 | " line = self.train_process(line, epochs)\n",
404 |     "        # fitness = accuracy + the reciprocal of the parameter count; larger means a better model.\n",
405 | " # fitness = line['accuracy'] + line['punish_factor']\n",
406 | " fitness = line['accuracy'] \n",
407 | " line['fitness'] = fitness\n",
408 | " return line\n",
409 | "\n",
410 | " def train_process(self, line, epochs):\n",
411 | " \"\"\"\n",
412 |     "        Train one network.\n",
413 |     "        :param line: chromosome\n",
414 |     "        :param epochs: number of training epochs\n",
415 | " :return:\n",
416 | " \"\"\"\n",
417 | " \n",
418 | " print('learning_rate:', line['learning_rate'])\n",
419 | " print('layer_num:', len(line['layer_list']))\n",
420 | " \n",
421 |     "        if line['is_saved']:  # if already saved, reload the model and keep training\n",
422 |     "            print('Loading the saved model for further training..............')\n",
423 | " model_path = line['model_path']\n",
424 | " model = load_model(model_path)\n",
425 | " accuracy = model.evaluate(x = x_test,y = y_test)[1]\n",
426 | " print('former accuracy:', accuracy)\n",
427 | " else:\n",
428 |     "            print('Training from scratch.......')\n",
429 | " model = line['model']\n",
430 | " learning_rate = line['learning_rate']\n",
431 |     "        # initialize the Adam optimizer\n",
432 | " opt = keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999,\n",
433 | " epsilon=None, decay=1e-6, amsgrad=False)\n",
434 |     "        # compile the model\n",
435 | " layer_list = line['layer_list']\n",
436 | " if layer_list[-1]['layer_activation_function'] == 'softmax':\n",
437 | " loss = 'sparse_categorical_crossentropy'\n",
438 | " else:\n",
439 | " loss = 'binary_crossentropy'\n",
440 | " print(loss)\n",
441 | " model.compile(loss=loss,\n",
442 | " optimizer=opt,\n",
443 | " metrics=['accuracy'])\n",
444 | "\n",
445 | " history = model.fit(x_train, y_train,epochs=epochs, batch_size=self.batch_size)\n",
446 | " # Score trained model.\n",
447 | " accuracy = model.evaluate(x = x_test,y = y_test)[1]\n",
448 | " line['accuracy'] = accuracy\n",
449 |     "        line['history'] = history  # training history\n",
450 | " print('accuracy:', accuracy)\n",
451 |     "        # save the model\n",
452 | " model_name = line['model_name']\n",
453 | " model_path = os.path.join(save_dir, model_name)\n",
454 |     "        line['model_path'] = model_path  # path of this model\n",
455 |     "        line['is_saved'] = True  # saved flag\n",
456 | " line['model'] = model\n",
457 | " model.save(model_path)\n",
458 | " \n",
459 | " return line\n",
460 | "\n",
461 | " def mutation(self, line, name):\n",
462 | " \"\"\"\n",
463 |     "        Gene mutation: return a mutated copy of a chromosome.\n",
464 | " :param line:\n",
465 | " :return:\n",
466 | " \"\"\"\n",
467 |     "        offspring1 = copy.deepcopy(line)  # deep copy!!!! offspring 1\n",
468 | " offspring1['model_name'] = name\n",
469 | " offspring1['is_saved'] = False\n",
470 | " mutation_choice = [True, False]\n",
471 |     "        # mutate offspring 1\n",
472 |     "        if mutation_choice[random.randint(0, 1)]:  # change the learning rate\n",
473 | " print('Mutation Operation: Change learning rate....')\n",
474 |     "            offspring1['learning_rate'] = self.random_learning_rate()  # new learning rate\n",
475 |     "        else:  # change the network structure\n",
476 | " offspring1 = self.layer_mutation_operation(offspring1)\n",
477 | " offspring1 = self.create_network(offspring1)\n",
478 | " return offspring1\n",
479 | "\n",
480 | " def layer_mutation_operation(self, offspring):\n",
481 | " \"\"\"\n",
482 |     "        :param offspring: offspring chromosome\n",
483 | " :return:\n",
484 | " \"\"\"\n",
485 |     "        mutation_layer_choice = [0, 1, 2]  # add, replace, delete\n",
486 | " mutation_layer_choice_name = ['Add', 'Replace', 'Delete']\n",
487 | " layer_name = self.layer_list[random.randint(0, 1)]\n",
488 | " layer_dict = self.create_layer(layer_name)\n",
489 | " choice_index = -1\n",
490 |     "        if self.threshold < offspring['layer_num'] < self.max_size:  # between the threshold and the maximum: may add, replace, or delete\n",
491 | " choice_index = random.randint(0, 2)\n",
492 |     "            if mutation_layer_choice[choice_index] == 0:  # add\n",
493 |     "                insert_index = random.randint(1, len(offspring['layer_list']) - 1)  # insertion position\n",
494 | " offspring['layer_list'].insert(insert_index, layer_dict)\n",
495 | " offspring['layer_num'] += 1\n",
496 | "\n",
497 |     "            elif mutation_layer_choice[choice_index] == 1:  # replace\n",
498 |     "                replace_index = random.randint(1, len(offspring['layer_list']) - 1)  # replacement position\n",
499 | " offspring['layer_list'][replace_index] = layer_dict\n",
500 | "\n",
501 |     "            else:  # delete a layer\n",
502 |     "                delete_index = random.randint(1, len(offspring['layer_list']) - 1)  # deletion position\n",
503 | " del offspring['layer_list'][delete_index]\n",
504 | " offspring['layer_num'] -= 1\n",
505 | "\n",
506 |     "        elif offspring['layer_num'] <= self.threshold:  # at or below the threshold: may only add or replace\n",
507 | " choice_index = random.randint(0, 1)\n",
508 |     "            if mutation_layer_choice[choice_index] == 0:  # add\n",
509 |     "                insert_index = random.randint(1, len(offspring['layer_list']) - 1)  # insertion position\n",
510 | " offspring['layer_list'].insert(insert_index, layer_dict)\n",
511 | " offspring['layer_num'] += 1\n",
512 | " else:\n",
513 |     "                replace_index = random.randint(1, len(offspring['layer_list']) - 1)  # replacement position\n",
514 | " offspring['layer_list'][replace_index] = layer_dict\n",
515 | "\n",
516 |     "        else:  # at the maximum: may only replace or delete\n",
517 | " choice_index = random.randint(1, 2)\n",
518 |     "            if mutation_layer_choice[choice_index] == 1:  # replace a layer\n",
519 |     "                replace_index = random.randint(1, len(offspring['layer_list']) - 1)  # replacement position\n",
520 | " offspring['layer_list'][replace_index] = layer_dict\n",
521 | "\n",
522 |     "            else:  # delete a layer\n",
523 |     "                delete_index = random.randint(1, len(offspring['layer_list']) - 1)  # deletion position\n",
524 | " del offspring['layer_list'][delete_index]\n",
525 | " offspring['layer_num'] -= 1\n",
526 | " print('Mutation Operation:', mutation_layer_choice_name[choice_index])\n",
527 | " return offspring\n",
528 | "\n",
529 | " def get_best_chromosome(self, father, offspring1, offspring2, epochs):\n",
530 | " \"\"\"\n",
531 |     "        Compare the fitness of the parent and both offspring and return the fittest.\n",
532 | " :param father:\n",
533 | " :param offspring1:\n",
534 | " :param offspring2:\n",
535 | " :param epochs:\n",
536 |     "        :return: the chromosome with the highest fitness\n",
537 | " \"\"\"\n",
538 |     "        print('Training offspring 1:', epochs)\n",
539 | " offspring1 = self.cal_fitness(offspring1, epochs)\n",
540 | " \n",
541 |     "        print('Training offspring 2:', epochs)\n",
542 | " offspring2 = self.cal_fitness(offspring2, epochs)\n",
543 | " \n",
544 | " tmp_lines = [father, offspring1, offspring2]\n",
545 |     "        sorted_lines = sorted(tmp_lines, key=operator.itemgetter('fitness'))  # sort by fitness, ascending\n",
546 | " return sorted_lines[-1]\n",
547 | "\n",
548 | " def random_learning_rate(self):\n",
549 |     "        return random.uniform(0.01, 0.02)  # learning rate\n",
550 | "\n",
551 | " def random_select(self, ran_fit):\n",
552 | " \"\"\"\n",
553 |     "        Roulette-wheel selection.\n",
554 |     "        Randomly pick a chromosome index in proportion to fitness.\n",
555 | " :param ran_fit:\n",
556 | " :return:\n",
557 | " \"\"\"\n",
558 | " ran = random.random()\n",
559 | " for i in range(self.rows):\n",
560 | " if ran < ran_fit[i]:\n",
561 | " return i\n",
562 |     "        return self.rows - 1  # fallback in case of floating-point round-off\n",
563 | " \n"
564 | ]
565 | },
566 | {
567 | "cell_type": "code",
568 | "execution_count": null,
569 | "metadata": {
570 | "scrolled": false
571 | },
572 | "outputs": [
573 | {
574 | "name": "stdout",
575 | "output_type": "stream",
576 | "text": [
577 |     "Start evolving\n"
578 | ]
579 | },
580 | {
581 | "name": "stderr",
582 | "output_type": "stream",
583 | "text": [
584 | "C:\\Users\\SZU\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:203: UserWarning: Update your `MaxPooling2D` call to the Keras 2 API: `MaxPooling2D(pool_size=(2, 2), data_format=\"channels_last\")`\n",
585 | "C:\\Users\\SZU\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:203: UserWarning: Update your `MaxPooling2D` call to the Keras 2 API: `MaxPooling2D(pool_size=(3, 3), data_format=\"channels_last\")`\n"
586 | ]
587 | },
588 | {
589 | "name": "stdout",
590 | "output_type": "stream",
591 | "text": [
592 | "learning_rate: 0.012029467024930782\n",
593 | "layer_num: 4\n",
594 |     "Training from scratch.......\n",
595 | "sparse_categorical_crossentropy\n",
596 | "Epoch 1/1\n",
597 | "22046/22046 [==============================] - 28s 1ms/step - loss: 7.5838 - acc: 0.5212\n",
598 | "5512/5512 [==============================] - 6s 1ms/step\n",
599 | "accuracy: 0.49038461538461536\n",
600 | "learning_rate: 0.015437717269751258\n",
601 | "layer_num: 4\n",
602 |     "Training from scratch.......\n",
603 | "sparse_categorical_crossentropy\n",
604 | "Epoch 1/1\n",
605 | "22046/22046 [==============================] - 22s 1ms/step - loss: 7.7997 - acc: 0.5026\n",
606 | "5512/5512 [==============================] - 6s 1ms/step\n",
607 | "accuracy: 0.49038461538461536\n",
608 | "learning_rate: 0.01007904573168161\n",
609 | "layer_num: 5\n",
610 |     "Training from scratch.......\n",
611 | "binary_crossentropy\n",
612 | "Epoch 1/1\n"
613 | ]
614 | }
615 | ],
616 | "source": [
617 |     "# ------------- entry point -----------------------------\n",
618 | "\"\"\"\n",
619 |     "The constructor arguments mean, in order:\n",
620 |     "    self.rows = rows  # number of chromosomes (the population size)\n",
621 |     "    self.times = times  # number of generations\n",
622 |     "    self.num_classes = num_classes  # number of classes\n",
623 |     "    self.kill_num = kill_num  # networks culled per generation\n",
624 | "\"\"\"\n",
625 | "if __name__ == '__main__':\n",
626 | " ga = GeneticAlgorithm(rows=50, times=3, num_classes=2, kill_num=7)\n",
627 | " ga.run()\n"
628 | ]
629 | }
630 | ],
631 | "metadata": {
632 | "kernelspec": {
633 | "display_name": "Python 3",
634 | "language": "python",
635 | "name": "python3"
636 | },
637 | "language_info": {
638 | "codemirror_mode": {
639 | "name": "ipython",
640 | "version": 3
641 | },
642 | "file_extension": ".py",
643 | "mimetype": "text/x-python",
644 | "name": "python",
645 | "nbconvert_exporter": "python",
646 | "pygments_lexer": "ipython3",
647 | "version": "3.7.3"
648 | }
649 | },
650 | "nbformat": 4,
651 | "nbformat_minor": 2
652 | }
653 |
--------------------------------------------------------------------------------
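
The roulette-wheel selection in run() builds a cumulative probability table that random_select() then samples from. As a minimal standalone sketch of that selection step (the function name and the toy fitness values below are illustrative only, not from the notebook):

    import random

    def roulette_select(fitness):
        """Return an index drawn with probability proportional to fitness."""
        total = sum(fitness)
        cumulative = 0.0
        r = random.random()
        for i, f in enumerate(fitness):
            cumulative += f / total  # running cumulative probability, ends at 1.0
            if r < cumulative:
                return i
        return len(fitness) - 1  # guard against floating-point round-off

    # fitter individuals are drawn more often
    counts = {0: 0, 1: 0, 2: 0}
    for _ in range(10000):
        counts[roulette_select([0.49, 0.52, 0.61])] += 1
    print(counts)
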
/GAN/.ipynb_checkpoints/GAN-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/MLP/.ipynb_checkpoints/MLP_Text-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 5,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stderr",
10 | "output_type": "stream",
11 | "text": [
12 | "Using TensorFlow backend.\n"
13 | ]
14 | }
15 | ],
16 | "source": [
17 | "import pandas as pd\n",
18 | "import jieba\n",
19 | "import jieba.analyse as analyse\n",
20 | "from keras.preprocessing.text import Tokenizer\n",
21 | "from keras.preprocessing import sequence\n",
22 | "from keras.models import Sequential\n",
23 | "from keras.layers import Dense, Dropout, Activation, Flatten, MaxPool1D, Conv1D\n",
24 | "from keras.layers.embeddings import Embedding\n",
25 | "from keras.utils import multi_gpu_model\n",
26 | "from keras.models import load_model\n",
27 |     "from keras import regularizers  # regularization\n",
28 | "import matplotlib.pyplot as plt\n",
29 | "import numpy as np\n",
30 | "from keras.utils import plot_model\n",
31 | "from sklearn.model_selection import train_test_split\n",
32 | "from keras.utils.np_utils import to_categorical\n",
33 | "from sklearn.preprocessing import LabelEncoder\n",
34 | "from keras.layers import BatchNormalization"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 8,
40 | "metadata": {},
41 | "outputs": [
42 | {
43 | "name": "stdout",
44 | "output_type": "stream",
45 | "text": [
46 | " PositionType Job_Description\n",
47 | "0 项目管理 \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,...\n",
48 | "1 项目管理 \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,...\n",
49 | "2 移动开发 \\r\\n 岗位职责: \\r\\n 1.负责安卓客户端应用的框架设计; \\r\\n 2.负责安卓客...\n",
50 | "3 移动开发 \\r\\n 现诚招资深iOS高级软件开发工程师一枚! 【你的工作职责】 1、负责iPhone手...\n",
51 | "4 后端开发 \\r\\n 岗位职责: \\r\\n 1、基于海量交通信息数据的数据仓库建设、数据应用开发。 2、...\n",
52 | "['项目管理', '移动开发', '后端开发', '前端开发', '测试', '高端技术职位', '硬件开发', 'dba', '运维', '企业软件']\n"
53 | ]
54 | }
55 | ],
56 | "source": [
57 | "job_detail_pd = pd.read_csv('job_detail_dataset.csv', encoding='UTF-8')\n",
58 | "print(job_detail_pd.head(5))\n",
59 |     "label = list(job_detail_pd['PositionType'].unique())  # label names\n",
60 | "print(label)"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": 9,
66 | "metadata": {},
67 | "outputs": [
68 | {
69 | "data": {
129 | "text/plain": [
130 | " PositionType Job_Description label\n",
131 | "0 项目管理 \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... 0\n",
132 | "1 项目管理 \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... 0\n",
133 | "2 移动开发 \\r\\n 岗位职责: \\r\\n 1.负责安卓客户端应用的框架设计; \\r\\n 2.负责安卓客... 1\n",
134 | "3 移动开发 \\r\\n 现诚招资深iOS高级软件开发工程师一枚! 【你的工作职责】 1、负责iPhone手... 1\n",
135 | "4 后端开发 \\r\\n 岗位职责: \\r\\n 1、基于海量交通信息数据的数据仓库建设、数据应用开发。 2、... 2"
136 | ]
137 | },
138 | "execution_count": 9,
139 | "metadata": {},
140 | "output_type": "execute_result"
141 | }
142 | ],
143 | "source": [
144 |     "# attach numeric labels\n",
145 | "def label_dataset(row):\n",
146 |     "    num_label = label.index(row)  # index of the value in the label list\n",
147 | " return num_label\n",
148 | "\n",
149 | "job_detail_pd['label'] = job_detail_pd['PositionType'].apply(label_dataset)\n",
150 |     "job_detail_pd = job_detail_pd.dropna()  # drop empty rows\n",
151 | "job_detail_pd.head(5)"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": 10,
157 | "metadata": {},
158 | "outputs": [
159 | {
160 | "name": "stderr",
161 | "output_type": "stream",
162 | "text": [
163 | "Building prefix dict from the default dictionary ...\n",
164 | "Loading model from cache /tmp/jieba.cache\n",
165 | "Loading model cost 1.418 seconds.\n",
166 | "Prefix dict has been built succesfully.\n"
167 | ]
168 | },
169 | {
170 | "data": {
171 | "text/html": [
172 | "\n",
173 | "\n",
186 | "
\n",
187 | " \n",
188 | " \n",
189 | " | \n",
190 | " PositionType | \n",
191 | " Job_Description | \n",
192 | " label | \n",
193 | " Job_Description_jieba_cut | \n",
194 | "
\n",
195 | " \n",
196 | " \n",
197 | " \n",
198 | " 0 | \n",
199 | " 项目管理 | \n",
200 | " \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... | \n",
201 | " 0 | \n",
202 | " \\r\\n 岗位职责 : \\r\\n 1 、 熟练 使用 axure... | \n",
203 | "
\n",
204 | " \n",
205 | " 1 | \n",
206 | " 项目管理 | \n",
207 | " \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... | \n",
208 | " 0 | \n",
209 | " \\r\\n 岗位职责 : \\r\\n 1 、 熟练 使用 axure... | \n",
210 | "
\n",
211 | " \n",
212 | " 2 | \n",
213 | " 移动开发 | \n",
214 | " \\r\\n 岗位职责: \\r\\n 1.负责安卓客户端应用的框架设计; \\r\\n 2.负责安卓客... | \n",
215 | " 1 | \n",
216 | " \\r\\n 岗位职责 : \\r\\n 1 . 负责 安卓 客户端 应用 的 框架 设... | \n",
217 | "
\n",
218 | " \n",
219 | " 3 | \n",
220 | " 移动开发 | \n",
221 | " \\r\\n 现诚招资深iOS高级软件开发工程师一枚! 【你的工作职责】 1、负责iPhone手... | \n",
222 | " 1 | \n",
223 | " \\r\\n 现 诚招 资深 iOS 高级 软件开发 工程师 一枚 ! 【 你 的 工作... | \n",
224 | "
\n",
225 | " \n",
226 | " 4 | \n",
227 | " 后端开发 | \n",
228 | " \\r\\n 岗位职责: \\r\\n 1、基于海量交通信息数据的数据仓库建设、数据应用开发。 2、... | \n",
229 | " 2 | \n",
230 | " \\r\\n 岗位职责 : \\r\\n 1 、 基于 海量 交通 信息 数据 的 数据... | \n",
231 | "
\n",
232 | " \n",
233 | "
\n",
234 | "
"
235 | ],
236 | "text/plain": [
237 | " PositionType Job_Description label \\\n",
238 | "0 项目管理 \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... 0 \n",
239 | "1 项目管理 \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... 0 \n",
240 | "2 移动开发 \\r\\n 岗位职责: \\r\\n 1.负责安卓客户端应用的框架设计; \\r\\n 2.负责安卓客... 1 \n",
241 | "3 移动开发 \\r\\n 现诚招资深iOS高级软件开发工程师一枚! 【你的工作职责】 1、负责iPhone手... 1 \n",
242 | "4 后端开发 \\r\\n 岗位职责: \\r\\n 1、基于海量交通信息数据的数据仓库建设、数据应用开发。 2、... 2 \n",
243 | "\n",
244 | " Job_Description_jieba_cut \n",
245 | "0 \\r\\n 岗位职责 : \\r\\n 1 、 熟练 使用 axure... \n",
246 | "1 \\r\\n 岗位职责 : \\r\\n 1 、 熟练 使用 axure... \n",
247 | "2 \\r\\n 岗位职责 : \\r\\n 1 . 负责 安卓 客户端 应用 的 框架 设... \n",
248 | "3 \\r\\n 现 诚招 资深 iOS 高级 软件开发 工程师 一枚 ! 【 你 的 工作... \n",
249 | "4 \\r\\n 岗位职责 : \\r\\n 1 、 基于 海量 交通 信息 数据 的 数据... "
250 | ]
251 | },
252 | "execution_count": 10,
253 | "metadata": {},
254 | "output_type": "execute_result"
255 | }
256 | ],
257 | "source": [
258 | "# 中文分词\n",
259 | "def chinese_word_cut(row):\n",
260 | " return \" \".join(jieba.cut(row))\n",
261 | "\n",
262 | "job_detail_pd['Job_Description_jieba_cut'] = job_detail_pd.Job_Description.apply(chinese_word_cut)\n",
263 | "job_detail_pd.head(5)"
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": 11,
269 | "metadata": {},
270 | "outputs": [
271 | {
272 | "data": {
273 | "text/html": [
274 | "\n",
275 | "\n",
288 | "
\n",
289 | " \n",
290 | " \n",
291 | " | \n",
292 | " PositionType | \n",
293 | " Job_Description | \n",
294 | " label | \n",
295 | " Job_Description_jieba_cut | \n",
296 | " Job_Description_key_word | \n",
297 | "
\n",
298 | " \n",
299 | " \n",
300 | " \n",
301 | " 0 | \n",
302 | " 项目管理 | \n",
303 | " \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... | \n",
304 | " 0 | \n",
305 | " \\r\\n 岗位职责 : \\r\\n 1 、 熟练 使用 axure... | \n",
306 | " 互联网 体验 用户 产品 优先 运营 熟悉 电商 axure visio 竞品 O2O 岗位... | \n",
307 | "
\n",
308 | " \n",
309 | " 1 | \n",
310 | " 项目管理 | \n",
311 | " \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... | \n",
312 | " 0 | \n",
313 | " \\r\\n 岗位职责 : \\r\\n 1 、 熟练 使用 axure... | \n",
314 | " 互联网 体验 用户 产品 优先 运营 熟悉 电商 axure visio 竞品 O2O 岗位... | \n",
315 | "
\n",
316 | " \n",
317 | " 2 | \n",
318 | " 移动开发 | \n",
319 | " \\r\\n 岗位职责: \\r\\n 1.负责安卓客户端应用的框架设计; \\r\\n 2.负责安卓客... | \n",
320 | " 1 | \n",
321 | " \\r\\n 岗位职责 : \\r\\n 1 . 负责 安卓 客户端 应用 的 框架 设... | \n",
322 | " Android 安卓 java 客户端 能力 编程 具备 应用 良好 开发 优先 测试人员 ... | \n",
323 | "
\n",
324 | " \n",
325 | " 3 | \n",
326 | " 移动开发 | \n",
327 | " \\r\\n 现诚招资深iOS高级软件开发工程师一枚! 【你的工作职责】 1、负责iPhone手... | \n",
328 | " 1 | \n",
329 | " \\r\\n 现 诚招 资深 iOS 高级 软件开发 工程师 一枚 ! 【 你 的 工作... | \n",
330 | " iOS 开发 应用 技术 素质 用户 适配 平台 iPhone iPad 专业本科 编写程序... | \n",
331 | "
\n",
332 | " \n",
333 | " 4 | \n",
334 | " 后端开发 | \n",
335 | " \\r\\n 岗位职责: \\r\\n 1、基于海量交通信息数据的数据仓库建设、数据应用开发。 2、... | \n",
336 | " 2 | \n",
337 | " \\r\\n 岗位职责 : \\r\\n 1 、 基于 海量 交通 信息 数据 的 数据... | \n",
338 | " 数据仓库 Hadoop Hive Hbase 开发 数据 优先 交通 经验 应用 相关 智能... | \n",
339 | "
\n",
340 | " \n",
341 | "
\n",
342 | "
"
343 | ],
344 | "text/plain": [
345 | " PositionType Job_Description label \\\n",
346 | "0 项目管理 \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... 0 \n",
347 | "1 项目管理 \\r\\n 岗位职责: \\r\\n 1 、熟练使用 axure,visio ,熟悉竞品分析,... 0 \n",
348 | "2 移动开发 \\r\\n 岗位职责: \\r\\n 1.负责安卓客户端应用的框架设计; \\r\\n 2.负责安卓客... 1 \n",
349 | "3 移动开发 \\r\\n 现诚招资深iOS高级软件开发工程师一枚! 【你的工作职责】 1、负责iPhone手... 1 \n",
350 | "4 后端开发 \\r\\n 岗位职责: \\r\\n 1、基于海量交通信息数据的数据仓库建设、数据应用开发。 2、... 2 \n",
351 | "\n",
352 | " Job_Description_jieba_cut \\\n",
353 | "0 \\r\\n 岗位职责 : \\r\\n 1 、 熟练 使用 axure... \n",
354 | "1 \\r\\n 岗位职责 : \\r\\n 1 、 熟练 使用 axure... \n",
355 | "2 \\r\\n 岗位职责 : \\r\\n 1 . 负责 安卓 客户端 应用 的 框架 设... \n",
356 | "3 \\r\\n 现 诚招 资深 iOS 高级 软件开发 工程师 一枚 ! 【 你 的 工作... \n",
357 | "4 \\r\\n 岗位职责 : \\r\\n 1 、 基于 海量 交通 信息 数据 的 数据... \n",
358 | "\n",
359 | " Job_Description_key_word \n",
360 | "0 互联网 体验 用户 产品 优先 运营 熟悉 电商 axure visio 竞品 O2O 岗位... \n",
361 | "1 互联网 体验 用户 产品 优先 运营 熟悉 电商 axure visio 竞品 O2O 岗位... \n",
362 | "2 Android 安卓 java 客户端 能力 编程 具备 应用 良好 开发 优先 测试人员 ... \n",
363 | "3 iOS 开发 应用 技术 素质 用户 适配 平台 iPhone iPad 专业本科 编写程序... \n",
364 | "4 数据仓库 Hadoop Hive Hbase 开发 数据 优先 交通 经验 应用 相关 智能... "
365 | ]
366 | },
367 | "execution_count": 11,
368 | "metadata": {},
369 | "output_type": "execute_result"
370 | }
371 | ],
372 | "source": [
373 | "# 提取关键词\n",
374 | "def key_word_extract(texts):\n",
375 | " return \" \".join(analyse.extract_tags(texts, topK=50, withWeight=False, allowPOS=()))\n",
376 | "job_detail_pd['Job_Description_key_word'] = job_detail_pd.Job_Description.apply(key_word_extract)\n",
377 | "job_detail_pd.head(5)"
378 | ]
379 | },
380 | {
381 | "cell_type": "markdown",
382 | "metadata": {},
383 | "source": [
384 | "## 训练全过程\n",
385 | "步骤1:读取数据集;\n",
386 | "\n",
387 | "步骤2:建立token字典;\n",
388 | "\n",
389 | "步骤3:使用token字典将“文字”转化为“数字列表”\n",
390 | "\n",
391 | "步骤4:截长补短让所有“数字列表”长度都是100\n",
392 | "\n",
393 | "步骤5:Embedding层将“数字列表”转化为\"向量列表\";\n",
394 | "\n",
395 | "步骤6:将向量列表送入深度学习模型进行训练"
396 | ]
397 | },
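{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A toy sketch of steps 2-4, added for illustration only (it assumes the\n",
"# same Tokenizer and sequence imports this notebook already uses):\n",
"toy_texts = ['熟练 使用 axure', '负责 安卓 客户端 开发']\n",
"toy_token = Tokenizer(num_words=2000)\n",
"toy_token.fit_on_texts(toy_texts) # step 2: build the word-to-index dictionary\n",
"toy_seq = toy_token.texts_to_sequences(toy_texts) # step 3: texts -> integer lists, e.g. [[1, 2, 3], [4, 5, 6, 7]]\n",
"print(toy_seq)\n",
"print(sequence.pad_sequences(toy_seq, maxlen=5)) # step 4: left-pad with zeros to a fixed length of 5\n",
"# the real pipeline below does exactly this on the extracted keywords, with maxlen=50"
]
},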
398 | {
399 | "cell_type": "code",
400 | "execution_count": null,
401 | "metadata": {},
402 | "outputs": [],
403 | "source": [
404 | "# 建立2000个词的字典\n",
405 | "token = Tokenizer(num_words = 2000) \n",
406 | "token.fit_on_texts(job_detail_pd['Job_Description_key_word']) #按单词出现次数排序,排序前2000的单词会列入词典中"
407 | ]
408 | },
409 | {
410 | "cell_type": "code",
411 | "execution_count": null,
412 | "metadata": {},
413 | "outputs": [],
414 | "source": [
415 | "print(token.document_count) # 查看token读了多少文章\n",
416 | "print(token.word_index) # 查看token读了多少文章"
417 | ]
418 | },
419 | {
420 | "cell_type": "code",
421 | "execution_count": null,
422 | "metadata": {},
423 | "outputs": [],
424 | "source": [
425 | "# 使用token字典将“文字”转化为“数字列表”\n",
426 | "Job_Description_Seq = token.texts_to_sequences(job_detail_pd['Job_Description_key_word'])"
427 | ]
428 | },
429 | {
430 | "cell_type": "code",
431 | "execution_count": null,
432 | "metadata": {},
433 | "outputs": [],
434 | "source": [
435 | "# 截长补短让所有“数字列表”长度都是50\n",
436 | "Job_Description_Seq_Padding = sequence.pad_sequences(Job_Description_Seq, maxlen=50)\n",
437 | "\n",
438 | "x_train = Job_Description_Seq_Padding\n",
439 | "y_train = job_detail_pd['label'].tolist()"
440 | ]
441 | },
442 | {
443 | "cell_type": "code",
444 | "execution_count": null,
445 | "metadata": {},
446 | "outputs": [],
447 | "source": [
448 | "y_train_one_hot_encoded = pd.get_dummies(job_detail_pd['label'], sparse = True)\n",
449 | "print(y_train_one_hot_encoded)"
450 | ]
451 | },
452 | {
453 | "cell_type": "markdown",
454 | "metadata": {},
455 | "source": [
456 | "## 开始训练MLP"
457 | ]
458 | },
459 | {
460 | "cell_type": "code",
461 | "execution_count": null,
462 | "metadata": {},
463 | "outputs": [],
464 | "source": [
465 | "batch_size = 256\n",
466 | "epochs = 5\n",
467 | "model = Sequential()\n",
468 | "model.add(Embedding(output_dim = 32, # 词向量的维度\n",
469 | " input_dim = 2000, # Size of the vocabulary 字典大小\n",
470 | " input_length = 50 # 每个数字列表的长度\n",
471 | " ) \n",
472 | " )\n",
473 | "\n",
474 | "model.add(Dropout(0.2)) \n",
475 | "model.add(Flatten()) # 平展\n",
476 | "model.add(Dense(units = 256,\n",
477 | " activation = \"relu\"))\n",
478 | "model.add(Dropout(0.25))\n",
479 | "model.add(Dense(units = 10,\n",
480 | " activation = \"softmax\"))\n",
481 | "\n",
482 | "print(model.summary()) # 打印模型\n",
483 | "# CPU版本\n",
484 | "model.compile(loss = \"sparse_categorical_crossentropy\", # 多分类\n",
485 | " optimizer = \"adam\",\n",
486 | " metrics = [\"accuracy\"]\n",
487 | " )\n",
488 | "\n",
489 | "history = model.fit(\n",
490 | " x_train, \n",
491 | " y_train, \n",
492 | " batch_size = batch_size,\n",
493 | " epochs = epochs,\n",
494 | " verbose = 2,\n",
495 | " validation_split = 0.2 # 训练集的20%用作验证集\n",
496 | ")\n",
497 | "\n",
498 | "# GPU版本\n",
499 | "\"\"\"\n",
500 | " 如果你的 label 是 one-hot 编码,用 categorical_crossentropy\n",
501 | " one-hot 编码:[0, 0, 1], [1, 0, 0], [0, 1, 0]\n",
502 | " 如果你的 tagets 是 数字编码 ,用 sparse_categorical_crossentropy\n",
503 | " 数字编码:2, 0, 1\n",
504 | " \"\"\"\n",
505 | "# parallel_model = multi_gpu_model(model, gpus=4)\n",
506 | "# parallel_model.compile(loss='sparse_categorical_crossentropy', # 多分类 \n",
507 | "# optimizer='adam',\n",
508 | "# metrics=['accuracy']\n",
509 | "# )\n",
510 | "\n",
511 | "# This `fit` call will be distributed on 4 GPUs.\n",
512 | "# Since the batch size is 50, each GPU will process 32 samples.\n",
513 | "# batch_size = 512\n",
514 | "# epochs = 2\n",
515 | "# history = parallel_model.fit(\n",
516 | "# x_train, \n",
517 | "# y_train,\n",
518 | "# batch_size=batch_size,\n",
519 | "# epochs=epochs,\n",
520 | "# validation_split = 0.2\n",
521 | "# )\n",
522 | "\n"
523 | ]
524 | },
525 | {
526 | "cell_type": "markdown",
527 | "metadata": {},
528 | "source": [
529 | "## 保存模型 & 模型可视化"
530 | ]
531 | },
532 | {
533 | "cell_type": "code",
534 | "execution_count": null,
535 | "metadata": {},
536 | "outputs": [],
537 | "source": [
538 | "from keras.utils import plot_model\n",
539 | "# 保存模型\n",
540 | "model.save('model_MLP_text.h5') # creates a HDF5 file 'my_model.h5'\n",
541 | "# 模型可视化\n",
542 | "plot_model(model, to_file='model_MLP_text.png', show_shapes=True)"
543 | ]
544 | },
545 | {
546 | "cell_type": "markdown",
547 | "metadata": {},
548 | "source": [
549 | "## 模型的预测功能"
550 | ]
551 | },
552 | {
553 | "cell_type": "code",
554 | "execution_count": null,
555 | "metadata": {},
556 | "outputs": [],
557 | "source": [
558 | "from keras.models import load_model\n",
559 | "# 加载模型\n",
560 | "model = load_model('model_MLP_text.h5')\n",
561 | "y_new = model.predict(x_train[0])\n",
562 | "print(y_new)\n",
563 | "print(y_train[0])"
564 | ]
565 | },
566 | {
567 | "cell_type": "markdown",
568 | "metadata": {},
569 | "source": [
570 | "## 训练可视化"
571 | ]
572 | },
573 | {
574 | "cell_type": "code",
575 | "execution_count": null,
576 | "metadata": {},
577 | "outputs": [],
578 | "source": [
579 | "import matplotlib.pyplot as plt\n",
580 | "# 绘制训练 & 验证的准确率值\n",
581 | "plt.plot(history.history['acc'])\n",
582 | "plt.plot(history.history['val_acc'])\n",
583 | "plt.title('Model accuracy')\n",
584 | "plt.ylabel('Accuracy')\n",
585 | "plt.xlabel('Epoch')\n",
586 | "plt.legend(['Train', 'Valid'], loc='upper left')\n",
587 | "plt.show()\n",
588 | "\n",
589 | "# 绘制训练 & 验证的损失值\n",
590 | "plt.plot(history.history['loss'])\n",
591 | "plt.plot(history.history['val_loss'])\n",
592 | "plt.title('Model loss')\n",
593 | "plt.ylabel('Loss')\n",
594 | "plt.xlabel('Epoch')\n",
595 | "plt.legend(['Train', 'Valid'], loc='upper left')\n",
596 | "plt.show()"
597 | ]
598 | }
599 | ],
600 | "metadata": {
601 | "kernelspec": {
602 | "display_name": "Python 3",
603 | "language": "python",
604 | "name": "python3"
605 | },
606 | "language_info": {
607 | "codemirror_mode": {
608 | "name": "ipython",
609 | "version": 3
610 | },
611 | "file_extension": ".py",
612 | "mimetype": "text/x-python",
613 | "name": "python",
614 | "nbconvert_exporter": "python",
615 | "pygments_lexer": "ipython3",
616 | "version": "3.7.2"
617 | }
618 | },
619 | "nbformat": 4,
620 | "nbformat_minor": 2
621 | }
622 |
--------------------------------------------------------------------------------
/Neat/.ipynb_checkpoints/xor-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/Neat/Digraph.gv:
--------------------------------------------------------------------------------
1 | digraph {
2 | node [fontsize=9 height=0.2 shape=circle width=0.2]
3 | A [fillcolor=lightgray shape=box style=filled]
4 | B [fillcolor=lightgray shape=box style=filled]
5 | "A XOR B" [fillcolor=lightblue style=filled]
6 | 101 [fillcolor=white style=filled]
7 | 102 [fillcolor=white style=filled]
8 | 518 [fillcolor=white style=filled]
9 | 1806 [fillcolor=white style=filled]
10 | 1103 [fillcolor=white style=filled]
11 | 344 [fillcolor=white style=filled]
12 | A -> 101 [color=red penwidth=1.014475964605062 style=solid]
13 | B -> 101 [color=red penwidth=0.6039459171968576 style=solid]
14 | 101 -> "A XOR B" [color=red penwidth=0.684020074349466 style=solid]
15 | 102 -> 344 [color=red penwidth=0.39568527424792477 style=dotted]
16 | 1103 -> 102 [color=red penwidth=0.1543632724567388 style=dotted]
17 | B -> "A XOR B" [color=red penwidth=0.29503754516838065 style=solid]
18 | B -> 1806 [color=green penwidth=0.45344458118601894 style=solid]
19 | 1806 -> "A XOR B" [color=green penwidth=0.1072748034717091 style=solid]
20 | A -> "A XOR B" [color=red penwidth=0.2789419808374101 style=solid]
21 | }
22 |
--------------------------------------------------------------------------------
/Neat/Digraph.gv.svg:
--------------------------------------------------------------------------------
1 | [SVG markup omitted: Graphviz rendering of the evolved XOR network defined in Digraph.gv]
--------------------------------------------------------------------------------
/Neat/__pycache__/visualize.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/__pycache__/visualize.cpython-36.pyc
--------------------------------------------------------------------------------
/Neat/__pycache__/visualize.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/__pycache__/visualize.cpython-37.pyc
--------------------------------------------------------------------------------
/Neat/config-feedforward:
--------------------------------------------------------------------------------
1 | #--- parameters for the XOR-2 experiment ---#
2 |
3 | [NEAT]
4 | fitness_criterion = max
5 | fitness_threshold = 3.9
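# the run stops once the best genome reaches this fitness (xor.py's maximum attainable fitness is 4.0)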
6 | pop_size = 150
7 | reset_on_extinction = False
8 |
9 | [DefaultGenome]
10 | # node activation options
11 | activation_default = sigmoid
12 | activation_mutate_rate = 0.0
13 | activation_options = sigmoid
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 0.5
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.5
35 | conn_delete_prob = 0.5
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 |
41 | feed_forward = True
42 | initial_connection = full
43 |
44 | # node add/remove rates
45 | node_add_prob = 0.2
46 | node_delete_prob = 0.2
47 |
48 | # network parameters
49 | num_hidden = 2
50 | num_inputs = 2
51 | num_outputs = 1
52 |
53 | # node response options
54 | response_init_mean = 1.0
55 | response_init_stdev = 0.0
56 | response_max_value = 30.0
57 | response_min_value = -30.0
58 | response_mutate_power = 0.0
59 | response_mutate_rate = 0.0
60 | response_replace_rate = 0.0
61 |
62 | # connection weight options
63 | weight_init_mean = 0.0
64 | weight_init_stdev = 1.0
65 | weight_max_value = 30
66 | weight_min_value = -30
67 | weight_mutate_power = 0.5
68 | weight_mutate_rate = 0.8
69 | weight_replace_rate = 0.1
70 |
71 | [DefaultSpeciesSet]
72 | compatibility_threshold = 3.0
73 |
74 | [DefaultStagnation]
75 | species_fitness_func = max
76 | max_stagnation = 20
77 | species_elitism = 2
78 |
79 | [DefaultReproduction]
80 | elitism = 2
81 | survival_threshold = 0.2
82 |
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-104:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-104
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-109:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-109
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-114:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-114
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-119:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-119
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-124:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-124
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-14:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-14
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-19:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-19
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-24:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-24
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-29:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-29
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-34:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-34
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-39:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-39
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-4
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-44:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-44
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-49:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-49
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-54:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-54
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-59:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-59
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-64
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-69:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-69
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-74:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-74
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-79:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-79
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-84:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-84
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-89:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-89
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-9:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-9
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-94:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-94
--------------------------------------------------------------------------------
/Neat/neat-checkpoint-99:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Neat/neat-checkpoint-99
--------------------------------------------------------------------------------
/Neat/visualize.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import copy
4 | import warnings
5 |
6 | try:
    import graphviz
except ImportError:
    graphviz = None  # optional; draw_net() checks for this and warns instead of crashing
7 | try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None  # optional; plot_stats()/plot_species() check for this and warn
8 | import numpy as np
9 |
10 |
11 | def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
12 | """ Plots the population's average and best fitness. """
13 | if plt is None:
14 | warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
15 | return
16 |
17 | generation = range(len(statistics.most_fit_genomes))
18 | best_fitness = [c.fitness for c in statistics.most_fit_genomes]
19 | avg_fitness = np.array(statistics.get_fitness_mean())
20 | stdev_fitness = np.array(statistics.get_fitness_stdev())
21 |
22 | plt.plot(generation, avg_fitness, 'b-', label="average")
23 | plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
24 | plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
25 | plt.plot(generation, best_fitness, 'r-', label="best")
26 |
27 | plt.title("Population's average and best fitness")
28 | plt.xlabel("Generations")
29 | plt.ylabel("Fitness")
30 | plt.grid()
31 | plt.legend(loc="best")
32 | if ylog:
33 | plt.gca().set_yscale('symlog')
34 |
35 | plt.savefig(filename)
36 | if view:
37 | plt.show()
38 |
39 | plt.close()
40 |
41 |
42 | def plot_spikes(spikes, view=False, filename=None, title=None):
43 | """ Plots the trains for a single spiking neuron. """
44 | t_values = [t for t, I, v, u, f in spikes]
45 | v_values = [v for t, I, v, u, f in spikes]
46 | u_values = [u for t, I, v, u, f in spikes]
47 | I_values = [I for t, I, v, u, f in spikes]
48 | f_values = [f for t, I, v, u, f in spikes]
49 |
50 | fig = plt.figure()
51 | plt.subplot(4, 1, 1)
52 | plt.ylabel("Potential (mv)")
53 | plt.xlabel("Time (in ms)")
54 | plt.grid()
55 | plt.plot(t_values, v_values, "g-")
56 |
57 | if title is None:
58 | plt.title("Izhikevich's spiking neuron model")
59 | else:
60 | plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
61 |
62 | plt.subplot(4, 1, 2)
63 | plt.ylabel("Fired")
64 | plt.xlabel("Time (in ms)")
65 | plt.grid()
66 | plt.plot(t_values, f_values, "r-")
67 |
68 | plt.subplot(4, 1, 3)
69 | plt.ylabel("Recovery (u)")
70 | plt.xlabel("Time (in ms)")
71 | plt.grid()
72 | plt.plot(t_values, u_values, "r-")
73 |
74 | plt.subplot(4, 1, 4)
75 | plt.ylabel("Current (I)")
76 | plt.xlabel("Time (in ms)")
77 | plt.grid()
78 | plt.plot(t_values, I_values, "r-o")
79 |
80 | if filename is not None:
81 | plt.savefig(filename)
82 |
83 | if view:
84 | plt.show()
85 | plt.close()
86 | fig = None
87 |
88 | return fig
89 |
90 |
91 | def plot_species(statistics, view=False, filename='speciation.svg'):
92 | """ Visualizes speciation throughout evolution. """
93 | if plt is None:
94 | warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
95 | return
96 |
97 | species_sizes = statistics.get_species_sizes()
98 | num_generations = len(species_sizes)
99 | curves = np.array(species_sizes).T
100 |
101 | fig, ax = plt.subplots()
102 | ax.stackplot(range(num_generations), *curves)
103 |
104 | plt.title("Speciation")
105 | plt.ylabel("Size per Species")
106 | plt.xlabel("Generations")
107 |
108 | plt.savefig(filename)
109 |
110 | if view:
111 | plt.show()
112 |
113 | plt.close()
114 |
115 |
116 | def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
117 | node_colors=None, fmt='svg'):
118 | """ Receives a genome and draws a neural network with arbitrary topology. """
119 | # Attributes for network nodes.
120 | if graphviz is None:
121 | warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
122 | return
123 |
124 | if node_names is None:
125 | node_names = {}
126 |
127 | assert type(node_names) is dict
128 |
129 | if node_colors is None:
130 | node_colors = {}
131 |
132 | assert type(node_colors) is dict
133 |
134 | node_attrs = {
135 | 'shape': 'circle',
136 | 'fontsize': '9',
137 | 'height': '0.2',
138 | 'width': '0.2'}
139 |
140 | dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
141 |
142 | inputs = set()
143 | for k in config.genome_config.input_keys:
144 | inputs.add(k)
145 | name = node_names.get(k, str(k))
146 | input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
147 | dot.node(name, _attributes=input_attrs)
148 |
149 | outputs = set()
150 | for k in config.genome_config.output_keys:
151 | outputs.add(k)
152 | name = node_names.get(k, str(k))
153 | node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
154 |
155 | dot.node(name, _attributes=node_attrs)
156 |
157 | if prune_unused:
158 | connections = set()
159 | for cg in genome.connections.values():
160 | if cg.enabled or show_disabled:
161 | connections.add((cg.in_node_id, cg.out_node_id))
162 |
163 | used_nodes = copy.copy(outputs)
164 | pending = copy.copy(outputs)
165 | while pending:
166 | new_pending = set()
167 | for a, b in connections:
168 | if b in pending and a not in used_nodes:
169 | new_pending.add(a)
170 | used_nodes.add(a)
171 | pending = new_pending
172 | else:
173 | used_nodes = set(genome.nodes.keys())
174 |
175 | for n in used_nodes:
176 | if n in inputs or n in outputs:
177 | continue
178 |
179 | attrs = {'style': 'filled',
180 | 'fillcolor': node_colors.get(n, 'white')}
181 | dot.node(str(n), _attributes=attrs)
182 |
183 | for cg in genome.connections.values():
184 | if cg.enabled or show_disabled:
185 | #if cg.input not in used_nodes or cg.output not in used_nodes:
186 | # continue
187 | input, output = cg.key
188 | a = node_names.get(input, str(input))
189 | b = node_names.get(output, str(output))
190 | style = 'solid' if cg.enabled else 'dotted'
191 | color = 'green' if cg.weight > 0 else 'red'
192 | width = str(0.1 + abs(cg.weight / 5.0))
193 | dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
194 |
195 | dot.render(filename, view=view)
196 |
197 | return dot
198 |
--------------------------------------------------------------------------------
/Neat/xor.py:
--------------------------------------------------------------------------------
1 | """
2 | 2-input XOR example -- this is most likely the simplest possible example.
3 | """
4 |
5 | from __future__ import print_function
6 | import os
7 | import neat
8 | import visualize
9 |
10 | # 2-input XOR inputs and expected outputs.
11 | xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
12 | xor_outputs = [ (0.0,), (1.0,), (1.0,), (0.0,)]
13 |
14 |
15 | def eval_genomes(genomes, config):
16 | for genome_id, genome in genomes:
17 | genome.fitness = 4.0
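        # fitness starts at the maximum of 4.0 (one point per XOR case); the loop
        # below subtracts the squared error on each case, so a perfect network keeps
        # 4.0 and the run can reach config-feedforward's fitness_threshold of 3.9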
18 | net = neat.nn.FeedForwardNetwork.create(genome, config)
19 | for xi, xo in zip(xor_inputs, xor_outputs):
20 | output = net.activate(xi)
21 | genome.fitness -= (output[0] - xo[0]) ** 2
22 |
23 |
24 | def run(config_file):
25 | # Load configuration.
26 | config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
27 | neat.DefaultSpeciesSet, neat.DefaultStagnation,
28 | config_file)
29 |
30 | # Create the population, which is the top-level object for a NEAT run.
31 | p = neat.Population(config)
32 |
33 | # Add a stdout reporter to show progress in the terminal.
34 | p.add_reporter(neat.StdOutReporter(True))
35 | stats = neat.StatisticsReporter()
36 | p.add_reporter(stats)
37 | p.add_reporter(neat.Checkpointer(5))
38 |
39 | # Run for up to 300 generations.
40 | winner = p.run(eval_genomes, 300)
41 |
42 | # Display the winning genome.
43 | print('\nBest genome:\n{!s}'.format(winner))
44 |
45 | # Show output of the most fit genome against training data.
46 | print('\nOutput:')
47 | winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
48 | for xi, xo in zip(xor_inputs, xor_outputs):
49 | output = winner_net.activate(xi)
50 | print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))
51 |
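    # neat-python keys input nodes with negative integers (-1, -2) and output
    # nodes from 0 upward; this maps those keys to readable names for the plot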
52 | node_names = {-1:'A', -2: 'B', 0:'A XOR B'}
53 | visualize.draw_net(config, winner, True, node_names=node_names)
54 | visualize.plot_stats(stats, ylog=False, view=True)
55 | visualize.plot_species(stats, view=True)
56 |
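    # demonstrate resuming from a saved checkpoint (Checkpointer(5) above saves
    # every 5 generations) and evolving for 10 more generations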
57 | p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-4')
58 | p.run(eval_genomes, 10)
59 |
60 |
61 | if __name__ == '__main__':
62 | # Determine path to configuration file. This path manipulation is
63 | # here so that the script will run successfully regardless of the
64 | # current working directory.
65 | local_dir = os.path.dirname(__file__)
66 | config_path = os.path.join(local_dir, 'config-feedforward')
67 | run(config_path)
68 |
--------------------------------------------------------------------------------
/Python_Basis/.ipynb_checkpoints/Basis-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Python基础教学代码---基础篇 \n",
8 | "1.变量赋值 \n",
9 | "2.标准数据类型 \n",
10 | "3.数据转换 \n",
11 | "4.算数运算符 \n",
12 | "5.格式化\n",
13 | "***"
14 | ]
15 | },
16 | {
17 | "cell_type": "markdown",
18 | "metadata": {},
19 | "source": [
20 | "## 1.变量赋值"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 2,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "name": "stdout",
30 | "output_type": "stream",
31 | "text": [
32 | "1\n",
33 | "2 2\n"
34 | ]
35 | }
36 | ],
37 | "source": [
38 | "a = 1 # 单变量赋值\n",
39 | "c = b = 2 # 多变量赋值\n",
40 | "print(a)\n",
41 | "print(b, c)"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": 4,
47 | "metadata": {},
48 | "outputs": [
49 | {
50 | "name": "stdout",
51 | "output_type": "stream",
52 | "text": [
53 | "\n",
54 | "\n",
55 | "\n"
56 | ]
57 | }
58 | ],
59 | "source": [
60 | "# 变量类型\n",
61 | "name = 'Chile' # 字符串\n",
62 | "miles = 1000.0 # 浮点型\n",
63 | "num = 100 # 整形\n",
64 | "# 打印变量类型\n",
65 | "print(type(name))\n",
66 | "print(type(miles))\n",
67 | "print(type(num))"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {},
73 | "source": [
74 | "***\n",
75 | "## 2.标准数据类型 \n",
76 | "\n",
77 | "Python有五个标准的数据类型: \n",
78 | "1.Numbers(数字) \n",
79 | "2.String(字符串) \n",
80 | "3.List(列表) \n",
81 | "4.Tuple(元组) \n",
82 | "5.Dictionary(字典)\n",
83 | "6.Set(集合)\n",
84 | "其中List,Tuple,Dictionary,Set可以放任意数据类型"
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "metadata": {},
90 | "source": [
91 | "### 数字"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": 5,
97 | "metadata": {},
98 | "outputs": [],
99 | "source": [
100 | "# Numbers: int & float\n",
101 | "a = 1 # int\n",
102 | "b = 1.0 # float"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": 6,
108 | "metadata": {},
109 | "outputs": [
110 | {
111 | "name": "stdout",
112 | "output_type": "stream",
113 | "text": [
114 | "C\n",
115 | "hi\n",
116 | "ile\n",
117 | "e\n"
118 | ]
119 | }
120 | ],
121 | "source": [
122 | "# String\n",
123 | "my_name = 'Chile'\n",
124 | "print(my_name[0]) # 打印第0个字符\n",
125 | "print(my_name[1: 3]) # 打印第1个到第2个的字符\n",
126 | "print(my_name[2:]) # 打印第2个到最后一个的字符\n",
127 | "print(my_name[-1]) # 打印倒数第一个字符"
128 | ]
129 | },
130 | {
131 | "cell_type": "markdown",
132 | "metadata": {},
133 | "source": [
134 | "### 列表"
135 | ]
136 | },
137 | {
138 | "cell_type": "code",
139 | "execution_count": 7,
140 | "metadata": {},
141 | "outputs": [
142 | {
143 | "name": "stdout",
144 | "output_type": "stream",
145 | "text": [
146 | "[1, 2, 3, 4]\n",
147 | "['Chile', 'b', 'c']\n",
148 | "['a', 1, 1.0, [1, 2, 3, 4], ['Chile', 'b', 'c']]\n"
149 | ]
150 | }
151 | ],
152 | "source": [
153 | "# List 可以放任意类型的数据类型\n",
154 | "num_list = [1, 2, 3, 4]\n",
155 | "str_list = ['Chile', 'b', 'c']\n",
156 | "mix_list = ['a', 1, 1.0, num_list, str_list]\n",
157 | "print(num_list)\n",
158 | "print(str_list)\n",
159 | "print(mix_list)"
160 | ]
161 | },
162 | {
163 | "cell_type": "markdown",
164 | "metadata": {},
165 | "source": [
166 | "### 元组"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": 26,
172 | "metadata": {},
173 | "outputs": [
174 | {
175 | "name": "stdout",
176 | "output_type": "stream",
177 | "text": [
178 | "1\n",
179 | "('chile', 111, 2.2, 'a', [1, 2, 3, 4])\n"
180 | ]
181 | },
182 | {
183 | "ename": "TypeError",
184 | "evalue": "'tuple' object does not support item assignment",
185 | "output_type": "error",
186 | "traceback": [
187 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
188 | "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
189 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmix_list\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmix_tuple\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mmix_tuple\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m1\u001b[0m \u001b[0;31m# 不可赋值,否则报错\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
190 | "\u001b[0;31mTypeError\u001b[0m: 'tuple' object does not support item assignment"
191 | ]
192 | }
193 | ],
194 | "source": [
195 | "# Tuple 可以放任意类型的数据类型\n",
196 | "mix_tuple = ('chile', 111, 2.2, 'a', num_list) # 不可赋值\n",
197 | "print(mix_list[1])\n",
198 | "print(mix_tuple)\n",
199 | "mix_tuple[1] = 1 # 不可赋值,否则报错"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {},
205 | "source": [
206 | "### 字典"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": 9,
212 | "metadata": {},
213 | "outputs": [
214 | {
215 | "name": "stdout",
216 | "output_type": "stream",
217 | "text": [
218 | "{'name': 'Chile', 'age': 18, 'num_list': [1, 2, 3, 4], 'tuple': ('chile', 111, 2.2, 'a', [1, 2, 3, 4])}\n",
219 | "dict_keys(['name', 'age', 'num_list', 'tuple'])\n",
220 | "dict_values(['Chile', 18, [1, 2, 3, 4], ('chile', 111, 2.2, 'a', [1, 2, 3, 4])])\n",
221 | "Chile\n",
222 | "[1, 2, 3, 4]\n",
223 | "('chile', 111, 2.2, 'a', [1, 2, 3, 4])\n"
224 | ]
225 | }
226 | ],
227 | "source": [
228 | "# Dictionary 可以放任意类型的数据类型\n",
229 | "test_dict = {'name': 'Chile', 'age': 18, 'num_list': num_list, 'tuple': mix_tuple}\n",
230 | "print(test_dict)\n",
231 | "print(test_dict.keys()) # 打印键\n",
232 | "print(test_dict.values()) # 打印值\n",
233 | "print(test_dict['name'])\n",
234 | "print(test_dict['num_list'])\n",
235 | "print(test_dict['tuple'])"
236 | ]
237 | },
238 | {
239 | "cell_type": "markdown",
240 | "metadata": {},
241 | "source": [
242 | "### 字典的赋值“陷阱”"
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "execution_count": 15,
248 | "metadata": {},
249 | "outputs": [
250 | {
251 | "name": "stdout",
252 | "output_type": "stream",
253 | "text": [
254 | "alialili\n",
255 | "alialili\n"
256 | ]
257 | }
258 | ],
259 | "source": [
260 | "# 直接字典赋值, 被赋值的字典的值改变,原字典也会改变\n",
261 | "test_dict_copy = test_dict\n",
262 | "test_dict_copy['name'] = 'alialili'\n",
263 | "print(test_dict['name']) \n",
264 | "print(test_dict_copy['name'])"
265 | ]
266 | },
267 | {
268 | "cell_type": "code",
269 | "execution_count": 17,
270 | "metadata": {},
271 | "outputs": [
272 | {
273 | "name": "stdout",
274 | "output_type": "stream",
275 | "text": [
276 | "alialili\n",
277 | "Mary\n"
278 | ]
279 | }
280 | ],
281 | "source": [
282 | "# 使用深拷贝避免这种情况发生\n",
283 | "from copy import deepcopy\n",
284 | "test_dict_copy = deepcopy(test_dict)\n",
285 | "test_dict_copy['name'] = 'Mary'\n",
286 | "print(test_dict['name']) \n",
287 | "print(test_dict_copy['name'])"
288 | ]
289 | },
290 | {
291 | "cell_type": "markdown",
292 | "metadata": {},
293 | "source": [
294 | "### 集合"
295 | ]
296 | },
297 | {
298 | "cell_type": "code",
299 | "execution_count": 30,
300 | "metadata": {},
301 | "outputs": [
302 | {
303 | "name": "stdout",
304 | "output_type": "stream",
305 | "text": [
306 | "{1, 'chile', '1', 'abc'}\n"
307 | ]
308 | }
309 | ],
310 | "source": [
311 | "# 可以放任意类型的基础数据类型\n",
312 | "# Set 集合:与数学意义上的集合意义一致,集合内每一个值都是唯一的\n",
313 | "test_set = {'abc', 1, 1, '1', 'chile'} \n",
314 | "print(test_set) # 打印出来会只有一个数字1,因为集合的去重功能"
315 | ]
316 | },
317 | {
318 | "cell_type": "markdown",
319 | "metadata": {},
320 | "source": [
321 | "***\n",
322 | "## 3.数据类型转换"
323 | ]
324 | },
325 | {
326 | "cell_type": "code",
327 | "execution_count": 12,
328 | "metadata": {},
329 | "outputs": [
330 | {
331 | "name": "stdout",
332 | "output_type": "stream",
333 | "text": [
334 | "\n",
335 | "\n",
336 | "\n",
337 | "\n",
338 | "---------------\n",
339 | "\n",
340 | "\n",
341 | "\n"
342 | ]
343 | }
344 | ],
345 | "source": [
346 | "tr_a = '1'\n",
347 | "int_b = int(tr_a) # 字符串转数字\n",
348 | "str_c = str(int_b) # 数字转字符串\n",
349 | "float_d = float(str_c) # 字符串转浮点\n",
350 | "print(type(tr_a))\n",
351 | "print(type(int_b))\n",
352 | "print(type(str_c))\n",
353 | "print(type(float_d))\n",
354 | "print('---------------')\n",
355 | "tr_list = [1, 2, 3]\n",
356 | "set_a = set(tr_list) # 列表转集合\n",
357 | "list_b = list(set_a) # 集合转列表\n",
358 | "print(type(tr_list))\n",
359 | "print(type(set_a))\n",
360 | "print(type(list_b))"
361 | ]
362 | },
363 | {
364 | "cell_type": "markdown",
365 | "metadata": {},
366 | "source": [
367 | "***\n",
368 | "## 4.算数运算符 "
369 | ]
370 | },
371 | {
372 | "cell_type": "code",
373 | "execution_count": 23,
374 | "metadata": {},
375 | "outputs": [
376 | {
377 | "name": "stdout",
378 | "output_type": "stream",
379 | "text": [
380 | "c: 1\n",
381 | "d: 8\n",
382 | "e: 8.0\n",
383 | "f: 0\n",
384 | "g: 1\n",
385 | "h: 8\n"
386 | ]
387 | }
388 | ],
389 | "source": [
390 | "# 运算符\n",
391 | "a = 2\n",
392 | "b = a + 2\n",
393 | "c = a - 1\n",
394 | "d = a * b\n",
395 | "e = d / c\n",
396 | "f = d % c # 取余\n",
397 | "g = 3 // 2 # 整除(向下取整)\n",
398 | "h = 2**3 # 求幂\n",
399 | "print('c:', c)\n",
400 | "print('d:', d)\n",
401 | "print('e:', e)\n",
402 | "print('f:', f)\n",
403 | "print('g:', g)\n",
404 | "print('h:', h)"
405 | ]
406 | },
407 | {
408 | "cell_type": "markdown",
409 | "metadata": {},
410 | "source": [
411 | "***\n",
412 | "## 5.格式化\n",
413 | "%s 代表字符串 \n",
414 | "%d 代表整数 \n",
415 | "%f 代表浮点 \n",
416 | "%.2f 代表保留小数点后两位"
417 | ]
418 | },
419 | {
420 | "cell_type": "code",
421 | "execution_count": 25,
422 | "metadata": {},
423 | "outputs": [
424 | {
425 | "name": "stdout",
426 | "output_type": "stream",
427 | "text": [
428 | "abc 1, dhfjdhfhdh, Chile, sjdhsjhdhs, skdjskjsk1.000000,sdjsdhs\n",
429 | "abc 1, dhfjdhfhdh, Chile, sjdhsjhdhs, skdjskjsk1.00,sdjsdhs\n"
430 | ]
431 | }
432 | ],
433 | "source": [
434 | "# 格式化\n",
435 | "print('abc %d, dhfjdhfhdh, %s, sjdhsjhdhs, skdjskjsk%f,sdjsdhs' % (1, 'Chile', 1.0))\n",
436 | "# %.2f保留小数点\n",
437 | "print('abc %d, dhfjdhfhdh, %s, sjdhsjhdhs, skdjskjsk%.2f,sdjsdhs' % (1, 'Chile', 1.0))"
438 | ]
439 | }
440 | ],
441 | "metadata": {
442 | "kernelspec": {
443 | "display_name": "Python 3",
444 | "language": "python",
445 | "name": "python3"
446 | },
447 | "language_info": {
448 | "codemirror_mode": {
449 | "name": "ipython",
450 | "version": 3
451 | },
452 | "file_extension": ".py",
453 | "mimetype": "text/x-python",
454 | "name": "python",
455 | "nbconvert_exporter": "python",
456 | "pygments_lexer": "ipython3",
457 | "version": "3.7.3"
458 | }
459 | },
460 | "nbformat": 4,
461 | "nbformat_minor": 2
462 | }
463 |
--------------------------------------------------------------------------------
/Python_Basis/.ipynb_checkpoints/Basis_Advance-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/Python_Basis/.ipynb_checkpoints/Basis_high_ranking-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/Python_Basis/Basis.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Python基础教学代码---基础篇 \n",
8 | "1.变量赋值 \n",
9 | "2.标准数据类型 \n",
10 | "3.数据转换 \n",
11 | "4.算数运算符 \n",
12 | "5.格式化\n",
13 | "***"
14 | ]
15 | },
16 | {
17 | "cell_type": "markdown",
18 | "metadata": {},
19 | "source": [
20 | "## 1.变量赋值"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 2,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "name": "stdout",
30 | "output_type": "stream",
31 | "text": [
32 | "1\n",
33 | "2 2\n"
34 | ]
35 | }
36 | ],
37 | "source": [
38 | "a = 1 # 单变量赋值\n",
39 | "c = b = 2 # 多变量赋值\n",
40 | "print(a)\n",
41 | "print(b, c)"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": 4,
47 | "metadata": {},
48 | "outputs": [
49 | {
50 | "name": "stdout",
51 | "output_type": "stream",
52 | "text": [
53 | "\n",
54 | "\n",
55 | "\n"
56 | ]
57 | }
58 | ],
59 | "source": [
60 | "# 变量类型\n",
61 | "name = 'Chile' # 字符串\n",
62 | "miles = 1000.0 # 浮点型\n",
63 | "num = 100 # 整形\n",
64 | "# 打印变量类型\n",
65 | "print(type(name))\n",
66 | "print(type(miles))\n",
67 | "print(type(num))"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {},
73 | "source": [
74 | "***\n",
75 | "## 2.标准数据类型 \n",
76 | "\n",
77 | "Python有6个标准的数据类型: \n",
78 | "1.Numbers(数字) \n",
79 | "2.String(字符串) \n",
80 | "3.List(列表) \n",
81 | "4.Tuple(元组) \n",
82 | "5.Dictionary(字典) \n",
83 | "6.Set(集合) \n",
84 | "其中List,Tuple,Dictionary,Set可以放任意数据类型"
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "metadata": {},
90 | "source": [
91 | "### 数字"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": 5,
97 | "metadata": {},
98 | "outputs": [],
99 | "source": [
100 | "# Numbers: int & float\n",
101 | "a = 1 # int\n",
102 | "b = 1.0 # float"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": 6,
108 | "metadata": {},
109 | "outputs": [
110 | {
111 | "name": "stdout",
112 | "output_type": "stream",
113 | "text": [
114 | "C\n",
115 | "hi\n",
116 | "ile\n",
117 | "e\n"
118 | ]
119 | }
120 | ],
121 | "source": [
122 | "# String\n",
123 | "my_name = 'Chile'\n",
124 | "print(my_name[0]) # 打印第0个字符\n",
125 | "print(my_name[1: 3]) # 打印第1个到第2个的字符\n",
126 | "print(my_name[2:]) # 打印第2个到最后一个的字符\n",
127 | "print(my_name[-1]) # 打印倒数第一个字符"
128 | ]
129 | },
130 | {
131 | "cell_type": "markdown",
132 | "metadata": {},
133 | "source": [
134 | "### 列表"
135 | ]
136 | },
137 | {
138 | "cell_type": "code",
139 | "execution_count": 7,
140 | "metadata": {},
141 | "outputs": [
142 | {
143 | "name": "stdout",
144 | "output_type": "stream",
145 | "text": [
146 | "[1, 2, 3, 4]\n",
147 | "['Chile', 'b', 'c']\n",
148 | "['a', 1, 1.0, [1, 2, 3, 4], ['Chile', 'b', 'c']]\n"
149 | ]
150 | }
151 | ],
152 | "source": [
153 | "# List 可以放任意类型的数据类型\n",
154 | "num_list = [1, 2, 3, 4]\n",
155 | "str_list = ['Chile', 'b', 'c']\n",
156 | "mix_list = ['a', 1, 1.0, num_list, str_list]\n",
157 | "print(num_list)\n",
158 | "print(str_list)\n",
159 | "print(mix_list)"
160 | ]
161 | },
162 | {
163 | "cell_type": "markdown",
164 | "metadata": {},
165 | "source": [
166 | "### 元组"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": 26,
172 | "metadata": {},
173 | "outputs": [
174 | {
175 | "name": "stdout",
176 | "output_type": "stream",
177 | "text": [
178 | "1\n",
179 | "('chile', 111, 2.2, 'a', [1, 2, 3, 4])\n"
180 | ]
181 | },
182 | {
183 | "ename": "TypeError",
184 | "evalue": "'tuple' object does not support item assignment",
185 | "output_type": "error",
186 | "traceback": [
187 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
188 | "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
189 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmix_list\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmix_tuple\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mmix_tuple\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m1\u001b[0m \u001b[0;31m# 不可赋值,否则报错\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
190 | "\u001b[0;31mTypeError\u001b[0m: 'tuple' object does not support item assignment"
191 | ]
192 | }
193 | ],
194 | "source": [
195 | "# Tuple 可以放任意类型的数据类型\n",
196 | "mix_tuple = ('chile', 111, 2.2, 'a', num_list) # 不可赋值\n",
197 | "print(mix_list[1])\n",
198 | "print(mix_tuple)\n",
199 | "mix_tuple[1] = 1 # 不可赋值,否则报错"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {},
205 | "source": [
206 | "### 字典"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": 9,
212 | "metadata": {},
213 | "outputs": [
214 | {
215 | "name": "stdout",
216 | "output_type": "stream",
217 | "text": [
218 | "{'name': 'Chile', 'age': 18, 'num_list': [1, 2, 3, 4], 'tuple': ('chile', 111, 2.2, 'a', [1, 2, 3, 4])}\n",
219 | "dict_keys(['name', 'age', 'num_list', 'tuple'])\n",
220 | "dict_values(['Chile', 18, [1, 2, 3, 4], ('chile', 111, 2.2, 'a', [1, 2, 3, 4])])\n",
221 | "Chile\n",
222 | "[1, 2, 3, 4]\n",
223 | "('chile', 111, 2.2, 'a', [1, 2, 3, 4])\n"
224 | ]
225 | }
226 | ],
227 | "source": [
228 | "# Dictionary 可以放任意类型的数据类型\n",
229 | "test_dict = {'name': 'Chile', 'age': 18, 'num_list': num_list, 'tuple': mix_tuple}\n",
230 | "print(test_dict)\n",
231 | "print(test_dict.keys()) # 打印键\n",
232 | "print(test_dict.values()) # 打印值\n",
233 | "print(test_dict['name'])\n",
234 | "print(test_dict['num_list'])\n",
235 | "print(test_dict['tuple'])"
236 | ]
237 | },
238 | {
239 | "cell_type": "markdown",
240 | "metadata": {},
241 | "source": [
242 | "### 字典的赋值“陷阱”"
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "execution_count": 15,
248 | "metadata": {},
249 | "outputs": [
250 | {
251 | "name": "stdout",
252 | "output_type": "stream",
253 | "text": [
254 | "alialili\n",
255 | "alialili\n"
256 | ]
257 | }
258 | ],
259 | "source": [
260 | "# 直接字典赋值, 被赋值的字典的值改变,原字典也会改变\n",
261 | "test_dict_copy = test_dict\n",
262 | "test_dict_copy['name'] = 'alialili'\n",
263 | "print(test_dict['name']) \n",
264 | "print(test_dict_copy['name'])"
265 | ]
266 | },
267 | {
268 | "cell_type": "code",
269 | "execution_count": 17,
270 | "metadata": {},
271 | "outputs": [
272 | {
273 | "name": "stdout",
274 | "output_type": "stream",
275 | "text": [
276 | "alialili\n",
277 | "Mary\n"
278 | ]
279 | }
280 | ],
281 | "source": [
282 | "# 使用深拷贝避免这种情况发生\n",
283 | "from copy import deepcopy\n",
284 | "test_dict_copy = deepcopy(test_dict)\n",
285 | "test_dict_copy['name'] = 'Mary'\n",
286 | "print(test_dict['name']) \n",
287 | "print(test_dict_copy['name'])"
288 | ]
289 | },
290 | {
291 | "cell_type": "markdown",
292 | "metadata": {},
293 | "source": [
294 | "### 集合"
295 | ]
296 | },
297 | {
298 | "cell_type": "code",
299 | "execution_count": 30,
300 | "metadata": {},
301 | "outputs": [
302 | {
303 | "name": "stdout",
304 | "output_type": "stream",
305 | "text": [
306 | "{1, 'chile', '1', 'abc'}\n"
307 | ]
308 | }
309 | ],
310 | "source": [
311 | "# 可以放任意类型的基础数据类型\n",
312 | "# Set 集合:与数学意义上的集合意义一致,集合内每一个值都是唯一的\n",
313 | "test_set = {'abc', 1, 1, '1', 'chile'} \n",
314 | "print(test_set) # 打印出来会只有一个数字1,因为集合的去重功能"
315 | ]
316 | },
317 | {
318 | "cell_type": "markdown",
319 | "metadata": {},
320 | "source": [
321 | "***\n",
322 | "## 3.数据类型转换"
323 | ]
324 | },
325 | {
326 | "cell_type": "code",
327 | "execution_count": 12,
328 | "metadata": {},
329 | "outputs": [
330 | {
331 | "name": "stdout",
332 | "output_type": "stream",
333 | "text": [
334 | "\n",
335 | "\n",
336 | "\n",
337 | "\n",
338 | "---------------\n",
339 | "\n",
340 | "\n",
341 | "\n"
342 | ]
343 | }
344 | ],
345 | "source": [
346 | "tr_a = '1'\n",
347 | "int_b = int(tr_a) # 字符串转数字\n",
348 | "str_c = str(int_b) # 数字转字符串\n",
349 | "float_d = float(str_c) # 字符串转浮点\n",
350 | "print(type(tr_a))\n",
351 | "print(type(int_b))\n",
352 | "print(type(str_c))\n",
353 | "print(type(float_d))\n",
354 | "print('---------------')\n",
355 | "tr_list = [1, 2, 3]\n",
356 | "set_a = set(tr_list) # 列表转集合\n",
357 | "list_b = list(set_a) # 集合转列表\n",
358 | "print(type(tr_list))\n",
359 | "print(type(set_a))\n",
360 | "print(type(list_b))"
361 | ]
362 | },
363 | {
364 | "cell_type": "markdown",
365 | "metadata": {},
366 | "source": [
367 | "***\n",
368 | "## 4.算数运算符 "
369 | ]
370 | },
371 | {
372 | "cell_type": "code",
373 | "execution_count": 23,
374 | "metadata": {},
375 | "outputs": [
376 | {
377 | "name": "stdout",
378 | "output_type": "stream",
379 | "text": [
380 | "c: 1\n",
381 | "d: 8\n",
382 | "e: 8.0\n",
383 | "f: 0\n",
384 | "g: 1\n",
385 | "h: 8\n"
386 | ]
387 | }
388 | ],
389 | "source": [
390 | "# 运算符\n",
391 | "a = 2\n",
392 | "b = a + 2\n",
393 | "c = a - 1\n",
394 | "d = a * b\n",
395 | "e = d / c\n",
396 | "f = d % c # 取余\n",
397 | "g = 3 // 2 # 整除(向下取整)\n",
398 | "h = 2**3 # 求幂\n",
399 | "print('c:', c)\n",
400 | "print('d:', d)\n",
401 | "print('e:', e)\n",
402 | "print('f:', f)\n",
403 | "print('g:', g)\n",
404 | "print('h:', h)"
405 | ]
406 | },
407 | {
408 | "cell_type": "markdown",
409 | "metadata": {},
410 | "source": [
411 | "***\n",
412 | "## 5.格式化\n",
413 | "%s 代表字符串 \n",
414 | "%d 代表整数 \n",
415 | "%f 代表浮点 \n",
416 | "%.2f 代表保留小数点后两位"
417 | ]
418 | },
419 | {
420 | "cell_type": "code",
421 | "execution_count": 25,
422 | "metadata": {},
423 | "outputs": [
424 | {
425 | "name": "stdout",
426 | "output_type": "stream",
427 | "text": [
428 | "abc 1, dhfjdhfhdh, Chile, sjdhsjhdhs, skdjskjsk1.000000,sdjsdhs\n",
429 | "abc 1, dhfjdhfhdh, Chile, sjdhsjhdhs, skdjskjsk1.00,sdjsdhs\n"
430 | ]
431 | }
432 | ],
433 | "source": [
434 | "# 格式化\n",
435 | "print('abc %d, dhfjdhfhdh, %s, sjdhsjhdhs, skdjskjsk%f,sdjsdhs' % (1, 'Chile', 1.0))\n",
436 | "# %.2f保留小数点\n",
437 | "print('abc %d, dhfjdhfhdh, %s, sjdhsjhdhs, skdjskjsk%.2f,sdjsdhs' % (1, 'Chile', 1.0))"
438 | ]
439 | }
440 | ],
441 | "metadata": {
442 | "kernelspec": {
443 | "display_name": "Python 3",
444 | "language": "python",
445 | "name": "python3"
446 | },
447 | "language_info": {
448 | "codemirror_mode": {
449 | "name": "ipython",
450 | "version": 3
451 | },
452 | "file_extension": ".py",
453 | "mimetype": "text/x-python",
454 | "name": "python",
455 | "nbconvert_exporter": "python",
456 | "pygments_lexer": "ipython3",
457 | "version": "3.7.3"
458 | }
459 | },
460 | "nbformat": 4,
461 | "nbformat_minor": 2
462 | }
463 |
--------------------------------------------------------------------------------
/Python_Basis/Basis_Advance.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Python基础教学代码---进阶篇 \n",
8 | "1.循环 \n",
9 | "2.条件语句:if \n",
10 | "3.文件I/O \n",
11 | "4.异常处理 \n",
12 | "5.导包\n",
13 | "***"
14 | ]
15 | },
16 | {
17 | "cell_type": "markdown",
18 | "metadata": {},
19 | "source": [
20 | "## 1.循环 \n",
21 | "两种循环: \n",
22 | "1.1for 循环 \n",
23 | "1.2while 循环 "
24 | ]
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "metadata": {},
29 | "source": [
30 | "### for 循环打印List(列表)"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 5,
36 | "metadata": {},
37 | "outputs": [
38 | {
39 | "name": "stdout",
40 | "output_type": "stream",
41 | "text": [
42 | "第1种循环取值方式:直接取值\n",
43 | "Chile\n",
44 | "b\n",
45 | "c\n",
46 | "--------------------------\n",
47 | "第2种循环取值方式:索引取值\n",
48 | "Chile\n",
49 | "b\n",
50 | "c\n"
51 | ]
52 | }
53 | ],
54 | "source": [
55 | "str_list = ['Chile', 'b', 'c']\n",
56 | "\n",
57 | "print('第1种循环取值方式:直接取值')\n",
58 | "for sub_str in str_list:\n",
59 | " print(sub_str)\n",
60 | "\n",
61 | "print('--------------------------')\n",
62 | "print('第2种循环取值方式:索引取值') \n",
63 | "for i in range(len(str_list)):\n",
64 | " print(str_list[i])"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "### while 循环打印List(列表)"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 8,
77 | "metadata": {},
78 | "outputs": [
79 | {
80 | "name": "stdout",
81 | "output_type": "stream",
82 | "text": [
83 | "Chile\n",
84 | "b\n",
85 | "c\n"
86 | ]
87 | }
88 | ],
89 | "source": [
90 | "str_list = ['Chile', 'b', 'c']\n",
91 | "i = 0\n",
92 | "while i < len(str_list):\n",
93 | " print(str_list[i])\n",
94 | " i += 1\n"
95 | ]
96 | },
97 | {
98 | "cell_type": "markdown",
99 | "metadata": {},
100 | "source": [
101 | "### for 循环打印Tuple(元组)"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": 10,
107 | "metadata": {},
108 | "outputs": [
109 | {
110 | "name": "stdout",
111 | "output_type": "stream",
112 | "text": [
113 | "第1种循环取值方式:直接取值\n",
114 | "chile\n",
115 | "111\n",
116 | "2.2\n",
117 | "a\n",
118 | "['Chile', 'b', 'c']\n",
119 | "--------------------------\n",
120 | "第2种循环取值方式:索引取值\n",
121 | "chile\n",
122 | "111\n",
123 | "2.2\n",
124 | "a\n",
125 | "['Chile', 'b', 'c']\n"
126 | ]
127 | }
128 | ],
129 | "source": [
130 | "str_list = ['Chile', 'b', 'c']\n",
131 | "mix_tuple = ('chile', 111, 2.2, 'a', str_list) # 不可赋值\n",
132 | "print('第1种循环取值方式:直接取值')\n",
133 | "for sub_tuple in mix_tuple:\n",
134 | " print(sub_tuple)\n",
135 | "\n",
136 | "print('--------------------------')\n",
137 | "print('第2种循环取值方式:索引取值') \n",
138 | "for i in range(len(mix_tuple)):\n",
139 | " print(mix_tuple[i])"
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {},
145 | "source": [
146 | "### while 循环打印Tuple(元组)"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": 13,
152 | "metadata": {},
153 | "outputs": [
154 | {
155 | "name": "stdout",
156 | "output_type": "stream",
157 | "text": [
158 | "chile\n",
159 | "111\n",
160 | "2.2\n",
161 | "a\n",
162 | "['Chile', 'b', 'c']\n"
163 | ]
164 | }
165 | ],
166 | "source": [
167 | "str_list = ['Chile', 'b', 'c']\n",
168 | "mix_tuple = ('chile', 111, 2.2, 'a', str_list) # 不可赋值\n",
169 | "i = 0\n",
170 | "while i < len(mix_tuple):\n",
171 | " print(mix_tuple[i])\n",
172 | " i += 1"
173 | ]
174 | },
175 | {
176 | "cell_type": "markdown",
177 | "metadata": {},
178 | "source": [
179 | "### for 循环打印Dictionary(字典)"
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": 15,
185 | "metadata": {},
186 | "outputs": [
187 | {
188 | "name": "stdout",
189 | "output_type": "stream",
190 | "text": [
191 | "key: name\n",
192 | "value: Chile\n",
193 | "-------------\n",
194 | "key: age\n",
195 | "value: 18\n",
196 | "-------------\n",
197 | "key: num_list\n",
198 | "value: [1, 2, 3, 4]\n",
199 | "-------------\n",
200 | "key: tuple\n",
201 | "value: ('chile', 111, 2.2, 'a', ['Chile', 'b', 'c'])\n",
202 | "-------------\n"
203 | ]
204 | }
205 | ],
206 | "source": [
207 | "str_list = ['Chile', 'b', 'c']\n",
208 | "mix_tuple = ('chile', 111, 2.2, 'a', str_list) # 不可赋值\n",
209 | "num_list = [1, 2, 3, 4]\n",
210 | "test_dict = {'name': 'Chile', 'age': 18, 'num_list': num_list, 'tuple': mix_tuple}\n",
211 | "for key in test_dict.keys(): # 键值对打印法\n",
212 | " print('key:', key)\n",
213 | " print('value:', test_dict[key])\n",
214 | " print('-------------')\n",
215 | " "
216 | ]
217 | },
218 | {
219 | "cell_type": "markdown",
220 | "metadata": {},
221 | "source": [
222 | "### for 循环打印Set(集合)"
223 | ]
224 | },
225 | {
226 | "cell_type": "code",
227 | "execution_count": 22,
228 | "metadata": {},
229 | "outputs": [
230 | {
231 | "name": "stdout",
232 | "output_type": "stream",
233 | "text": [
234 | "abc \n",
235 | "1 \n",
236 | "1 \n",
237 | "chile \n"
238 | ]
239 | }
240 | ],
241 | "source": [
242 | "test_set = {'abc', 1, 1, '1', 'chile'} \n",
243 | "for value in test_set:\n",
244 | " print(value, ' ', type(value))"
245 | ]
246 | },
247 | {
248 | "cell_type": "markdown",
249 | "metadata": {},
250 | "source": [
251 | "## 2.条件语句:if\n",
252 | "2.1 == : 恒等符号 \n",
253 | "2.2 != : 不等符号 \n",
254 | "2.3 > :大于号 \n",
255 | "2.4 < :小于号 \n",
256 | "2.5 >=:大于等于号 \n",
257 | "2.6 <=:小于等于号 \n",
258 | "2.7 and:与 \n",
259 | "2.8 or:或 \n",
260 | "2.9 not:非 "
261 | ]
262 | },
263 | {
264 | "cell_type": "code",
265 | "execution_count": 16,
266 | "metadata": {},
267 | "outputs": [
268 | {
269 | "name": "stdout",
270 | "output_type": "stream",
271 | "text": [
272 | "a != b\n"
273 | ]
274 | }
275 | ],
276 | "source": [
277 | "a = 1 # 数字\n",
278 | "b = '1' # 字符串\n",
279 | "if a == b:\n",
280 | " print('a == b')\n",
281 | "else:\n",
282 | " print('a != b')"
283 | ]
284 | },
285 | {
286 | "cell_type": "code",
287 | "execution_count": 17,
288 | "metadata": {},
289 | "outputs": [
290 | {
291 | "name": "stdout",
292 | "output_type": "stream",
293 | "text": [
294 | "a < b\n"
295 | ]
296 | }
297 | ],
298 | "source": [
299 | "a = 1\n",
300 | "b = 2\n",
301 | "if a > b:\n",
302 | " print('a > b')\n",
303 | "elif a < b:\n",
304 | " print('a < b')\n",
305 | "else:\n",
306 | " print('a == b')"
307 | ]
308 | },
309 | {
310 | "cell_type": "code",
311 | "execution_count": 23,
312 | "metadata": {},
313 | "outputs": [
314 | {
315 | "name": "stdout",
316 | "output_type": "stream",
317 | "text": [
318 | "False\n",
319 | "True\n",
320 | "True\n"
321 | ]
322 | }
323 | ],
324 | "source": [
325 | "a = True\n",
326 | "b = False\n",
327 | "if a and b:\n",
328 | " print('True')\n",
329 | "else:\n",
330 | " print('False')\n",
331 | " \n",
332 | "if a or b:\n",
333 | " print('True')\n",
334 | "else:\n",
335 | " print('False')\n",
336 | "\n",
337 | "if a and (not b):\n",
338 | " print('True')\n",
339 | "else:\n",
340 | " print('False')\n",
341 | " "
342 | ]
343 | },
344 | {
345 | "cell_type": "markdown",
346 | "metadata": {},
347 | "source": [
348 | "# 3.文件I/O"
349 | ]
350 | },
351 | {
352 | "cell_type": "markdown",
353 | "metadata": {},
354 | "source": [
355 | "权限: \n",
356 | "w: 写权限 \n",
357 | "r:读权限 \n",
358 | "a:在原有文本的基础上追加文本的权限 \n",
359 | "\n",
360 | "互联网上的文件有非常多的格式,这里只是举个例子让大家感受下。至于更多格式的读写, \n",
361 | "小伙伴们可以通过互联网去搜索,Python是兼容很多文件格式的读写,代码风格都差不多。"
362 | ]
363 | },
364 | {
365 | "cell_type": "code",
366 | "execution_count": 17,
367 | "metadata": {},
368 | "outputs": [],
369 | "source": [
370 | "with open('text.txt', 'w') as fw: # 只有文件名,默认文件在统计目录\n",
371 | " string = 'I am chile!'\n",
372 | " for i in range(5):\n",
373 | " fw.write(string + '\\n') "
374 | ]
375 | },
376 | {
377 | "cell_type": "code",
378 | "execution_count": 7,
379 | "metadata": {},
380 | "outputs": [
381 | {
382 | "name": "stdout",
383 | "output_type": "stream",
384 | "text": [
385 | "I am chile!\n",
386 | "\n",
387 | "I am chile!\n",
388 | "\n",
389 | "I am chile!\n",
390 | "\n",
391 | "I am chile!\n",
392 | "\n",
393 | "I am chile!\n",
394 | "\n"
395 | ]
396 | }
397 | ],
398 | "source": [
399 | "with open('text.txt', 'r') as fr:\n",
400 | " for line in fr:\n",
401 | " print(line)"
402 | ]
403 | },
404 | {
405 | "cell_type": "code",
406 | "execution_count": 8,
407 | "metadata": {},
408 | "outputs": [],
409 | "source": [
410 | "with open('text.txt', 'a') as fw:\n",
411 | " string = 'You are handsome!'\n",
412 | " for i in range(5):\n",
413 | " fw.write(string + '\\n') "
414 | ]
415 | },
416 | {
417 | "cell_type": "code",
418 | "execution_count": 9,
419 | "metadata": {},
420 | "outputs": [
421 | {
422 | "name": "stdout",
423 | "output_type": "stream",
424 | "text": [
425 | "I am chile!\n",
426 | "\n",
427 | "I am chile!\n",
428 | "\n",
429 | "I am chile!\n",
430 | "\n",
431 | "I am chile!\n",
432 | "\n",
433 | "I am chile!\n",
434 | "\n",
435 | "You are handsome!\n",
436 | "\n",
437 | "You are handsome!\n",
438 | "\n",
439 | "You are handsome!\n",
440 | "\n",
441 | "You are handsome!\n",
442 | "\n",
443 | "You are handsome!\n",
444 | "\n"
445 | ]
446 | }
447 | ],
448 | "source": [
449 | "with open('text.txt', 'r') as fr:\n",
450 | " for line in fr:\n",
451 | " print(line) "
452 | ]
453 | },
454 | {
455 | "cell_type": "markdown",
456 | "metadata": {},
457 | "source": [
458 | "# 4.异常\n",
459 | "\n",
460 | "try: \n",
461 | " 执行正常代码。 \n",
462 | " \n",
463 | "except: \n",
464 | " 发生异常,执行此处代码 \n",
465 | " \n",
466 | "else:(这段代码可不加) \n",
467 | " 无异常,则执行此处代码 \n"
468 | ]
469 | },
470 | {
471 | "cell_type": "code",
472 | "execution_count": 12,
473 | "metadata": {},
474 | "outputs": [
475 | {
476 | "name": "stdout",
477 | "output_type": "stream",
478 | "text": [
479 | "The file does not exist!\n"
480 | ]
481 | }
482 | ],
483 | "source": [
484 | "try:\n",
485 | " with open('txr.txt', 'r') as fr:\n",
486 | " text = fr.read()\n",
487 | "except IOError:\n",
488 | " print('The file does not exist!')\n",
489 | "\n",
490 | "else:\n",
491 | " print('Succeed')\n"
492 | ]
493 | },
494 | {
495 | "cell_type": "markdown",
496 | "metadata": {},
497 | "source": [
498 | "## 异常处理的目的 \n",
499 | "小伙伴们一开始写Python的时候可能并不需要的异常处理机制,因为我们的代码简洁又高效,不过这并不代表你永远不需要。 \n",
500 | "现代软件是非常庞大的,而代码又是人写的,难免会出错,你不知道一个大型软件在运行过程中会在什么时候出现一个bug,这时候 \n",
501 | "异常处理机制就能让你快速定位自己软件的bug,缩短我们调试的时间,这就是异常处理机制的用途。"
502 | ]
503 | },
504 | {
505 | "cell_type": "markdown",
506 | "metadata": {},
507 | "source": [
508 | "# 5.导包 \n",
509 | "1.本地包 \n",
510 | "2.系统包 \n",
511 | " \n",
512 | "导包指令: \n",
513 | "1.import \n",
514 | "2.from ... import ..."
515 | ]
516 | },
517 | {
518 | "cell_type": "markdown",
519 | "metadata": {},
520 | "source": [
521 | "## 导入本地包"
522 | ]
523 | },
524 | {
525 | "cell_type": "code",
526 | "execution_count": 13,
527 | "metadata": {},
528 | "outputs": [
529 | {
530 | "name": "stdout",
531 | "output_type": "stream",
532 | "text": [
533 | "hello!\n"
534 | ]
535 | }
536 | ],
537 | "source": [
538 | "from test import hello\n",
539 | "hello()"
540 | ]
541 | },
542 | {
543 | "cell_type": "code",
544 | "execution_count": 14,
545 | "metadata": {},
546 | "outputs": [
547 | {
548 | "name": "stdout",
549 | "output_type": "stream",
550 | "text": [
551 | "hello!\n"
552 | ]
553 | }
554 | ],
555 | "source": [
556 | "import test\n",
557 | "test.hello()"
558 | ]
559 | },
560 | {
561 | "cell_type": "markdown",
562 | "metadata": {},
563 | "source": [
564 | "## 导入系统包"
565 | ]
566 | },
567 | {
568 | "cell_type": "code",
569 | "execution_count": 16,
570 | "metadata": {},
571 | "outputs": [
572 | {
573 | "name": "stdout",
574 | "output_type": "stream",
575 | "text": [
576 | "2019-08-21 09:43:07\n"
577 | ]
578 | }
579 | ],
580 | "source": [
581 | "import time # 引入时间模块\n",
582 | "# 格式化成year-month-day hour:min:sec形式\n",
583 | "print (time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())) "
584 | ]
585 | },
586 | {
587 | "cell_type": "markdown",
588 | "metadata": {},
589 | "source": [
590 | "## 导包的目的 \n",
591 | "Python之所以被称为胶水语言,是因为它有很多优秀的第三方包,让我们在编程过程中只关注任务的解决,而不拘泥于代码的繁琐, \n",
592 | "提升代码的复用率,加快编程速度。因此,导包是Python不可或缺的重要技能。"
593 | ]
594 | }
595 | ],
596 | "metadata": {
597 | "kernelspec": {
598 | "display_name": "Python 3",
599 | "language": "python",
600 | "name": "python3"
601 | },
602 | "language_info": {
603 | "codemirror_mode": {
604 | "name": "ipython",
605 | "version": 3
606 | },
607 | "file_extension": ".py",
608 | "mimetype": "text/x-python",
609 | "name": "python",
610 | "nbconvert_exporter": "python",
611 | "pygments_lexer": "ipython3",
612 | "version": "3.7.3"
613 | }
614 | },
615 | "nbformat": 4,
616 | "nbformat_minor": 2
617 | }
618 |
--------------------------------------------------------------------------------
/Python_Basis/__pycache__/test.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChileWang0228/DeepLearningTutorial/81a68bdba63c6a08f5c0c8c6289364ebdbc0b0c6/Python_Basis/__pycache__/test.cpython-37.pyc
--------------------------------------------------------------------------------
/Python_Basis/test.py:
--------------------------------------------------------------------------------
1 | def hello():
2 | print('hello!')
3 |
4 |
--------------------------------------------------------------------------------
/Python_Basis/text.txt:
--------------------------------------------------------------------------------
1 | I am chile!
2 | I am chile!
3 | I am chile!
4 | I am chile!
5 | I am chile!
6 |
--------------------------------------------------------------------------------
/Python_Basis/tx.txt:
--------------------------------------------------------------------------------
1 | Add some words.
--------------------------------------------------------------------------------
/Transfer_learning/.ipynb_checkpoints/Transfer_learning_cnn_image-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stderr",
10 | "output_type": "stream",
11 | "text": [
12 | "Using TensorFlow backend.\n"
13 | ]
14 | }
15 | ],
16 | "source": [
17 | "from tensorflow.python.keras.utils import get_file\n",
18 | "import gzip\n",
19 | "import numpy as np\n",
20 | "import keras\n",
21 | "from keras.datasets import cifar10\n",
22 | "from keras.preprocessing.image import ImageDataGenerator\n",
23 | "from keras.models import Sequential, Model\n",
24 | "from keras.layers import Dense, Dropout, Activation, Flatten\n",
25 | "from keras.layers import Conv2D, MaxPooling2D\n",
26 | "import os\n",
27 | "from keras import applications\n",
28 | "import cv2\n",
29 | "import functools\n",
30 | "from keras.models import load_model\n",
31 | "# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" # 使用第2个GPU"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 2,
37 | "metadata": {},
38 | "outputs": [],
39 | "source": [
40 | "# 数据集与代码放在一起即可\n",
41 | "def load_data():\n",
42 | " paths = [\n",
43 | " 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n",
44 | " 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n",
45 | " ]\n",
46 | "\n",
47 | " with gzip.open(paths[0], 'rb') as lbpath:\n",
48 | " y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n",
49 | "\n",
50 | " with gzip.open(paths[1], 'rb') as imgpath:\n",
51 | " x_train = np.frombuffer(\n",
52 | " imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28, 1)\n",
53 | "\n",
54 | " with gzip.open(paths[2], 'rb') as lbpath:\n",
55 | " y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n",
56 | "\n",
57 | " with gzip.open(paths[3], 'rb') as imgpath:\n",
58 | " x_test = np.frombuffer(\n",
59 | " imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28, 1)\n",
60 | "\n",
61 | " return (x_train, y_train), (x_test, y_test)"
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "metadata": {},
67 | "source": [
68 | "## 读取数据与数据预处理"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 6,
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | " # read dataset\n",
78 | "(x_train, y_train), (x_test, y_test) = load_data()\n",
79 | "batch_size = 32\n",
80 | "num_classes = 10\n",
81 | "epochs = 5\n",
82 | "data_augmentation = True # 图像增强\n",
83 | "num_predictions = 20\n",
84 | "save_dir = os.path.join(os.getcwd(), 'saved_models_transfer_learning')\n",
85 | "model_name = 'keras_fashion_transfer_learning_trained_model.h5'\n",
86 | "\n",
87 | "\n",
88 | "# Convert class vectors to binary class matrices. 将类别弄成独热编码\n",
89 | "y_train = keras.utils.to_categorical(y_train, num_classes)\n",
90 | "y_test = keras.utils.to_categorical(y_test, num_classes)\n",
91 | "\n",
92 | "\n",
93 | "# x_train = x_train.astype('float32')\n",
94 | "# x_test = x_test.astype('float32')\n",
95 | "# 由于mist的输入数据维度是(num, 28, 28),vgg16 需要三维图像,因为扩充一下mnist的最后一维\n",
96 | "X_train = [cv2.cvtColor(cv2.resize(i, (48, 48)), cv2.COLOR_GRAY2RGB) for i in x_train]\n",
97 | "X_test = [cv2.cvtColor(cv2.resize(i, (48, 48)), cv2.COLOR_GRAY2RGB) for i in x_test]\n",
98 | "\n",
99 | "x_train = np.asarray(X_train)\n",
100 | "x_test = np.asarray(X_test)\n",
101 | "\n",
102 | "x_train = x_train.astype('float32')\n",
103 | "x_test = x_test.astype('float32')\n",
104 | "\n",
105 | "x_train /= 255 # 归一化\n",
106 | "x_test /= 255 # 归一化\n"
107 | ]
108 | },
109 | {
110 | "cell_type": "markdown",
111 | "metadata": {},
112 | "source": [
113 | "## 迁移学习建模"
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": 7,
119 | "metadata": {},
120 | "outputs": [
121 | {
122 | "name": "stdout",
123 | "output_type": "stream",
124 | "text": [
125 | "(48, 48, 3)\n",
126 | "Tensor(\"block5_pool_1/MaxPool:0\", shape=(?, 1, 1, 512), dtype=float32)\n"
127 | ]
128 | }
129 | ],
130 | "source": [
131 | "# 使用VGG16模型\n",
132 | "base_model = applications.VGG16(include_top=False, weights='imagenet', input_shape=x_train.shape[1:]) # 第一层需要指出图像的大小\n",
133 | "\n",
134 | "# # path to the model weights files.\n",
135 | "# top_model_weights_path = 'bottleneck_fc_model.h5'\n",
136 | "print(x_train.shape[1:])\n",
137 | "model = Sequential()\n",
138 | "print(base_model.output)\n",
139 | "model.add(Flatten(input_shape=base_model.output_shape[1:]))\n",
140 | "model.add(Dense(256, activation='relu'))\n",
141 | "model.add(Dropout(0.5))\n",
142 | "model.add(Dense(num_classes))\n",
143 | "model.add(Activation('softmax'))\n",
144 | "\n",
145 | "# add the model on top of the convolutional base\n",
146 | "model = Model(inputs=base_model.input, outputs=model(base_model.output)) # VGG16模型与自己构建的模型合并\n",
147 | " \n",
148 | "for layer in model.layers[:15]:\n",
149 | " layer.trainable = False\n",
150 | "\n",
151 | "# initiate RMSprop optimizer\n",
152 | "opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)\n",
153 | "\n",
154 | "# Let's train the model using RMSprop\n",
155 | "model.compile(loss='categorical_crossentropy',\n",
156 | " optimizer=opt,\n",
157 | " metrics=['accuracy'])"
158 | ]
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "metadata": {},
163 | "source": [
164 | "## 训练"
165 | ]
166 | },
167 | {
168 | "cell_type": "code",
169 | "execution_count": 8,
170 | "metadata": {
171 | "scrolled": true
172 | },
173 | "outputs": [
174 | {
175 | "name": "stdout",
176 | "output_type": "stream",
177 | "text": [
178 | "Using real-time data augmentation.\n",
179 | "1875\n",
180 | "1875.0\n",
181 | "Epoch 1/5\n",
182 | "1875/1875 [==============================] - 1144s 610ms/step - loss: 0.4936 - acc: 0.8304 - val_loss: 0.3751 - val_acc: 0.8639\n",
183 | "Epoch 2/5\n",
184 | "1875/1875 [==============================] - 855s 456ms/step - loss: 0.3874 - acc: 0.8690 - val_loss: 0.3440 - val_acc: 0.8810\n",
185 | "Epoch 3/5\n",
186 | "1875/1875 [==============================] - 825s 440ms/step - loss: 0.3633 - acc: 0.8799 - val_loss: 0.3488 - val_acc: 0.8914\n",
187 | "Epoch 4/5\n",
188 | "1875/1875 [==============================] - 1563s 834ms/step - loss: 0.3491 - acc: 0.8855 - val_loss: 0.3238 - val_acc: 0.8998\n",
189 | "Epoch 5/5\n",
190 | "1875/1875 [==============================] - 1929s 1s/step - loss: 0.3443 - acc: 0.8911 - val_loss: 0.3749 - val_acc: 0.8878\n"
191 | ]
192 | }
193 | ],
194 | "source": [
195 | "if not data_augmentation:\n",
196 | " print('Not using data augmentation.')\n",
197 | " history = model.fit(x_train, y_train,\n",
198 | " batch_size=batch_size,\n",
199 | " epochs=epochs,\n",
200 | " validation_data=(x_test, y_test),\n",
201 | " shuffle=True)\n",
202 | "else:\n",
203 | " print('Using real-time data augmentation.')\n",
204 | " # This will do preprocessing and realtime data augmentation:\n",
205 | " datagen = ImageDataGenerator(\n",
206 | " featurewise_center=False, # set input mean to 0 over the dataset\n",
207 | " samplewise_center=False, # set each sample mean to 0\n",
208 | " featurewise_std_normalization=False, # divide inputs by std of the dataset\n",
209 | " samplewise_std_normalization=False, # divide each input by its std\n",
210 | " zca_whitening=False, # apply ZCA whitening\n",
211 | " zca_epsilon=1e-06, # epsilon for ZCA whitening\n",
212 | " rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n",
213 | " # randomly shift images horizontally (fraction of total width)\n",
214 | " width_shift_range=0.1,\n",
215 | " # randomly shift images vertically (fraction of total height)\n",
216 | " height_shift_range=0.1,\n",
217 | " shear_range=0., # set range for random shear\n",
218 | " zoom_range=0., # set range for random zoom\n",
219 | " channel_shift_range=0., # set range for random channel shifts\n",
220 | " # set mode for filling points outside the input boundaries\n",
221 | " fill_mode='nearest',\n",
222 | " cval=0., # value used for fill_mode = \"constant\"\n",
223 | " horizontal_flip=True, # randomly flip images\n",
224 | " vertical_flip=False, # randomly flip images\n",
225 | " # set rescaling factor (applied before any other transformation)\n",
226 | " rescale=None,\n",
227 | " # set function that will be applied on each input\n",
228 | " preprocessing_function=None,\n",
229 | " # image data format, either \"channels_first\" or \"channels_last\"\n",
230 | " data_format=None,\n",
231 | " # fraction of images reserved for validation (strictly between 0 and 1)\n",
232 | " validation_split=0.0)\n",
233 | "\n",
234 | " # Compute quantities required for feature-wise normalization\n",
235 | " # (std, mean, and principal components if ZCA whitening is applied).\n",
236 | " datagen.fit(x_train)\n",
237 | " print(x_train.shape[0]//batch_size) # 取整\n",
238 | " print(x_train.shape[0]/batch_size) # 保留小数\n",
239 | " # Fit the model on the batches generated by datagen.flow().\n",
240 | " history = model.fit_generator(datagen.flow(x_train, y_train, # 按batch_size大小从x,y生成增强数据\n",
241 | " batch_size=batch_size), \n",
242 | " # flow_from_directory()从路径生成增强数据,和flow方法相比最大的优点在于不用\n",
243 | " # 一次将所有的数据读入内存当中,这样减小内存压力,这样不会发生OOM\n",
244 | " epochs=epochs,\n",
245 | " steps_per_epoch=x_train.shape[0]//batch_size,\n",
246 | " validation_data=(x_test, y_test),\n",
247 | " workers=10 # 在使用基于进程的线程时,最多需要启动的进程数量。\n",
248 | " )"
249 | ]
250 | },
251 | {
252 | "cell_type": "markdown",
253 | "metadata": {},
254 | "source": [
255 | "## 模型可视化与保存模型"
256 | ]
257 | },
258 | {
259 | "cell_type": "code",
260 | "execution_count": 9,
261 | "metadata": {},
262 | "outputs": [
263 | {
264 | "name": "stdout",
265 | "output_type": "stream",
266 | "text": [
267 | "_________________________________________________________________\n",
268 | "Layer (type) Output Shape Param # \n",
269 | "=================================================================\n",
270 | "input_2 (InputLayer) (None, 48, 48, 3) 0 \n",
271 | "_________________________________________________________________\n",
272 | "block1_conv1 (Conv2D) (None, 48, 48, 64) 1792 \n",
273 | "_________________________________________________________________\n",
274 | "block1_conv2 (Conv2D) (None, 48, 48, 64) 36928 \n",
275 | "_________________________________________________________________\n",
276 | "block1_pool (MaxPooling2D) (None, 24, 24, 64) 0 \n",
277 | "_________________________________________________________________\n",
278 | "block2_conv1 (Conv2D) (None, 24, 24, 128) 73856 \n",
279 | "_________________________________________________________________\n",
280 | "block2_conv2 (Conv2D) (None, 24, 24, 128) 147584 \n",
281 | "_________________________________________________________________\n",
282 | "block2_pool (MaxPooling2D) (None, 12, 12, 128) 0 \n",
283 | "_________________________________________________________________\n",
284 | "block3_conv1 (Conv2D) (None, 12, 12, 256) 295168 \n",
285 | "_________________________________________________________________\n",
286 | "block3_conv2 (Conv2D) (None, 12, 12, 256) 590080 \n",
287 | "_________________________________________________________________\n",
288 | "block3_conv3 (Conv2D) (None, 12, 12, 256) 590080 \n",
289 | "_________________________________________________________________\n",
290 | "block3_pool (MaxPooling2D) (None, 6, 6, 256) 0 \n",
291 | "_________________________________________________________________\n",
292 | "block4_conv1 (Conv2D) (None, 6, 6, 512) 1180160 \n",
293 | "_________________________________________________________________\n",
294 | "block4_conv2 (Conv2D) (None, 6, 6, 512) 2359808 \n",
295 | "_________________________________________________________________\n",
296 | "block4_conv3 (Conv2D) (None, 6, 6, 512) 2359808 \n",
297 | "_________________________________________________________________\n",
298 | "block4_pool (MaxPooling2D) (None, 3, 3, 512) 0 \n",
299 | "_________________________________________________________________\n",
300 | "block5_conv1 (Conv2D) (None, 3, 3, 512) 2359808 \n",
301 | "_________________________________________________________________\n",
302 | "block5_conv2 (Conv2D) (None, 3, 3, 512) 2359808 \n",
303 | "_________________________________________________________________\n",
304 | "block5_conv3 (Conv2D) (None, 3, 3, 512) 2359808 \n",
305 | "_________________________________________________________________\n",
306 | "block5_pool (MaxPooling2D) (None, 1, 1, 512) 0 \n",
307 | "_________________________________________________________________\n",
308 | "sequential_2 (Sequential) (None, 10) 133898 \n",
309 | "=================================================================\n",
310 | "Total params: 14,848,586\n",
311 | "Trainable params: 7,213,322\n",
312 | "Non-trainable params: 7,635,264\n",
313 | "_________________________________________________________________\n",
314 | "Saved trained model at /home/student/ChileWang/machine_learning_homework/question_one/saved_models_transfer_learning/keras_fashion_transfer_learning_trained_model.h5 \n"
315 | ]
316 | }
317 | ],
318 | "source": [
319 | "model.summary()\n",
320 | "# Save model and weights\n",
321 | "if not os.path.isdir(save_dir):\n",
322 | " os.makedirs(save_dir)\n",
323 | "model_path = os.path.join(save_dir, model_name)\n",
324 | "model.save(model_path)\n",
325 | "print('Saved trained model at %s ' % model_path)"
326 | ]
327 | },
328 | {
329 | "cell_type": "markdown",
330 | "metadata": {},
331 | "source": [
332 | "## 训练过程可视化"
333 | ]
334 | },
335 | {
336 | "cell_type": "code",
337 | "execution_count": 10,
338 | "metadata": {},
339 | "outputs": [
340 | {
341 | "data": {
342 | "text/plain": [
343 | ""
344 | ]
345 | },
346 | "metadata": {},
347 | "output_type": "display_data"
348 | },
349 | {
350 | "data": {
351 | "text/plain": [
352 | ""
353 | ]
354 | },
355 | "metadata": {},
356 | "output_type": "display_data"
357 | }
358 | ],
359 | "source": [
360 | "import matplotlib.pyplot as plt\n",
361 | "# 绘制训练 & 验证的准确率值\n",
362 | "plt.plot(history.history['acc'])\n",
363 | "plt.plot(history.history['val_acc'])\n",
364 | "plt.title('Model accuracy')\n",
365 | "plt.ylabel('Accuracy')\n",
366 | "plt.xlabel('Epoch')\n",
367 | "plt.legend(['Train', 'Valid'], loc='upper left')\n",
368 | "plt.savefig('tradition_cnn_valid_acc.png')\n",
369 | "plt.show()\n",
370 | "\n",
371 | "# 绘制训练 & 验证的损失值\n",
372 | "plt.plot(history.history['loss'])\n",
373 | "plt.plot(history.history['val_loss'])\n",
374 | "plt.title('Model loss')\n",
375 | "plt.ylabel('Loss')\n",
376 | "plt.xlabel('Epoch')\n",
377 | "plt.legend(['Train', 'Valid'], loc='upper left')\n",
378 | "plt.savefig('tradition_cnn_valid_loss.png')\n",
379 | "plt.show()"
380 | ]
381 | }
382 | ],
383 | "metadata": {
384 | "kernelspec": {
385 | "display_name": "Python 3",
386 | "language": "python",
387 | "name": "python3"
388 | },
389 | "language_info": {
390 | "codemirror_mode": {
391 | "name": "ipython",
392 | "version": 3
393 | },
394 | "file_extension": ".py",
395 | "mimetype": "text/x-python",
396 | "name": "python",
397 | "nbconvert_exporter": "python",
398 | "pygments_lexer": "ipython3",
399 | "version": "3.7.3"
400 | }
401 | },
402 | "nbformat": 4,
403 | "nbformat_minor": 2
404 | }
405 |
--------------------------------------------------------------------------------
/Transfer_learning/Trasnfer_learning_cnn_regression.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from tensorflow.python.keras.utils import get_file\n",
10 | "import gzip\n",
11 | "import numpy as np\n",
12 | "import keras\n",
13 | "from keras.preprocessing.image import ImageDataGenerator\n",
14 | "from keras.models import Sequential, Model\n",
15 | "from keras.layers import Dense, Dropout, Activation, Flatten\n",
16 | "from keras.layers import Conv2D, MaxPooling2D\n",
17 | "import os\n",
18 | "from keras import applications\n",
19 | "import cv2\n",
20 | "import functools\n",
21 | "from keras.models import load_model\n",
22 | "import pandas as pd\n",
23 | "from sklearn.preprocessing import MinMaxScaler\n",
24 | "# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" # 使用第2个GPU"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": 98,
30 | "metadata": {},
31 | "outputs": [],
32 | "source": [
33 | "def load_data():\n",
34 | " paths = [\n",
35 | " 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n",
36 | " 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n",
37 | " ]\n",
38 | "\n",
39 | " with gzip.open(paths[0], 'rb') as lbpath:\n",
40 | " y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n",
41 | "\n",
42 | " with gzip.open(paths[1], 'rb') as imgpath:\n",
43 | " x_train = np.frombuffer(\n",
44 | " imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28, 1)\n",
45 | "\n",
46 | " with gzip.open(paths[2], 'rb') as lbpath:\n",
47 | " y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n",
48 | "\n",
49 | " with gzip.open(paths[3], 'rb') as imgpath:\n",
50 | " x_test = np.frombuffer(\n",
51 | " imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28, 1)\n",
52 | "\n",
53 | " return (x_train, y_train), (x_test, y_test)"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {},
59 | "source": [
60 | "## 读取数据与数据预处理"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": 99,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | " # read dataset\n",
70 | "(x_train, y_train), (x_test, y_test) = load_data()\n",
71 | "batch_size = 32\n",
72 | "epochs = 5\n",
73 | "data_augmentation = True # 图像增强\n",
74 | "num_predictions = 20\n",
75 | "save_dir = os.path.join(os.getcwd(), 'saved_models_transfer_learning')\n",
76 | "model_name = 'keras_fashion_transfer_learning_trained_model.h5'\n",
77 | "\n",
78 | "\n",
79 | "# Convert class vectors to binary class matrices. 将类别弄成独热编码\n",
80 | "# y_train = keras.utils.to_categorical(y_train, num_classes)\n",
81 | "# y_test = keras.utils.to_categorical(y_test, num_classes)\n",
82 | "\n",
83 | "\n",
84 | "# x_train = x_train.astype('float32')\n",
85 | "# x_test = x_test.astype('float32')\n",
86 | "# 由于mist的输入数据维度是(num, 28, 28),vgg16 需要三维图像,因为扩充一下mnist的最后一维\n",
87 | "X_train = [cv2.cvtColor(cv2.resize(i, (48, 48)), cv2.COLOR_GRAY2RGB) for i in x_train]\n",
88 | "X_test = [cv2.cvtColor(cv2.resize(i, (48, 48)), cv2.COLOR_GRAY2RGB) for i in x_test]\n",
89 | "\n",
90 | "x_train = np.asarray(X_train)\n",
91 | "x_test = np.asarray(X_test)\n",
92 | "\n",
93 | "x_train = x_train.astype('float32')\n",
94 | "x_test = x_test.astype('float32')\n",
95 | "\n",
96 | "x_train /= 255 # 归一化\n",
97 | "x_test /= 255 # 归一化"
98 | ]
99 | },
100 | {
101 | "cell_type": "markdown",
102 | "metadata": {},
103 | "source": [
104 | "## 伪造回归数据\n",
105 | "总共有10类衣服,他们每一类的价格设置规则如下: \n",
106 | "以(45, 57, 85, 99, 125, 27, 180, 152, 225, 33)为每一类衣服的均值,以3为标准差, \n",
107 | "利用正太分布小数点后两位的价格作为他们的衣服价格。"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": 100,
113 | "metadata": {},
114 | "outputs": [
115 | {
116 | "name": "stdout",
117 | "output_type": "stream",
118 | "text": [
119 | " label price\n",
120 | "0 9 29.66\n",
121 | "1 0 45.15\n",
122 | "2 0 50.35\n",
123 | "3 3 99.19\n",
124 | "4 0 46.33\n",
125 | "-------------------\n",
126 | " label price\n",
127 | "0 9 32.51\n",
128 | "1 2 80.91\n",
129 | "2 1 56.77\n",
130 | "3 1 56.36\n",
131 | "4 6 181.70\n"
132 | ]
133 | }
134 | ],
135 | "source": [
136 | "# 转成DataFrame格式方便数据处理\n",
137 | "y_train_pd = pd.DataFrame(y_train)\n",
138 | "y_test_pd = pd.DataFrame(y_test)\n",
139 | "# 设置列名\n",
140 | "y_train_pd.columns = ['label'] \n",
141 | "y_test_pd.columns = ['label']\n",
142 | "\n",
143 | "# 给每一类衣服设置价格\n",
144 | "mean_value_list = [45, 57, 85, 99, 125, 27, 180, 152, 225, 33] # 均值列表\n",
145 | "def setting_clothes_price(row):\n",
146 | " price = sorted(np.random.normal(mean_value_list[int(row)], 3,size=1))[0] #均值mean,标准差std,数量\n",
147 | " return np.round(price, 2)\n",
148 | "y_train_pd['price'] = y_train_pd['label'].apply(setting_clothes_price)\n",
149 | "y_test_pd['price'] = y_test_pd['label'].apply(setting_clothes_price)\n",
150 | "\n",
151 | "print(y_train_pd.head(5))\n",
152 | "print('-------------------')\n",
153 | "print(y_test_pd.head(5))"
154 | ]
155 | },
156 | {
157 | "cell_type": "markdown",
158 | "metadata": {},
159 | "source": [
160 | "## 数据归一化"
161 | ]
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": 115,
166 | "metadata": {},
167 | "outputs": [
168 | {
169 | "name": "stdout",
170 | "output_type": "stream",
171 | "text": [
172 | "60000\n",
173 | "10000\n"
174 | ]
175 | }
176 | ],
177 | "source": [
178 | "# y_train_price_pd = y_train_pd['price'].tolist()\n",
179 | "# y_test_price_pd = y_test_pd['price'].tolist()\n",
180 | "# 训练集归一化\n",
181 | "min_max_scaler = MinMaxScaler()\n",
182 | "min_max_scaler.fit(y_train_pd)\n",
183 | "y_train = min_max_scaler.transform(y_train_pd)[:, 1]\n",
184 | "\n",
185 | "# 验证集归一化\n",
186 | "min_max_scaler.fit(y_test_pd)\n",
187 | "y_test = min_max_scaler.transform(y_test_pd)[:, 1]\n",
188 | "y_test_label = min_max_scaler.transform(y_test_pd)[:, 0] # 归一化后的标签\n",
189 | "print(len(y_train))\n",
190 | "print(len(y_test))"
191 | ]
192 | },
193 | {
194 | "cell_type": "markdown",
195 | "metadata": {},
196 | "source": [
197 | "## 迁移学习建模"
198 | ]
199 | },
200 | {
201 | "cell_type": "code",
202 | "execution_count": 102,
203 | "metadata": {},
204 | "outputs": [
205 | {
206 | "name": "stdout",
207 | "output_type": "stream",
208 | "text": [
209 | "(48, 48, 3)\n",
210 | "Tensor(\"block5_pool_6/MaxPool:0\", shape=(?, 1, 1, 512), dtype=float32)\n"
211 | ]
212 | }
213 | ],
214 | "source": [
215 | "# 使用VGG16模型\n",
216 | "base_model = applications.VGG16(include_top=False, weights='imagenet', input_shape=x_train.shape[1:]) # 第一层需要指出图像的大小\n",
217 | "\n",
218 | "# # path to the model weights files.\n",
219 | "# top_model_weights_path = 'bottleneck_fc_model.h5'\n",
220 | "print(x_train.shape[1:])\n",
221 | "model = Sequential()\n",
222 | "print(base_model.output)\n",
223 | "model.add(Flatten(input_shape=base_model.output_shape[1:]))\n",
224 | "model.add(Dense(256, activation='relu'))\n",
225 | "model.add(Dropout(0.5))\n",
226 | "model.add(Dense(1))\n",
227 | "model.add(Activation('linear'))\n",
228 | "\n",
229 | "# add the model on top of the convolutional base\n",
230 | "model = Model(inputs=base_model.input, outputs=model(base_model.output)) # VGG16模型与自己构建的模型合并\n",
231 | "\n",
232 | "# 保持VGG16的前15层权值不变,即在训练过程中不训练 \n",
233 | "for layer in model.layers[:15]:\n",
234 | " layer.trainable = False\n",
235 | "\n",
236 | "# initiate RMSprop optimizer\n",
237 | "opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)\n",
238 | "\n",
239 | "# Let's train the model using RMSprop\n",
240 | "model.compile(loss='mse',\n",
241 | " optimizer=opt,\n",
242 | " )"
243 | ]
244 | },
245 | {
246 | "cell_type": "markdown",
247 | "metadata": {},
248 | "source": [
249 | "## 训练"
250 | ]
251 | },
252 | {
253 | "cell_type": "code",
254 | "execution_count": 103,
255 | "metadata": {},
256 | "outputs": [
257 | {
258 | "name": "stdout",
259 | "output_type": "stream",
260 | "text": [
261 | "Using real-time data augmentation.\n",
262 | "1875\n",
263 | "1875.0\n",
264 | "Epoch 1/5\n",
265 | "1875/1875 [==============================] - 1149s 613ms/step - loss: 0.0336 - val_loss: 0.0178\n",
266 | "Epoch 2/5\n",
267 | "1875/1875 [==============================] - 1805s 963ms/step - loss: 0.0183 - val_loss: 0.0146\n",
268 | "Epoch 3/5\n",
269 | "1875/1875 [==============================] - 2126s 1s/step - loss: 0.0162 - val_loss: 0.0140\n",
270 | "Epoch 4/5\n",
271 | "1875/1875 [==============================] - 2279s 1s/step - loss: 0.0151 - val_loss: 0.0145\n",
272 | "Epoch 5/5\n",
273 | "1875/1875 [==============================] - 2196s 1s/step - loss: 0.0144 - val_loss: 0.0136\n"
274 | ]
275 | }
276 | ],
277 | "source": [
278 | "if not data_augmentation:\n",
279 | " print('Not using data augmentation.')\n",
280 | " history = model.fit(x_train, y_train,\n",
281 | " batch_size=batch_size,\n",
282 | " epochs=epochs,\n",
283 | " validation_data=(x_test, y_test),\n",
284 | " shuffle=True)\n",
285 | "else:\n",
286 | " print('Using real-time data augmentation.')\n",
287 | " # This will do preprocessing and realtime data augmentation:\n",
288 | " datagen = ImageDataGenerator(\n",
289 | " featurewise_center=False, # set input mean to 0 over the dataset\n",
290 | " samplewise_center=False, # set each sample mean to 0\n",
291 | " featurewise_std_normalization=False, # divide inputs by std of the dataset\n",
292 | " samplewise_std_normalization=False, # divide each input by its std\n",
293 | " zca_whitening=False, # apply ZCA whitening\n",
294 | " zca_epsilon=1e-06, # epsilon for ZCA whitening\n",
295 | " rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n",
296 | " # randomly shift images horizontally (fraction of total width)\n",
297 | " width_shift_range=0.1,\n",
298 | " # randomly shift images vertically (fraction of total height)\n",
299 | " height_shift_range=0.1,\n",
300 | " shear_range=0., # set range for random shear\n",
301 | " zoom_range=0., # set range for random zoom\n",
302 | " channel_shift_range=0., # set range for random channel shifts\n",
303 | " # set mode for filling points outside the input boundaries\n",
304 | " fill_mode='nearest',\n",
305 | " cval=0., # value used for fill_mode = \"constant\"\n",
306 | " horizontal_flip=True, # randomly flip images\n",
307 | " vertical_flip=False, # randomly flip images\n",
308 | " # set rescaling factor (applied before any other transformation)\n",
309 | " rescale=None,\n",
310 | " # set function that will be applied on each input\n",
311 | " preprocessing_function=None,\n",
312 | " # image data format, either \"channels_first\" or \"channels_last\"\n",
313 | " data_format=None,\n",
314 | " # fraction of images reserved for validation (strictly between 0 and 1)\n",
315 | " validation_split=0.0)\n",
316 | "\n",
317 | " # Compute quantities required for feature-wise normalization\n",
318 | " # (std, mean, and principal components if ZCA whitening is applied).\n",
319 | " datagen.fit(x_train)\n",
320 | " print(x_train.shape[0]//batch_size) # 取整\n",
321 | " print(x_train.shape[0]/batch_size) # 保留小数\n",
322 | " # Fit the model on the batches generated by datagen.flow().\n",
323 | " history = model.fit_generator(datagen.flow(x_train, y_train, # 按batch_size大小从x,y生成增强数据\n",
324 | " batch_size=batch_size), \n",
325 | " # flow_from_directory()从路径生成增强数据,和flow方法相比最大的优点在于不用\n",
326 | " # 一次将所有的数据读入内存当中,这样减小内存压,这样不会发生OOM\n",
327 | " epochs=epochs,\n",
328 | " steps_per_epoch=x_train.shape[0]//batch_size,\n",
329 | " validation_data=(x_test, y_test),\n",
330 | " workers=10 # 在使用基于进程的线程时,最多需要启动的进程数量。\n",
331 | " )"
332 | ]
333 | },
334 | {
335 | "cell_type": "markdown",
336 | "metadata": {},
337 | "source": [
338 | "## 模型可视化与保存模型"
339 | ]
340 | },
341 | {
342 | "cell_type": "code",
343 | "execution_count": 104,
344 | "metadata": {},
345 | "outputs": [
346 | {
347 | "name": "stdout",
348 | "output_type": "stream",
349 | "text": [
350 | "_________________________________________________________________\n",
351 | "Layer (type) Output Shape Param # \n",
352 | "=================================================================\n",
353 | "input_7 (InputLayer) (None, 48, 48, 3) 0 \n",
354 | "_________________________________________________________________\n",
355 | "block1_conv1 (Conv2D) (None, 48, 48, 64) 1792 \n",
356 | "_________________________________________________________________\n",
357 | "block1_conv2 (Conv2D) (None, 48, 48, 64) 36928 \n",
358 | "_________________________________________________________________\n",
359 | "block1_pool (MaxPooling2D) (None, 24, 24, 64) 0 \n",
360 | "_________________________________________________________________\n",
361 | "block2_conv1 (Conv2D) (None, 24, 24, 128) 73856 \n",
362 | "_________________________________________________________________\n",
363 | "block2_conv2 (Conv2D) (None, 24, 24, 128) 147584 \n",
364 | "_________________________________________________________________\n",
365 | "block2_pool (MaxPooling2D) (None, 12, 12, 128) 0 \n",
366 | "_________________________________________________________________\n",
367 | "block3_conv1 (Conv2D) (None, 12, 12, 256) 295168 \n",
368 | "_________________________________________________________________\n",
369 | "block3_conv2 (Conv2D) (None, 12, 12, 256) 590080 \n",
370 | "_________________________________________________________________\n",
371 | "block3_conv3 (Conv2D) (None, 12, 12, 256) 590080 \n",
372 | "_________________________________________________________________\n",
373 | "block3_pool (MaxPooling2D) (None, 6, 6, 256) 0 \n",
374 | "_________________________________________________________________\n",
375 | "block4_conv1 (Conv2D) (None, 6, 6, 512) 1180160 \n",
376 | "_________________________________________________________________\n",
377 | "block4_conv2 (Conv2D) (None, 6, 6, 512) 2359808 \n",
378 | "_________________________________________________________________\n",
379 | "block4_conv3 (Conv2D) (None, 6, 6, 512) 2359808 \n",
380 | "_________________________________________________________________\n",
381 | "block4_pool (MaxPooling2D) (None, 3, 3, 512) 0 \n",
382 | "_________________________________________________________________\n",
383 | "block5_conv1 (Conv2D) (None, 3, 3, 512) 2359808 \n",
384 | "_________________________________________________________________\n",
385 | "block5_conv2 (Conv2D) (None, 3, 3, 512) 2359808 \n",
386 | "_________________________________________________________________\n",
387 | "block5_conv3 (Conv2D) (None, 3, 3, 512) 2359808 \n",
388 | "_________________________________________________________________\n",
389 | "block5_pool (MaxPooling2D) (None, 1, 1, 512) 0 \n",
390 | "_________________________________________________________________\n",
391 | "sequential_7 (Sequential) (None, 1) 131585 \n",
392 | "=================================================================\n",
393 | "Total params: 14,846,273\n",
394 | "Trainable params: 7,211,009\n",
395 | "Non-trainable params: 7,635,264\n",
396 | "_________________________________________________________________\n",
397 | "Saved trained model at /home/student/ChileWang/machine_learning_homework/question_one/saved_models_transfer_learning/keras_fashion_transfer_learning_trained_model.h5 \n"
398 | ]
399 | }
400 | ],
401 | "source": [
402 | "model.summary()\n",
403 | "# Save model and weights\n",
404 | "if not os.path.isdir(save_dir):\n",
405 | " os.makedirs(save_dir)\n",
406 | "model_path = os.path.join(save_dir, model_name)\n",
407 | "model.save(model_path)\n",
408 | "print('Saved trained model at %s ' % model_path)"
409 | ]
410 | },
411 | {
412 | "cell_type": "markdown",
413 | "metadata": {},
414 | "source": [
415 | "## 训练过程可视化"
416 | ]
417 | },
418 | {
419 | "cell_type": "code",
420 | "execution_count": 106,
421 | "metadata": {},
422 | "outputs": [
423 | {
424 | "data": {
425 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZgAAAEWCAYAAABbgYH9AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzt3Xl8XOV99/3PT7ssa7ElGa9YtiUTzGaMICw2YTEEaAJJIcU0rCFxSEoJoe1T0ru5k9D0KdzP3SRsWRwwBZKwFELrJE1JWRLMjgxmMQa8YLBsgyV5kSxZ++/54xzZ4/FIGtk6mpH0fb9e8/LMOdecuWZA+up3XdecY+6OiIjIYMtIdQdERGRkUsCIiEgkFDAiIhIJBYyIiERCASMiIpFQwIiISCQUMCJDzMwqzMzNLCuJtlea2bMHexyRVFDAiPTBzDaYWbuZlcVtXxn+cq9ITc9E0p8CRqR/7wOX9Dwws6OA/NR1R2R4UMCI9O9+4PKYx1cA98U2MLNiM7vPzOrM7AMz+0czywj3ZZrZ/zWzejNbD/xZgufebWZbzGyTmX3fzDIH2kkzm2xmy8xsm5mtNbOvxOw7wcxqzKzRzD42sx+E2/PM7Bdm1mBmO8zsFTM7ZKCvLZKIAkakfy8CRWZ2ePiL/2LgF3FtbgeKgZnApwgC6apw31eAzwDHAtXARXHPvRfoBCrDNmcDXz6Afj4A1AKTw9f4f83szHDfrcCt7l4EzAIeDrdfEfZ7GlAKXAPsPoDXFtmPAkYkOT1VzFnAO8Cmnh0xofMtd29y9w3AvwKXhU3+AviRu290923Av8Q89xDgXOB6d292963AD4FFA+mcmU0D5gN/7+6t7r4SuCumDx1ApZmVufsud38xZnspUOnuXe6+wt0bB/LaIr1RwIgk537gL4EriRseA8qAHOCDmG0fAFPC+5OBjXH7ekwHsoEt4RDVDuBnwIQB9m8ysM3dm3rpw9XAbOCdcBjsMzHv63HgQTPbbGb/x8yyB/jaIgkpYESS4O4fEEz2nwf8Om53PUElMD1m26HsrXK2EAxBxe7rsRFoA8rcvSS8Fbn7EQPs4mZgvJkVJuqDu69x90sIgusW4BEzK3D3Dnf/nrvPAU4mGMq7HJFBoIARSd7VwBnu3hy70d27COY0/tnMCs1sOnADe+dpHgauM7OpZjYOuDHmuVuAPwD/amZFZpZhZrPM7FMD6Zi7bwSeB/4lnLg/OuzvLwHM7FIzK3f3bmBH+LQuMzvdzI4Kh/kaCYKyayCvLdIbBYxIktx9nbvX9LL7r4FmYD3wLPArYGm47+cEw1CvA6+yfwV0OcEQ29vAduARYNIBdPESoIKgmnkM+I67/0+47xxglZntIpjwX+TurcDE8PUagdXAn9h/AYPIATFdcExERKKgCkZERCKhgBERkUgoYEREJBIKGBERicSoPs13WVmZV1RUpLobIiLDyooVK+rdvby/dqM6YCoqKqip6W3VqYiIJGJmH/TfSkNkIiISEQWMiIhEQgEjIiKRGNVzMIl0dHRQW1tLa2trqrsyZPLy8pg6dSrZ2TqJrogMHgVMnNraWgoLC6moqMDMUt2dyLk7DQ0N1NbWMmPGjFR3R0RGEA2RxWltbaW0tHRUhAuAmVFaWjqqKjYRGRoKmARGS7j0GG3vV0SGhgLmAOxu72TLzt3oTNQiIr1TwByA5vYu6praaG7rHPRjNzQ0MHfuXObOncvEiROZMmXKnsft7e1JHeOqq67i3XffHfS+iYgMhCb5D8D4MTnUNbXxcWMbBblZgzrEVFpaysqVKwH47ne/y9ixY/nbv/3bfdq4O+5ORkbivw/uueeeQeuPiMiBUgVzADIyjAmFuTS3d7IrgiomkbVr13LkkUdyzTXXMG/ePLZs2cLixYuprq7miCOO4KabbtrTdv78+axcuZLOzk5KSkq48cYbOeaYYzjppJPYunXrkPRXREQVTB++95tVvL25sdf9Le1dmEF+dmbSx5wzuYjvfPaIA+rP22+/zT333MNPf/pTAG6++WbGjx9PZ2cnp59+OhdddBFz5szZ5zk7d+7kU5/6FDfffDM33HADS5cu5cYbb0x0eBGRQaUK5iDkZBrd3U5X99BM9s+aNYvjjz9+z+MHHniAefPmMW/ePFavXs3bb7+933Py8/M599xzATjuuOPYsGHDkPRVREQVTB/6qzS63Xn3oyayMzOYVV4Q+XLfgoKCPffXrFnDrbfeyssvv0xJSQmXXnppwu+y5OTk7LmfmZlJZ+fQDOmJiKiCOQgZFszFtAzhXEyPxsZGCgsLKSoqYsuWLTz++OND+voiIv1RBXOQxhXsXVE2dpBXlPVl3rx5zJkzhyOPPJKZM2dyyimnDMnriogky6L8sqCZnQPcCmQCd7n7zXH7c4H7gOOABuBid99gZicAS3qaAd9198fMbFrYfiLQDSxx91vDY30X+ApQFz7vH9z9v/rqX3V1tcdfcGz16tUcfvjhA3qfDc1tbNq+m4qyAoryhucJIw/kfYvI6GRmK9y9ur92kQ2RmVkmcCdwLjAHuMTM5sQ1uxrY7u6VwA+BW8LtbwHV7j4XOAf4mZllAZ3A37j74cCJwF/FHfOH7j43vPUZLoNp3JgccjIz2NrYqm/3i4iEopyDOQFY6+7r3b0deBC4IK7NBcC94f1HgDPNzNy9xd17JjXyAAdw9y3u/mp4vwlYDUyJ8D0kJcOMCUW5tLR30dSqSXQREYg2YKYAG2Me17J/GOxpEwbKTqAUwMw+aWargDeBa2ICh3B/BXAs8FLM5mvN7A0zW2pm4xJ1yswWm1mNmdXU1dUlanJASsbkkJOVwcdNqmJERCDagEk02x3/m7fXNu7+krsfARwPfMvM8vY8yWws8Chwvbv3fBPyJ8AsYC6wBfjXRJ1y9yXuXu3u1eXl5QN5P30KVpTlsVtVjIgIEG3A1ALTYh5PBTb31iacYykGtsU2cPfVQDNwZNgumyBcfunuv45p97G7d7l7N/BzgiG6IVUyJjuoYjQXIyISacC8AlSZ2QwzywEWAcvi2iwDrgjvXwQ85e4ePicLwMymA4cBGyxYA3w3sNrdfxB7IDObFPPw8wQLBYbUniqmQ1WMiEhk34Nx904zuxZ4nGCZ8lJ3X2VmNwE17r6MICzuN7O1BJXLovDp84EbzayDYDny19293szmA5cBb5rZyrBtz3Lk/2NmcwmG2DYAX43qvfVl3JhstjYFVUxh3sC/F9PQ0MCZZ54JwEcffURmZiY9Q3kvv/zyPt/M78vSpUs577zzmDhx4sDegIjIIIn0i5bhL/7/itv2v2PutwJfSPC8+4H7E2x/lsTzNrj7ZQfb38FgYRVTu72FxtZOivMH9r2YZE7Xn4ylS5cyb948BYyIpIy+yR+BcWOyqWvKZGtjK0UHUMX05t577+XOO++kvb2dk08+mTvuuIPu7m6uuuoqVq5cibuzePFiDjnkEFauXMnFF19Mfn7+gCofEZHBooDpy+9vhI/eHPDTDJj
Z3U1bRzdd2RlkxV4YbOJRcO7NvT63N2+99RaPPfYYzz//PFlZWSxevJgHH3yQWbNmUV9fz5tvBv3csWMHJSUl3H777dxxxx3MnTt3wK8lIjIYFDARycowOgzau7rJzDAs8che0p544gleeeUVqquDszPs3r2badOm8elPf5p3332Xb3zjG5x33nmcffbZg9F9EZGDpoDpywFUGj0MaGtpZ+O2FqaPH0PxmIMbonJ3vvSlL/FP//RP++174403+P3vf89tt93Go48+ypIlSxIcQURkaOl0/REqyc8mNyuTj5vaDvp7MQsXLuThhx+mvr4eCFabffjhh9TV1eHufOELX+B73/ser776KgCFhYU0NTUd9HsQETlQqmAiZGYcUpTLh9ta2Lm7g5KDqGKOOuoovvOd77Bw4UK6u7vJzs7mpz/9KZmZmVx99dW4O2bGLbcE5wu96qqr+PKXv6xJfhFJmUhP15/uBut0/X1xd977eBdmUDVh7JBdL2agdLp+EUlWyk/XL4GeKqa1o4uduztS3R0RkSGjgBkCxfnZ5GVlsrXx4OdiRESGCwVMAoMdAhZeL6a1Mz2rGIWeiERBARMnLy+PhoaGQf+lW5yfTV52Jh+nWRXj7jQ0NJCXl9d/YxGRAdAqsjhTp06ltraWwbwYWY/d7V00NLez6+NsxuSkz0efl5fH1KlTU90NERlh0ue3XJrIzs5mxowZkRy7u9s577bldHR184dvforMjPRcUSYiMhg0RDaEMjKMb5xZxbq6Zn7zevy110RERhYFzBD79BET+cTEQm57cg2dXd2p7o6ISGQUMEMsI8O4fmEV6+ubWaYqRkRGsEgDxszOMbN3zWytmd2YYH+umT0U7n/JzCrC7SeY2crw9rqZfb6/Y4aXWX7JzNaEx0zbc6OcPWcih08qUhUjIiNaZAFjZpnAncC5wBzgEjObE9fsamC7u1cCPwRuCbe/BVS7+1zgHOBnZpbVzzFvAX7o7lXA9vDYaamnitnQ0MJ/rlQVIyIjU5QVzAnAWndf7+7twIPABXFtLgDuDe8/ApxpZubuLe7eGW7PA3q+OJLwmBac4OuM8BiEx/xcJO9qkJw95xCOmFzE7U+pihGRkSnKgJkCbIx5XBtuS9gmDJSdQCmAmX3SzFYBbwLXhPt7O2YpsCMmlBK9FuFxF5tZjZnVRPFdl2SZGdcvnM2GhhYee21TyvohIhKVKAMm0Zc84r/C3msbd3/J3Y8Ajge+ZWZ5fbRP5rUIj7vE3avdvbq8vLzXzg+FhYdP4MgpRdz+1Fo6VMWIyAgTZcDUAtNiHk8F4icc9rQxsyygGNgW28DdVwPNwJF9HLMeKAmP0dtrpR0z4/ozZ/PhthYee1VVjIiMLFEGzCtAVbi6KwdYBCyLa7MMuCK8fxHwlLt7+JwsADObDhwGbOjtmB6c3Ovp8BiEx/zP6N7a4Dnz8AkcPbWY259eoypGREaUyAImnA+5FngcWA087O6rzOwmMzs/bHY3UGpma4EbgJ5lx/OB181sJfAY8HV3r+/tmOFz/h64ITxWaXjstBfMxVSxcdtufv1qbaq7IyIyaHRFy7grWqaCu/O5Hz9Pw642nvqb08jJ0vdfRSR96YqWw0hPFVO7fTePqooRkRFCAZMmTptdztxpJdzx1FraOzUXIyLDnwImTfRUMZt27ObfV2zs/wkiImlOAZNGPjW7nGMPLeFOVTEiMgIoYNKImfHNhbPZvLOVh2tUxYjI8KaASTMLqso4bvo47nx6LW2dXanujojIAVPApJmeKmbLzlYefkVVjIgMXwqYNHRKZSnV08dx59PraO1QFSMiw5MCJg2ZGd88azYfNbbykKoYERmmFDBp6uRZpZxQMZ4f/3GtqhgRGZYUMGnKzLj+rCo+bmzjwZc/THV3REQGTAGTxk6eVcYnZ4znx3/UXIyIDD8KmDR3/cLZbG1q41cvqYoRkeFFAZPmTppVyokzx/OTP6mKEZHhRQEzDHxz4Wzqmtr4paoYERlGFDDDwCdnlnLyrFJ+8sd17G5XFSMiw0OkAWNm55jZu2a21sxuTLA/18weCve/ZGYV4fazzGyFmb0Z/ntGuL3QzFbG3OrN7EfhvivNrC5m35ejfG9D7ZtnzaZ+Vxu/fOmDVHdFRCQpkQWMmWUCdwLnAnOAS8xsTlyzq4Ht7l4J/BC4JdxeD3zW3Y8CrgDuB3D3Jnef23MDPgB+HXO8h2L23xXVe0uF4yvGM7+yjJ/+aR0t7Z2p7o6ISL+irGBOANa6+3p3bwceBC6Ia3MBcG94/xHgTDMzd3/N3TeH21cBeWaWG/tEM6sCJgDLI3sHaeb6hVXU72rnFy+qihGR9BdlwEwBYs9zUhtuS9jG3TuBnUBpXJsLgdfcvS1u+yUEFYvHtjWzN8zsETOblqhTZrbYzGrMrKaurm5g7yjFqivGs6CqjJ/9ab2qGBFJe1EGjCXY5gNpY2ZHEAybfTVBu0XAAzGPfwNUuPvRwBPsrYz2Pbj7Enevdvfq8vLyPrqfnq5fOJuG5nbuf0FVjIiktygDphaIrSKmApt7a2NmWUAxsC18PBV4DLjc3dfFPsnMjgGy3H1FzzZ3b4ipcn4OHDd4byV9HDd9HKfOLudnz6ynuU1VjIikrygD5hWgysxmmFkOQcWxLK7NMoJJfICLgKfc3c2sBPgd8C13fy7BsS9h3+oFM5sU8/B8YPUgvIe09M2FVWxrbuc+VTEiksYiC5hwTuVa4HGCX/YPu/sqM7vJzM4Pm90NlJrZWuAGoGcp87VAJfDtmGXHE2IO/xfEBQxwnZmtMrPXgeuAKyN5Y2ng2EPHcdph5Sx5Zh27VMWISJqyfefIR5fq6mqvqalJdTcOyMqNO/jcnc/xd58+jL86vTLV3RGRUcTMVrh7dX/t9E3+YWrutBJOP6ycny9frypGRNKSAmYYu37hbHa0dHDv8xtS3RURkf0oYIaxY6aVcOYnJrDkmfU0tXakujsiIvtQwAxz1y+czc7dHfzbcxtS3RURkX0oYIa5o6YWs/DwQ/j58vU0qooRkTSigBkBrl9YRWNrJ/c8uyHVXRER2UMBMwIcOaWYs+Ycwt3PrmfnblUxIpIeFDAjxJ4q5rn3U90VERFAATNiHDG5mE8fcQh3P/u+qhgRSQsKmBHkG2fOpqm1k7ufVRUjIqmngBlB5kwu4pwjJnLPs++zs0VVjIiklgJmhPnGwiqa2jq569n1qe6KiIxyCpgR5vBJRZx31ETueW4DO1raU90dERnFFDAj0DfOnE1zeyd3LddcjIikjgJmBDpsYiHnHTWJe557n+3NqmJEJDUiDRgzO8fM3jWztWZ2Y4L9uWb2ULj/JTOrCLefZWYrzOzN8N8zYp7zx/CY+1yIrLdjjVbfOLOKlo4ufr5cczEikhqRBYyZZQJ3AucCc4BLzGxOXLOrge3uXgn8ELgl3F4PfNbdjyK4pPL9cc/7orvPDW9b+znWqDT7kEL+7KhJ3Pv8BrapihGRFIiygjkBWOvu6929HXgQuCCuzQXAveH9R4Azzczc/TV33xxuXwXkmV
luP6+X8FgH/S6GMVUxIpJKUQbMFGBjzOPacFvCNu7eCewESuPaXAi85u5tMdvuCYfHvh0TIskcCzNbbGY1ZlZTV1d3YO9smKg6pJDPHj2Ze5/fQMOutv6fICIyiKIMmETVgw+kjZkdQTDU9dWY/V8Mh84WhLfLBvB6uPsSd6929+ry8vI+uj8yXHdmFa0dXSxRFSMiQyzKgKkFpsU8ngps7q2NmWUBxcC28PFU4DHgcndf1/MEd98U/tsE/IpgKK7PY41mlRPGcv4xk7nv+Q+oVxUjIkMoyoB5BagysxlmlgMsApbFtVlGMIkPcBHwlLu7mZUAvwO+5e7P9TQ2sywzKwvvZwOfAd7q61gRvK9h56/PrKKts4slz6iKEZGhE1nAhPMg1wKPA6uBh919lZndZGbnh83uBkrNbC1wA9CzlPlaoBL4dtxy5FzgcTN7A1gJbAJ+3s+xRr1Z5WO5YO4U7nthA3VNqmJEZGjYaP4jv7q62mtqalLdjSGxvm4XC3/wJ66eP4P/9Wfxq8VFRJJnZivcvbq/dklVMGY2q2eZsJmdZmbXhcNYMkzMLB/L546dwv0vfsDWptZUd0dERoFkh8geBbrMrJJgKGoGwQS7DCPXnVFFR5fzsz9pLkZEopdswHSHcyqfB37k7t8EJkXXLYlCRVkBnz92Cr948QO2NqqKEZFoJRswHWZ2CcEqrd+G27Kj6ZJE6a/PqKSz2/nJn9b131hE5CAkGzBXAScB/+zu75vZDOAX0XVLojK9tIA/P3YKv3rpQ1UxIhKppALG3d929+vc/QEzGwcUuvvNEfdNIvLXZ1TR1e38+I+qYkQkOsmuIvujmRWZ2XjgdYJzgf0g2q5JVA4tHcOF86byq5c/5KOdqmJEJBrJDpEVu3sj8OfAPe5+HLAwum5J1K49o5Lubucnf1yb6q6IyAiVbMBkmdkk4C/YO8kvw9i08WP4QvVUHnh5I1t27k51d0RkBEo2YG4iOOXLOnd/xcxmAmui65YMha+fVkm3Oz9+WnMxIjL4kp3k/3d3P9rdvxY+Xu/uF0bbNYlaUMVM46FXNrJ5h6oYERlcyU7yTzWzx8xsq5l9bGaPhqfTl2Hu2jMqcZwfay5GRAZZskNk9xCcDn8ywZUjfxNuk2FuSkk+fxFWMZtUxYjIIEo2YMrd/R537wxv/waM/MtBjhJ/dXolAHc+rSpGRAZPsgFTb2aXmllmeLsUaIiyYzJ0Jpfkc/Hx0/j3mo3Ubm9JdXdEZIRINmC+RLBE+SNgC8EVI6+KqlMy9P7q9EoMUxUjIoMm2VVkH7r7+e5e7u4T3P1zBF+67JOZnWNm75rZWjPb7wqTZpZrZg+F+18ys4pw+1lmtsLM3gz/PSPcPsbMfmdm75jZKjO7OeZYV5pZXcwVML+c5GcgwKTifBadMI1/r6ll4zZVMSJy8A7mksk39LXTzDKBO4FzgTnAJWYWfynFq4Ht7l4J/BC4JdxeD3zW3Y8iOIPz/THP+b/u/gngWOAUMzs3Zt9D7j43vN11oG9stPr6aZVkZKiKEZHBcTABY/3sPwFYG35nph14ELggrs0FwL3h/UeAM83M3P01d98cbl8F5JlZrru3uPvTAOExXwW0XHqQTCzO4y9POJRHVqiKEZGDdzAB4/3snwJsjHlcG25L2Ca8oNlOoDSuzYXAa+7eFrsxvGTzZ4EnY9ua2Rtm9oiZTUvUKTNbbGY1ZlZTV1fXz1sYfb522iwyMozbn9KJGkTk4PQZMGbWZGaNCW5NBN+J6fPpCbbFh1KfbczsCIJhs6/G9SsLeAC4zd17rv/7G6DC3Y8GnmBvZbTvwd2XuHu1u1eXl2uldbxDioIq5tFXN/FBQ3OquyMiw1ifAePuhe5elOBW6O5Z/Ry7FoitIqYCm3trE4ZGMbAtfDwVeAy43N3jT5a1BFjj7j+K6WtDTJXzc+C4fvonvfj6abPIyjDueEpzMSJy4A5miKw/rwBVZjbDzHKARQRnA4i1jGASH4Klz0+5u4fDX78DvuXuz8U+wcy+TxBE18dtnxTz8Hxg9aC9k1FmQlEeX/zkdH792iY21KuKEZEDE1nAhHMq1xKchXk18LC7rzKzm8zs/LDZ3UCpma0lWJXWs5T5WqAS+HbMsuMJYVXzvwhWpb0atxz5unDp8uvAdcCVUb230eCa02aSnWncripGRA6Qufc3Vz9yVVdXe01NTaq7kba+/9u3Wfrc+zz5N6cxo6wg1d0RkTRhZivcvbq/dlEOkckw99VPzSInK4Pbn9SKMhEZOAWM9Kq8MJfLTpzOf6zcxLq6XanujogMMwoY6dNXPzWL3KxMrSgTkQFTwEifysbmcvlJ0/lPVTEiMkAKGOnX4lNnkpedyW2aixGRAVDASL9Kx+Zy+UkVLHt9M2u3NqW6OyIyTChgJCmLT51JfnYmtz6puRgRSY4CRpIyviCHK06u4LdvbGbNx6piRKR/ChhJ2uIFMxmTncmtmosRkSQoYCRp4wpyuPKUCn735hbeUxUjIv1QwMiAfGXBTApysrj1CVUxItI3BYwMSMmYHK4Kq5h3PmpMdXdEJI0pYGTArp4/g8JcVTEi0jcFjAxYTxXz+7c+YvUWVTEikpgCRg7I1fNnUpinKkZEeqeAkQNSPCabL50yg/9e9RGrNu9MdXdEJA1FGjBmdo6ZvWtma83sxgT7c83soXD/S2ZWEW4/y8xWmNmb4b9nxDznuHD7WjO7zcws3D7ezP7HzNaE/46L8r0JfGn+DFUxItKryALGzDKBO4FzCS5xfImZzYlrdjWw3d0rgR8Ct4Tb64HPuvtRwBXA/THP+QmwGKgKb+eE228EnnT3KuBJ9l5+WSJSnJ/Nl+fP5A9vf8xbm1TFiMi+oqxgTgDWuvt6d28HHgQuiGtzAXBveP8R4EwzM3d/zd03h9tXAXlhtTMJKHL3Fzy41vN9wOcSHOvemO0SoavmV1CUl8WPVMWISJwoA2YKsDHmcW24LWEbd+8EdgKlcW0uBF5z97awfW0vxzzE3beEx9oCTEjUKTNbbGY1ZlZTV1c34Dcl+yrKy+bLC2byxGpVMSKyrygDxhJs84G0MbMjCIbNvjqAY/bJ3Ze4e7W7V5eXlw/kqdKLq06poDg/mx898V6quyIiaSTKgKkFpsU8ngps7q2NmWUBxcC28PFU4DHgcndfF9N+ai/H/DgcQiP8d+ugvRPpU2FeNl9ZMIMnVm/ljdodqe6OiKSJKAPmFaDKzGaYWQ6wCFgW12YZwSQ+wEXAU+7uZlYC/A74lrs/19M4HPpqMrMTw9VjlwP/meBYV8RslyFwxckVlIzJ1lyMiOwRWcCEcyrXAo8Dq4GH3X2Vmd1kZueHze4GSs1sLXADe1d+XQtUAt82s5XhrWdO5WvAXcBaYB3w+3D7zcBZZrYGOCt8LEMkqGJm8tQ7W1m5UVWMiIAFi7FGp+rqaq+pqUl1N0aMXW2dLLjlKeZOK+Geq05IdXdEJCJmtsLdq/trp2/yy6AZm5vFV06dydPv1vHah9tT3R0RSTEFjAyqK06qYHxBjuZiREQBI4OrI
[base64 PNG payload truncated; the rendered output is the 'Model loss' figure: training vs. validation loss by epoch]\n",
426 | "text/plain": [
427 | ""
428 | ]
429 | },
430 | "metadata": {
431 | "needs_background": "light"
432 | },
433 | "output_type": "display_data"
434 | }
435 | ],
436 | "source": [
437 | "import matplotlib.pyplot as plt\n",
438 |     "# Plot the training & validation loss\n",
439 | "plt.plot(history.history['loss'])\n",
440 | "plt.plot(history.history['val_loss'])\n",
441 | "plt.title('Model loss')\n",
442 | "plt.ylabel('Loss')\n",
443 | "plt.xlabel('Epoch')\n",
444 | "plt.legend(['Train', 'Test'], loc='upper left')\n",
445 | "plt.show()"
446 | ]
447 | },
448 | {
449 | "cell_type": "markdown",
450 | "metadata": {},
451 | "source": [
452 | "## 模型的预测功能"
453 | ]
454 | },
455 | {
456 | "cell_type": "code",
457 | "execution_count": 116,
458 | "metadata": {},
459 | "outputs": [
460 | {
461 | "name": "stdout",
462 | "output_type": "stream",
463 | "text": [
464 | "[[ 9. 36.39809359]\n",
465 | " [ 2. 90.78814453]\n",
466 | " [ 1. 58.21489228]\n",
467 | " ...\n",
468 | " [ 8. 225.04189952]\n",
469 | " [ 1. 58.29625367]\n",
470 | " [ 5. 118.09064091]]\n"
471 | ]
472 | }
473 | ],
474 | "source": [
475 | "# 预测\n",
476 | "y_new = model.predict(x_test)\n",
477 | "# 反归一化\n",
478 | "min_max_scaler.fit(y_test_pd)\n",
479 | "y_pred_pd = pd.DataFrame({'label':list(y_test_label), 'price':list(y_new)})\n",
480 | "y_new = min_max_scaler.inverse_transform(y_pred_pd)"
481 | ]
482 | },
483 | {
484 | "cell_type": "markdown",
485 | "metadata": {},
486 | "source": [
487 | "## 真实值与预测值"
488 | ]
489 | },
490 | {
491 | "cell_type": "code",
492 | "execution_count": 120,
493 | "metadata": {},
494 | "outputs": [
495 | {
496 | "name": "stdout",
497 | "output_type": "stream",
498 | "text": [
499 | " label price true_price\n",
500 | "0 9 32.51 32.51\n",
501 | "1 2 80.91 80.91\n",
502 | "2 1 56.77 56.77\n",
503 | "3 1 56.36 56.36\n",
504 | "4 6 181.70 181.70\n",
505 | "5 1 58.06 58.06\n",
506 | "6 4 127.31 127.31\n",
507 | "7 6 180.50 180.50\n",
508 | "8 5 27.21 27.21\n",
509 | "9 7 147.69 147.69\n",
510 | "10 4 123.40 123.40\n",
511 | "11 5 28.70 28.70\n",
512 | "12 7 153.20 153.20\n",
513 | "13 3 99.74 99.74\n",
514 | "14 4 117.12 117.12\n",
515 | "15 1 56.82 56.82\n",
516 | "16 2 84.18 84.18\n",
517 | "17 4 124.43 124.43\n",
518 | "18 8 221.80 221.80\n",
519 | "19 0 50.01 50.01\n"
520 | ]
521 | }
522 | ],
523 | "source": [
524 | "y_test_pd['true_price'] = pd.DataFrame(y_test_pd['price'])\n",
525 | "print(y_test_pd.head(20))"
526 | ]
527 | }
528 | ],
529 | "metadata": {
530 | "kernelspec": {
531 | "display_name": "Python 3",
532 | "language": "python",
533 | "name": "python3"
534 | },
535 | "language_info": {
536 | "codemirror_mode": {
537 | "name": "ipython",
538 | "version": 3
539 | },
540 | "file_extension": ".py",
541 | "mimetype": "text/x-python",
542 | "name": "python",
543 | "nbconvert_exporter": "python",
544 | "pygments_lexer": "ipython3",
545 | "version": "3.7.2"
546 | }
547 | },
548 | "nbformat": 4,
549 | "nbformat_minor": 2
550 | }
551 |
--------------------------------------------------------------------------------