├── .gitignore
├── B3J060370-zhinengjisuanxitong.pdf
├── LICENSE
├── README.md
├── exp_2_1_mnist_mlp
│   ├── .gitignore
│   ├── main_exp_2_1.py
│   ├── readme.txt
│   ├── requirements.txt
│   └── stu_upload
│       ├── __init__.py
│       ├── layers_1.py
│       └── mnist_mlp_cpu.py
├── exp_2_2_mnist_mlp_dlp
│   ├── .gitignore
│   ├── main_exp_2_2.py
│   ├── readme.txt
│   ├── requirements.txt
│   ├── stu_upload
│   │   ├── __init__.py
│   │   ├── layers_1.py
│   │   ├── mnist_mlp_cpu.py
│   │   ├── mnist_mlp_demo.py
│   │   └── weight.npy
│   └── test_cpu.py
├── exp_3_1_vgg
│   ├── .gitignore
│   ├── main_exp_3_1.py
│   ├── pool5_dump.npy
│   ├── readme.txt
│   ├── requirements.txt
│   └── stu_upload
│       ├── __init__.py
│       ├── layers_1.py
│       ├── layers_2.py
│       └── vgg_cpu.py
├── exp_3_2_vgg_dlp
│   ├── .gitignore
│   ├── file_list
│   ├── main_exp_3_2.py
│   ├── readme.txt
│   ├── requirements.txt
│   ├── stu_upload
│   │   ├── __init__.py
│   │   └── vgg19_demo.py
│   └── synset_words.txt
├── exp_3_3_style_transfer
│   ├── .gitignore
│   ├── main_exp_3_3.py
│   ├── output
│   │   ├── output_0.jpg
│   │   ├── output_1.jpg
│   │   ├── output_2.jpg
│   │   ├── output_3.jpg
│   │   └── output_4.jpg
│   ├── readme.txt
│   ├── requirements.txt
│   └── stu_upload
│       ├── __init__.py
│       ├── exp_3_3_style_transfer.py
│       ├── layers_1.py
│       ├── layers_2.py
│       └── layers_3.py
├── mnist_data
│   ├── t10k-images-idx3-ubyte
│   ├── t10k-labels-idx1-ubyte
│   ├── train-images-idx3-ubyte
│   └── train-labels-idx1-ubyte
├── 智能计算实验PDF
│   ├── 2.1实验PDF.pdf
│   ├── 2.2实验报告.pdf
│   ├── 3.1实验报告.pdf
│   ├── 3.2实验报告.pdf
│   └── 3.3实验报告.pdf
└── 课件
    ├── ch1.pdf
    ├── ch2.pdf
    ├── ch3.pdf
    ├── ch4.pdf
    ├── ch5.pdf
    ├── ch6.pdf
    ├── ch7.pdf
    └── ch8.pdf
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 |
--------------------------------------------------------------------------------
/B3J060370-zhinengjisuanxitong.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/B3J060370-zhinengjisuanxitong.pdf
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 L. Jiang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AI-Computing-Systems
2 |
3 | 智能计算系统
4 |
5 | http://novel.ict.ac.cn/aics/
--------------------------------------------------------------------------------
/exp_2_1_mnist_mlp/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea
--------------------------------------------------------------------------------
/exp_2_1_mnist_mlp/main_exp_2_1.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from stu_upload.layers_1 import FullyConnectedLayer, ReLULayer, SoftmaxLossLayer
4 | from stu_upload.mnist_mlp_cpu import MNIST_MLP, build_mnist_mlp
5 | import numpy as np
6 | import struct
7 | import time
8 |
9 |
10 | def evaluate(mlp):
11 |     pred_results = np.zeros([mlp.test_data.shape[0]])
12 |     for idx in range(mlp.test_data.shape[0] // mlp.batch_size):
13 |         batch_images = mlp.test_data[idx * mlp.batch_size:(idx + 1) * mlp.batch_size, :-1]
14 |         prob = mlp.forward(batch_images)
15 |         pred_labels = np.argmax(prob, axis=1)
16 |         pred_results[idx * mlp.batch_size:(idx + 1) * mlp.batch_size] = pred_labels
17 |     if mlp.test_data.shape[0] % mlp.batch_size > 0:
18 |         last_batch = mlp.test_data.shape[0] // mlp.batch_size * mlp.batch_size
19 |         batch_images = mlp.test_data[last_batch:, :-1]
20 |         prob = mlp.forward(batch_images)
21 |         pred_labels = np.argmax(prob, axis=1)
22 |         pred_results[last_batch:] = pred_labels
23 |     accuracy = np.mean(pred_results == mlp.test_data[:, -1])
24 |     print('Accuracy in test set: %f' % accuracy)
25 |
26 |
27 | if __name__ == '__main__':
28 |     mlp = build_mnist_mlp()
29 |     evaluate(mlp)
30 |
--------------------------------------------------------------------------------
/exp_2_1_mnist_mlp/readme.txt:
--------------------------------------------------------------------------------
1 | Complete the layers_1.py and mnist_mlp_cpu.py files in stu_upload, then run main_exp_2_1.py to run the experiment.
--------------------------------------------------------------------------------
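A note on the tail-batch arithmetic in evaluate() above: floor division pairs with a remainder slice, so the loop covers the full batches and the final slice covers the leftovers. A standalone sketch of the index arithmetic with hypothetical sizes (plain numpy):

import numpy as np

test_rows, batch_size = 10050, 100           # hypothetical sizes
full = test_rows // batch_size               # 100 full batches
start_of_tail = full * batch_size            # tail starts at row 10000
tail = np.arange(test_rows)[start_of_tail:]  # rows 10000..10049
assert test_rows % batch_size == 50 and len(tail) == 50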
/exp_2_1_mnist_mlp/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy~=1.19.4
--------------------------------------------------------------------------------
/exp_2_1_mnist_mlp/stu_upload/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_2_1_mnist_mlp/stu_upload/__init__.py
--------------------------------------------------------------------------------
/exp_2_1_mnist_mlp/stu_upload/layers_1.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import numpy as np
3 | import struct
4 | import os
5 | import time
6 |
7 |
8 | class FullyConnectedLayer(object):
9 |     def __init__(self, num_input, num_output):  # fully connected layer, initialization
10 |         self.num_input = num_input
11 |         self.num_output = num_output
12 |         print('\tFully connected layer with input %d, output %d.' % (self.num_input, self.num_output))
13 |
14 |     def init_param(self, std=0.01):  # parameter initialization
15 |         self.weight = np.random.normal(loc=0.0, scale=std, size=(self.num_input, self.num_output))
16 |         self.bias = np.zeros([1, self.num_output])
17 |
18 |     def forward(self, input):  # forward pass
19 |         start_time = time.time()
20 |         self.input = input
21 |         # TODO: forward pass of the fully connected layer, compute the output
22 |         self.output = np.dot(self.input, self.weight) + self.bias
23 |         return self.output
24 |
25 |     def backward(self, top_diff):  # backward pass
26 |         # TODO: backward pass of the fully connected layer, compute the parameter gradients and the diff passed down
27 |         self.d_weight = np.dot(self.input.T, top_diff)
28 |         self.d_bias = np.dot(np.ones([1, self.input.shape[0]]), top_diff)
29 |         bottom_diff = np.dot(top_diff, self.weight.T)
30 |         return bottom_diff
31 |
32 |     def update_param(self, lr):  # parameter update
33 |         # TODO: update the fully connected layer parameters using the gradients
34 |         self.weight = self.weight - lr * self.d_weight
35 |         self.bias = self.bias - lr * self.d_bias
36 |
37 |     def load_param(self, weight, bias):  # load parameters
38 |         assert self.weight.shape == weight.shape
39 |         assert self.bias.shape == bias.shape
40 |         self.weight = weight
41 |         self.bias = bias
42 |
43 |     def save_param(self):  # save parameters
44 |         return self.weight, self.bias
45 |
46 |
47 | class ReLULayer(object):
48 |     def __init__(self):
49 |         print('\tReLU layer.')
50 |
51 |     def forward(self, input):  # forward pass
52 |         start_time = time.time()
53 |         self.input = input
54 |         # TODO: forward pass of the ReLU layer, compute the output
55 |         output = np.maximum(0, self.input)
56 |         return output
57 |
58 |     def backward(self, top_diff):  # backward pass
59 |         # TODO: backward pass of the ReLU layer, compute the diff passed down
60 |         bottom_diff = top_diff * (self.input > 0)
61 |         return bottom_diff
62 |
63 |
64 | class SoftmaxLossLayer(object):
65 |     def __init__(self):
66 |         print('\tSoftmax loss layer.')
67 |
68 |     def forward(self, input):  # forward pass
69 |         # TODO: forward pass of the softmax loss layer, compute the output
70 |         input_max = np.max(input, axis=1, keepdims=True)
71 |         input_exp = np.exp(input - input_max)
72 |         self.prob = input_exp / np.sum(input_exp, axis=1, keepdims=True)
73 |         return self.prob
74 |
75 |     def get_loss(self, label):  # compute the loss
76 |         self.batch_size = self.prob.shape[0]
77 |         self.label_onehot = np.zeros_like(self.prob)
78 |         self.label_onehot[np.arange(self.batch_size), label] = 1.0
79 |         loss = -np.sum(np.log(self.prob) * self.label_onehot) / self.batch_size
80 |         return loss
81 |
82 |     def backward(self):  # backward pass
83 |         # TODO: backward pass of the softmax loss layer, compute the diff passed down
84 |         bottom_diff = (self.prob - self.label_onehot) / self.batch_size
85 |         return bottom_diff
86 |
--------------------------------------------------------------------------------
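The backward formulas in layers_1.py above can be validated with a finite-difference check; a minimal self-contained sketch (plain numpy, toy shapes chosen here purely for illustration):

import numpy as np

np.random.seed(0)
x = np.random.randn(4, 3)           # batch of 4 samples, 3 features
w = np.random.randn(3, 2)
top_diff = np.random.randn(4, 2)    # upstream gradient

d_weight = np.dot(x.T, top_diff)    # same formula as FullyConnectedLayer.backward
eps = 1e-6
num = np.zeros_like(w)
for i in range(w.shape[0]):
    for j in range(w.shape[1]):
        w_pos, w_neg = w.copy(), w.copy()
        w_pos[i, j] += eps
        w_neg[i, j] -= eps
        # treat loss = sum(output * top_diff); its dw should equal d_weight
        num[i, j] = (np.sum(np.dot(x, w_pos) * top_diff) -
                     np.sum(np.dot(x, w_neg) * top_diff)) / (2 * eps)
assert np.allclose(d_weight, num, atol=1e-4)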
/exp_2_1_mnist_mlp/stu_upload/mnist_mlp_cpu.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import numpy as np
3 | import struct
4 | import os
5 | import time
6 |
7 | from stu_upload.layers_1 import FullyConnectedLayer, ReLULayer, SoftmaxLossLayer
8 |
9 | MNIST_DIR = "../mnist_data"
10 | TRAIN_DATA = "train-images-idx3-ubyte"
11 | TRAIN_LABEL = "train-labels-idx1-ubyte"
12 | TEST_DATA = "t10k-images-idx3-ubyte"
13 | TEST_LABEL = "t10k-labels-idx1-ubyte"
14 |
15 |
16 | def show_matrix(mat, name):
17 |     # print(name + str(mat.shape) + ' mean %f, std %f' % (mat.mean(), mat.std()))
18 |     pass
19 |
20 |
21 | class MNIST_MLP(object):
22 |     def __init__(self, batch_size=100, input_size=784, hidden1=32, hidden2=16, out_classes=10, lr=0.01, max_epoch=1,
23 |                  print_iter=100):
24 |         self.batch_size = batch_size
25 |         self.input_size = input_size
26 |         self.hidden1 = hidden1
27 |         self.hidden2 = hidden2
28 |         self.out_classes = out_classes
29 |         self.lr = lr
30 |         self.max_epoch = max_epoch
31 |         self.print_iter = print_iter
32 |
33 |     def load_mnist(self, file_dir, is_images=True):
34 |         # Read binary data
35 |         bin_file = open(file_dir, 'rb')
36 |         bin_data = bin_file.read()
37 |         bin_file.close()
38 |         # Parse the file header
39 |         if is_images:
40 |             # Read images
41 |             fmt_header = '>iiii'
42 |             magic, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, 0)
43 |         else:
44 |             # Read labels
45 |             fmt_header = '>ii'
46 |             magic, num_images = struct.unpack_from(fmt_header, bin_data, 0)
47 |             num_rows, num_cols = 1, 1
48 |         data_size = num_images * num_rows * num_cols
49 |         mat_data = struct.unpack_from('>' + str(data_size) + 'B', bin_data, struct.calcsize(fmt_header))
50 |         mat_data = np.reshape(mat_data, [num_images, num_rows * num_cols])
51 |         print('Load images from %s, number: %d, data shape: %s' % (file_dir, num_images, str(mat_data.shape)))
52 |         return mat_data
53 |
54 |     def load_data(self):
55 |         # TODO: call load_mnist to read and preprocess the MNIST train/test images and labels
56 |         print('Loading MNIST data from files...')
57 |         train_images = self.load_mnist(os.path.join(MNIST_DIR, TRAIN_DATA), True)
58 |         train_labels = self.load_mnist(os.path.join(MNIST_DIR, TRAIN_LABEL), False)
59 |         test_images = self.load_mnist(os.path.join(MNIST_DIR, TEST_DATA), True)
60 |         test_labels = self.load_mnist(os.path.join(MNIST_DIR, TEST_LABEL), False)
61 |         self.train_data = np.append(train_images, train_labels, axis=1)
62 |         self.test_data = np.append(test_images, test_labels, axis=1)
63 |         # self.test_data = np.concatenate((self.train_data, self.test_data), axis=0)
64 |
65 |     def shuffle_data(self):
66 |         print('Randomly shuffle MNIST data...')
67 |         np.random.shuffle(self.train_data)
68 |
69 |     def build_model(self):  # build the network
70 |         # TODO: build the three-layer network
71 |         print('Building multi-layer perception model...')
72 |         self.fc1 = FullyConnectedLayer(self.input_size, self.hidden1)
73 |         self.relu1 = ReLULayer()
74 |         self.fc2 = FullyConnectedLayer(self.hidden1, self.hidden2)
75 |         self.relu2 = ReLULayer()
76 |         self.fc3 = FullyConnectedLayer(self.hidden2, self.out_classes)
77 |         self.softmax = SoftmaxLossLayer()
78 |         self.update_layer_list = [self.fc1, self.fc2, self.fc3]
79 |
80 |     def init_model(self):
81 |         print('Initializing parameters of each layer in MLP...')
82 |         for layer in self.update_layer_list:
83 |             layer.init_param()
84 |
85 |     def load_model(self, param_dir):
86 |         print('Loading parameters from file ' + param_dir)
87 |         params = np.load(param_dir, allow_pickle=True).item()
88 |         self.fc1.load_param(params['w1'], params['b1'])
89 |         self.fc2.load_param(params['w2'], params['b2'])
90 |         self.fc3.load_param(params['w3'], params['b3'])
91 |
92 |     def save_model(self, param_dir):
93 |         print('Saving parameters to file ' + param_dir)
94 |         params = {}
95 |         params['w1'], params['b1'] = self.fc1.save_param()
96 |         params['w2'], params['b2'] = self.fc2.save_param()
97 |         params['w3'], params['b3'] = self.fc3.save_param()
98 |         np.save(param_dir, params)
99 |
100 |     def forward(self, input):  # forward pass of the network
101 |         # TODO: forward pass of the network
102 |         h1 = self.fc1.forward(input)
103 |         h1 = self.relu1.forward(h1)
104 |         h2 = self.fc2.forward(h1)
105 |         h2 = self.relu2.forward(h2)
106 |         h3 = self.fc3.forward(h2)
107 |         prob = self.softmax.forward(h3)
108 |         return prob
109 |
110 |     def backward(self):  # backward pass of the network
111 |         # TODO: backward pass of the network
112 |         dloss = self.softmax.backward()
113 |         dh3 = self.fc3.backward(dloss)
114 |         dh2 = self.relu2.backward(dh3)
115 |         dh2 = self.fc2.backward(dh2)
116 |         dh1 = self.relu1.backward(dh2)
117 |         dh1 = self.fc1.backward(dh1)
118 |
119 |     def update(self, lr):
120 |         for layer in self.update_layer_list:
121 |             layer.update_param(lr)
122 |
123 |     def train(self):
124 |         max_batch = self.train_data.shape[0] // self.batch_size
125 |         print('Start training...')
126 |         for idx_epoch in range(self.max_epoch):
127 |             self.shuffle_data()
128 |             for idx_batch in range(max_batch):
129 |                 batch_images = self.train_data[idx_batch * self.batch_size:(idx_batch + 1) * self.batch_size, :-1]
130 |                 batch_labels = self.train_data[idx_batch * self.batch_size:(idx_batch + 1) * self.batch_size, -1]
131 |                 prob = self.forward(batch_images)
132 |                 loss = self.softmax.get_loss(batch_labels)
133 |                 self.backward()
134 |                 self.update(self.lr)
135 |                 if idx_batch % self.print_iter == 0:
136 |                     print('Epoch %d, iter %d, loss: %.6f' % (idx_epoch, idx_batch, loss))
137 |
138 |     def evaluate(self):
139 |         pred_results = np.zeros([self.test_data.shape[0]])
140 |         for idx in range(self.test_data.shape[0] // self.batch_size):
141 |             batch_images = self.test_data[idx * self.batch_size:(idx + 1) * self.batch_size, :-1]
142 |             start = time.time()
143 |             prob = self.forward(batch_images)
144 |             end = time.time()
145 |             print("inferencing time: %f" % (end - start))
146 |             pred_labels = np.argmax(prob, axis=1)
147 |             pred_results[idx * self.batch_size:(idx + 1) * self.batch_size] = pred_labels
148 |         accuracy = np.mean(pred_results == self.test_data[:, -1])
149 |         print('Accuracy in test set: %f' % accuracy)
150 |
151 |
152 | def build_mnist_mlp(param_dir='weight.npy'):
153 |     h1, h2, e = 32, 16, 10
154 |     mlp = MNIST_MLP(hidden1=h1, hidden2=h2, max_epoch=e)
155 |     mlp.load_data()
156 |     mlp.build_model()
157 |     mlp.init_model()
158 |     mlp.train()
159 |     mlp.save_model('mlp-%d-%d-%depoch.npy' % (h1, h2, e))
160 |     # mlp.load_model('mlp-%d-%d-%depoch.npy' % (h1, h2, e))
161 |     return mlp
162 |
163 |
164 | if __name__ == '__main__':
165 |     mlp = build_mnist_mlp()
166 |     mlp.evaluate()
167 |
--------------------------------------------------------------------------------
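load_mnist() above depends on the IDX header layout (big-endian int32 fields: magic number, item count, then rows/cols for image files, with the raw bytes starting right after the header). A tiny self-contained check of that struct arithmetic, using a fabricated header:

import struct

# Fake IDX image header: magic 2051, 10 images of 28x28 (big-endian int32s).
header = struct.pack('>iiii', 2051, 10, 28, 28)
magic, num, rows, cols = struct.unpack_from('>iiii', header, 0)
assert (magic, num, rows, cols) == (2051, 10, 28, 28)
assert struct.calcsize('>iiii') == 16  # pixel data starts at byte offset 16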
/exp_2_2_mnist_mlp_dlp/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea
--------------------------------------------------------------------------------
/exp_2_2_mnist_mlp_dlp/main_exp_2_2.py:
--------------------------------------------------------------------------------
1 | from stu_upload.mnist_mlp_demo import MNIST_MLP, HIDDEN1, HIDDEN2, OUT
2 | import test_cpu
3 | import time
4 | import numpy as np
5 |
6 |
7 | def evaluate(mlp):
8 |     pred_results = np.zeros([mlp.test_data.shape[0]])
9 |     for idx in range(mlp.test_data.shape[0] // mlp.batch_size):
10 |         # print("batch %d"%idx)
11 |         batch_images = mlp.test_data[idx * mlp.batch_size:(idx + 1) * mlp.batch_size, :-1]
12 |         data = batch_images.flatten().tolist()
13 |         mlp.net.setInputData(data)
14 |         start = time.time()
15 |         mlp.forward()
16 |         end = time.time()
17 |         print('inferencing time: %f' % (end - start))
18 |         prob = mlp.net.getOutputData()
19 |         prob = np.array(prob).reshape((mlp.batch_size, mlp.out_classes))
20 |         pred_labels = np.argmax(prob, axis=1)
21 |         pred_results[idx * mlp.batch_size:(idx + 1) * mlp.batch_size] = pred_labels
22 |     if mlp.test_data.shape[0] % mlp.batch_size > 0:
23 |         last_batch = mlp.test_data.shape[0] // mlp.batch_size * mlp.batch_size
24 |         batch_images = mlp.test_data[last_batch:, :-1]
25 |         data = batch_images.flatten().tolist()
26 |         mlp.net.setInputData(data)
27 |         mlp.forward()
28 |         prob = mlp.net.getOutputData()
29 |         pred_labels = np.argmax(prob, axis=1)
30 |         pred_results[last_batch:] = pred_labels
31 |     accuracy = np.mean(pred_results == mlp.test_data[:, -1])
32 |     print('Accuracy in test set: %f' % accuracy)
33 |
34 |
35 | def run_mnist():
36 |     batch_size = 10000
37 |     h1, h2, c = HIDDEN1, HIDDEN2, OUT
38 |     mlp = MNIST_MLP()
39 |     mlp.build_model(batch_size=batch_size, hidden1=h1, hidden2=h2, out_classes=c,
40 |                     quant_param_path='../mnist_mlp_quant_param.npz')
41 |     model_path = 'stu_upload/weight.npy'
42 |     test_data = '../mnist_data/t10k-images-idx3-ubyte'
43 |     test_label = '../mnist_data/t10k-labels-idx1-ubyte'
44 |     mlp.load_data(test_data, test_label)
45 |     mlp.load_model(model_path)
46 |
47 |     for i in range(10):
48 |         evaluate(mlp)
49 |
50 |
51 | if __name__ == '__main__':
52 |     print('-------- TEST CPU --------')
53 |     test_cpu.run_test()
54 |     print('-------- TEST DLP --------')
55 |     run_mnist()
56 |
--------------------------------------------------------------------------------
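run_mnist() above hands build_model() a path to pre-computed quantization parameters (position/scale pairs stored in mnist_mlp_quant_param.npz). As rough intuition only, and not necessarily pycnml's exact formula, a position-and-scale int8 scheme behaves like this minimal numpy sketch (quantize_int8/dequantize_int8 are hypothetical helpers):

import numpy as np

def quantize_int8(x, position, scale):
    # illustrative only: scale into the int8 range, then round and clip
    q = np.round(x * scale / (2.0 ** position))
    return np.clip(q, -128, 127).astype(np.int8)

def dequantize_int8(q, position, scale):
    return q.astype(np.float32) * (2.0 ** position) / scale

x = np.array([0.5, -0.25, 1.0], dtype=np.float32)
x_hat = dequantize_int8(quantize_int8(x, 0, 100.0), 0, 100.0)
# x_hat approximates x at int8 resolution; the DLP applies its own scheme in hardware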
/exp_2_2_mnist_mlp_dlp/readme.txt:
--------------------------------------------------------------------------------
1 | Complete the mnist_mlp_demo.py file in stu_upload, copy the layers_1.py and mnist_mlp_cpu.py implemented in experiment 2-1 plus the trained parameters into the stu_upload directory, then run main_exp_2_2.py to run the experiment.
2 |
3 | Notes:
4 | The model parameters trained in experiment 2-1, e.g. mlp-32-16-10epoch.npy, must be renamed to weight.npy, otherwise they will not be recognized.
5 | The uploaded CPU implementation of the mnist mlp network, i.e. the mnist_mlp_cpu.py finished in experiment 2-1, needs the following changes:
6 |
7 | Modify the contents of the build_mnist_mlp() function:
8 | 1. Change batch_size:
9 |    change mlp = MNIST_MLP(hidden1=h1, hidden2=h2, max_epoch=e)
10 |    to mlp = MNIST_MLP(batch_size=10000, hidden1=h1, hidden2=h2, max_epoch=e)
11 |
12 | 2. Comment out the two training calls
13 |    mlp.train()
14 |    and
15 |    mlp.save_model('mlp-%d-%d-%depoch.npy' % (h1, h2, e))
16 |    then uncomment
17 |    mlp.load_model('mlp-%d-%d-%depoch.npy' % (h1, h2, e))
18 |    and change its argument to param_dir:
19 |    mlp.load_model(param_dir)
--------------------------------------------------------------------------------
/exp_2_2_mnist_mlp_dlp/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy~=1.19.4
--------------------------------------------------------------------------------
/exp_2_2_mnist_mlp_dlp/stu_upload/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_2_2_mnist_mlp_dlp/stu_upload/__init__.py
--------------------------------------------------------------------------------
/exp_2_2_mnist_mlp_dlp/stu_upload/layers_1.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import numpy as np
3 | import struct
4 | import os
5 | import time
6 |
7 |
8 | class FullyConnectedLayer(object):
9 |     def __init__(self, num_input, num_output):  # fully connected layer, initialization
10 |         self.num_input = num_input
11 |         self.num_output = num_output
12 |         print('\tFully connected layer with input %d, output %d.' % (self.num_input, self.num_output))
13 |
14 |     def init_param(self, std=0.01):  # parameter initialization
15 |         self.weight = np.random.normal(loc=0.0, scale=std, size=(self.num_input, self.num_output))
16 |         self.bias = np.zeros([1, self.num_output])
17 |
18 |     def forward(self, input):  # forward pass
19 |         start_time = time.time()
20 |         self.input = input
21 |         # TODO: forward pass of the fully connected layer, compute the output
22 |         self.output = np.dot(self.input, self.weight) + self.bias
23 |         return self.output
24 |
25 |     def backward(self, top_diff):  # backward pass
26 |         # TODO: backward pass of the fully connected layer, compute the parameter gradients and the diff passed down
27 |         self.d_weight = np.dot(self.input.T, top_diff)
28 |         self.d_bias = np.dot(np.ones([1, self.input.shape[0]]), top_diff)
29 |         bottom_diff = np.dot(top_diff, self.weight.T)
30 |         return bottom_diff
31 |
32 |     def update_param(self, lr):  # parameter update
33 |         # TODO: update the fully connected layer parameters using the gradients
34 |         self.weight = self.weight - lr * self.d_weight
35 |         self.bias = self.bias - lr * self.d_bias
36 |
37 |     def load_param(self, weight, bias):  # load parameters
38 |         assert self.weight.shape == weight.shape
39 |         assert self.bias.shape == bias.shape
40 |         self.weight = weight
41 |         self.bias = bias
42 |
43 |     def save_param(self):  # save parameters
44 |         return self.weight, self.bias
45 |
46 |
47 | class ReLULayer(object):
48 |     def __init__(self):
49 |         print('\tReLU layer.')
50 |
51 |     def forward(self, input):  # forward pass
52 |         start_time = time.time()
53 |         self.input = input
54 |         # TODO: forward pass of the ReLU layer, compute the output
55 |         output = np.maximum(0, self.input)
56 |         return output
57 |
58 |     def backward(self, top_diff):  # backward pass
59 |         # TODO: backward pass of the ReLU layer, compute the diff passed down
60 |         bottom_diff = top_diff * (self.input > 0)
61 |         return bottom_diff
62 |
63 |
64 | class SoftmaxLossLayer(object):
65 |     def __init__(self):
66 |         print('\tSoftmax loss layer.')
67 |
68 |     def forward(self, input):  # forward pass
69 |         # TODO: forward pass of the softmax loss layer, compute the output
70 |         input_max = np.max(input, axis=1, keepdims=True)
71 |         input_exp = np.exp(input - input_max)
72 |         self.prob = input_exp / np.sum(input_exp, axis=1, keepdims=True)
73 |         return self.prob
74 |
75 |     def get_loss(self, label):  # compute the loss
76 |         self.batch_size = self.prob.shape[0]
77 |         self.label_onehot = np.zeros_like(self.prob)
78 |         self.label_onehot[np.arange(self.batch_size), label] = 1.0
79 |         loss = -np.sum(np.log(self.prob) * self.label_onehot) / self.batch_size
80 |         return loss
81 |
82 |     def backward(self):  # backward pass
83 |         # TODO: backward pass of the softmax loss layer, compute the diff passed down
84 |         bottom_diff = (self.prob - self.label_onehot) / self.batch_size
85 |         return bottom_diff
86 |
--------------------------------------------------------------------------------
/exp_2_2_mnist_mlp_dlp/stu_upload/mnist_mlp_cpu.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import numpy as np
3 | import struct
4 | import os
5 | import time
6 |
7 | from stu_upload.layers_1 import FullyConnectedLayer, ReLULayer, SoftmaxLossLayer
8 |
9 | MNIST_DIR = "../mnist_data"
10 | TRAIN_DATA = "train-images-idx3-ubyte"
11 | TRAIN_LABEL = "train-labels-idx1-ubyte"
12 | TEST_DATA = "t10k-images-idx3-ubyte"
13 | TEST_LABEL = "t10k-labels-idx1-ubyte"
14 |
15 |
16 | def show_matrix(mat, name):
17 |     # print(name + str(mat.shape) + ' mean %f, std %f' % (mat.mean(), mat.std()))
18 |     pass
19 |
20 |
21 | class MNIST_MLP(object):
22 |     def __init__(self, batch_size=100, input_size=784, hidden1=32, hidden2=16, out_classes=10, lr=0.01, max_epoch=1,
23 |                  print_iter=100):
24 |         self.batch_size = batch_size
25 |         self.input_size = input_size
26 |         self.hidden1 = hidden1
27 |         self.hidden2 = hidden2
28 |         self.out_classes = out_classes
29 |         self.lr = lr
30 |         self.max_epoch = max_epoch
31 |         self.print_iter = print_iter
32 |
33 |     def load_mnist(self, file_dir, is_images=True):
34 |         # Read binary data
35 |         bin_file = open(file_dir, 'rb')
36 |         bin_data = bin_file.read()
37 |         bin_file.close()
38 |         # Parse the file header
39 |         if is_images:
40 |             # Read images
41 |             fmt_header = '>iiii'
42 |             magic, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, 0)
43 |         else:
44 |             # Read labels
45 |             fmt_header = '>ii'
46 |             magic, num_images = struct.unpack_from(fmt_header, bin_data, 0)
47 |             num_rows, num_cols = 1, 1
48 |         data_size = num_images * num_rows * num_cols
49 |         mat_data = struct.unpack_from('>' + str(data_size) + 'B', bin_data, struct.calcsize(fmt_header))
50 |         mat_data = np.reshape(mat_data, [num_images, num_rows * num_cols])
51 |         print('Load images from %s, number: %d, data shape: %s' % (file_dir, num_images, str(mat_data.shape)))
52 |         return mat_data
53 |
54 |     def load_data(self):
55 |         # TODO: call load_mnist to read and preprocess the MNIST train/test images and labels
56 |         print('Loading MNIST data from files...')
57 |         train_images = self.load_mnist(os.path.join(MNIST_DIR, TRAIN_DATA), True)
58 |         train_labels = self.load_mnist(os.path.join(MNIST_DIR, TRAIN_LABEL), False)
59 |         test_images = self.load_mnist(os.path.join(MNIST_DIR, TEST_DATA), True)
60 |         test_labels = self.load_mnist(os.path.join(MNIST_DIR, TEST_LABEL), False)
61 |         self.train_data = np.append(train_images, train_labels, axis=1)
62 |         self.test_data = np.append(test_images, test_labels, axis=1)
63 |         # self.test_data = np.concatenate((self.train_data, self.test_data), axis=0)
64 |
65 |     def shuffle_data(self):
66 |         print('Randomly shuffle MNIST data...')
67 |         np.random.shuffle(self.train_data)
68 |
69 |     def build_model(self):  # build the network
70 |         # TODO: build the three-layer network
71 |         print('Building multi-layer perception model...')
72 |         self.fc1 = FullyConnectedLayer(self.input_size, self.hidden1)
73 |         self.relu1 = ReLULayer()
74 |         self.fc2 = FullyConnectedLayer(self.hidden1, self.hidden2)
75 |         self.relu2 = ReLULayer()
76 |         self.fc3 = FullyConnectedLayer(self.hidden2, self.out_classes)
77 |         self.softmax = SoftmaxLossLayer()
78 |         self.update_layer_list = [self.fc1, self.fc2, self.fc3]
79 |
80 |     def init_model(self):
81 |         print('Initializing parameters of each layer in MLP...')
82 |         for layer in self.update_layer_list:
83 |             layer.init_param()
84 |
85 |     def load_model(self, param_dir):
86 |         print('Loading parameters from file ' + param_dir)
87 |         params = np.load(param_dir, allow_pickle=True).item()
88 |         self.fc1.load_param(params['w1'], params['b1'])
89 |         self.fc2.load_param(params['w2'], params['b2'])
90 |         self.fc3.load_param(params['w3'], params['b3'])
91 |
92 |     def save_model(self, param_dir):
93 |         print('Saving parameters to file ' + param_dir)
94 |         params = {}
95 |         params['w1'], params['b1'] = self.fc1.save_param()
96 |         params['w2'], params['b2'] = self.fc2.save_param()
97 |         params['w3'], params['b3'] = self.fc3.save_param()
98 |         np.save(param_dir, params)
99 |
100 |     def forward(self, input):  # forward pass of the network
101 |         # TODO: forward pass of the network
102 |         h1 = self.fc1.forward(input)
103 |         h1 = self.relu1.forward(h1)
104 |         h2 = self.fc2.forward(h1)
105 |         h2 = self.relu2.forward(h2)
106 |         h3 = self.fc3.forward(h2)
107 |         prob = self.softmax.forward(h3)
108 |         return prob
109 |
110 |     def backward(self):  # backward pass of the network
111 |         # TODO: backward pass of the network
112 |         dloss = self.softmax.backward()
113 |         dh3 = self.fc3.backward(dloss)
114 |         dh2 = self.relu2.backward(dh3)
115 |         dh2 = self.fc2.backward(dh2)
116 |         dh1 = self.relu1.backward(dh2)
117 |         dh1 = self.fc1.backward(dh1)
118 |
119 |     def
update(self, lr):
120 |         for layer in self.update_layer_list:
121 |             layer.update_param(lr)
122 |
123 |     def train(self):
124 |         max_batch = self.train_data.shape[0] // self.batch_size
125 |         print('Start training...')
126 |         for idx_epoch in range(self.max_epoch):
127 |             self.shuffle_data()
128 |             for idx_batch in range(max_batch):
129 |                 batch_images = self.train_data[idx_batch * self.batch_size:(idx_batch + 1) * self.batch_size, :-1]
130 |                 batch_labels = self.train_data[idx_batch * self.batch_size:(idx_batch + 1) * self.batch_size, -1]
131 |                 prob = self.forward(batch_images)
132 |                 loss = self.softmax.get_loss(batch_labels)
133 |                 self.backward()
134 |                 self.update(self.lr)
135 |                 if idx_batch % self.print_iter == 0:
136 |                     print('Epoch %d, iter %d, loss: %.6f' % (idx_epoch, idx_batch, loss))
137 |
138 |     def evaluate(self):
139 |         pred_results = np.zeros([self.test_data.shape[0]])
140 |         for idx in range(self.test_data.shape[0] // self.batch_size):
141 |             batch_images = self.test_data[idx * self.batch_size:(idx + 1) * self.batch_size, :-1]
142 |             start = time.time()
143 |             prob = self.forward(batch_images)
144 |             end = time.time()
145 |             print("inferencing time: %f" % (end - start))
146 |             pred_labels = np.argmax(prob, axis=1)
147 |             pred_results[idx * self.batch_size:(idx + 1) * self.batch_size] = pred_labels
148 |         accuracy = np.mean(pred_results == self.test_data[:, -1])
149 |         print('Accuracy in test set: %f' % accuracy)
150 |
151 |
152 | def build_mnist_mlp(param_dir='weight.npy'):
153 |     h1, h2, e = 32, 16, 10
154 |     mlp = MNIST_MLP(hidden1=h1, hidden2=h2, max_epoch=e)
155 |     mlp.load_data()
156 |     mlp.build_model()
157 |     mlp.init_model()
158 |     # mlp.train()
159 |     # mlp.save_model('mlp-%d-%d-%depoch.npy' % (h1, h2, e))
160 |     mlp.load_model(param_dir)
161 |     return mlp
162 |
163 |
164 | if __name__ == '__main__':
165 |     mlp = build_mnist_mlp()
166 |     mlp.evaluate()
167 |
--------------------------------------------------------------------------------
/exp_2_2_mnist_mlp_dlp/stu_upload/mnist_mlp_demo.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import pycnml
3 | import time
4 | import numpy as np
5 | import struct
6 | import os
7 |
8 |
9 | class MNIST_MLP(object):
10 |     def __init__(self):
11 |         # set up net
12 |         self.net = pycnml.CnmlNet(16)
13 |         self.input_quant_params = []
14 |         self.filter_quant_params = []
15 |
16 |     def build_model(self, batch_size=100, input_size=784,
17 |                     hidden1=100, hidden2=100, out_classes=10,
18 |                     quant_param_path='../../mnist_mlp_quant_param.npz'):  # build the network
19 |         self.batch_size = batch_size
20 |         self.out_classes = out_classes
21 |         # When creating the fully connected layers, the matching quantization parameters must be passed in.
22 |         # To simplify the experiment, the quantization parameters of the network are provided and read into
23 |         # input_quant_params and filter_quant_params. When building the network, feed input_quant_params to
24 |         # each fully connected layer in order; when loading parameters, feed the filter_quant_params values in order too.
25 |
26 |         # load the quantization parameters
27 |         params = np.load(quant_param_path)
28 |         input_params = params['input']
29 |         filter_params = params['filter']
30 |         for i in range(0, len(input_params), 2):
31 |             self.input_quant_params.append(pycnml.QuantParam(int(input_params[i]), float(input_params[i + 1])))
32 |         for i in range(0, len(filter_params), 2):
33 |             self.filter_quant_params.append(pycnml.QuantParam(int(filter_params[i]), float(filter_params[i + 1])))
34 |
35 |         # TODO: build the three-layer network with pycnml
36 |         self.net.setInputShape(batch_size, input_size, 1, 1)
37 |         # fc1
38 |         self.net.createMlpLayer('fc1', hidden1, self.input_quant_params[0])
39 |         # relu1
40 |         self.net.createReLuLayer('relu1')
41 |         # fc2
42 |         self.net.createMlpLayer('fc2', hidden2, self.input_quant_params[1])
43 |         # relu2
44 |         self.net.createReLuLayer('relu2')
45 |         # fc3
46 |         self.net.createMlpLayer('fc3', out_classes, self.input_quant_params[2])
47 |         # softmax
48 |         self.net.createSoftmaxLayer('softmax', 1)
49 |
50 |     def load_mnist(self, file_dir, is_images=True):
51 |         # Read binary data
52 |         bin_file = open(file_dir, 'rb')
53 |         bin_data = bin_file.read()
54 |         bin_file.close()
55 |         # Parse the file header
56 |         if is_images:
57 |             # Read images
58 |             fmt_header = '>iiii'
59 |             magic, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, 0)
60 |         else:
61 |             # Read labels
62 |             fmt_header = '>ii'
63 |             magic, num_images = struct.unpack_from(fmt_header, bin_data, 0)
64 |             num_rows, num_cols = 1, 1
65 |         data_size = num_images * num_rows * num_cols
66 |         mat_data = struct.unpack_from('>' + str(data_size) + 'B', bin_data, struct.calcsize(fmt_header))
67 |         mat_data = np.reshape(mat_data, [num_images, num_rows * num_cols])
68 |         print('Load images from %s, number: %d, data shape: %s' % (file_dir, num_images, str(mat_data.shape)))
69 |         return mat_data
70 |
71 |     def load_data(self, data_path, label_path):
72 |         print('Loading MNIST data from files...')
73 |         test_images = self.load_mnist(data_path, True)
74 |         test_labels = self.load_mnist(label_path, False)
75 |         self.test_data = np.append(test_images, test_labels, axis=1)
76 |
77 |     def load_model(self, param_dir):  # load parameters
78 |         # TODO: load parameters for each of the three fully connected layers via the pycnml API
79 |         print('Loading parameters from file ' + param_dir)
80 |         params = np.load(param_dir, allow_pickle=True).item()
81 |
82 |         weight1 = np.transpose(params['w1'], [1, 0]).flatten().astype(np.float64)
83 |         bias1 = params['b1'].flatten().astype(np.float64)
84 |         self.net.loadParams(0, weight1, bias1, self.filter_quant_params[0])
85 |
86 |         weight2 = np.transpose(params['w2'], [1, 0]).flatten().astype(np.float64)
87 |         bias2 = params['b2'].flatten().astype(np.float64)
88 |         self.net.loadParams(2, weight2, bias2, self.filter_quant_params[1])
89 |
90 |         weight3 = np.transpose(params['w3'], [1, 0]).flatten().astype(np.float64)
91 |         bias3 = params['b3'].flatten().astype(np.float64)
92 |         self.net.loadParams(4, weight3, bias3, self.filter_quant_params[2])
93 |
94 |     def forward(self):
95 |         return self.net.forward()
96 |
97 |     def evaluate(self):
98 |         pred_results = np.zeros([self.test_data.shape[0]])
99 |         for idx in range(self.test_data.shape[0] // self.batch_size):
100 |             batch_images = self.test_data[idx * self.batch_size:(idx + 1) * self.batch_size, :-1]
101 |             data = batch_images.flatten().tolist()
102 |             self.net.setInputData(data)
103 |             start = time.time()
104 |             self.forward()
105 |             end = time.time()
106 |             print('inferencing time: %f' % (end - start))
107 |             prob = self.net.getOutputData()
108 |             prob = np.array(prob).reshape((self.batch_size, self.out_classes))
109 |             pred_labels = np.argmax(prob, axis=1)
110 |             pred_results[idx * self.batch_size:(idx + 1) * self.batch_size] = pred_labels
111 |         if self.test_data.shape[0] % self.batch_size > 0:
112 |             last_batch = self.test_data.shape[0] // self.batch_size * self.batch_size
113 |             batch_images = self.test_data[last_batch:, :-1]
114 |             data = batch_images.flatten().tolist()
115 |             self.net.setInputData(data)
116 |             self.forward()
117 |             prob = self.net.getOutputData()
118 |             pred_labels = np.argmax(prob, axis=1)
119 |             pred_results[last_batch:] = pred_labels
120 |         accuracy = np.mean(pred_results == self.test_data[:, -1])
121 |         print('Accuracy in test set: %f' % accuracy)
122 |
123 |
124 | HIDDEN1 = 32
125 | HIDDEN2 = 16
126 | OUT = 10
127 |
128 |
129 | def run_mnist():
130 |     batch_size = 10000
131 |     h1, h2, c = HIDDEN1, HIDDEN2, OUT
132 |     mlp = MNIST_MLP()
133 |     mlp.build_model(batch_size=batch_size, hidden1=h1, hidden2=h2, out_classes=c)
134 |     model_path = 'weight.npy'
135 |     test_data = '../../mnist_data/t10k-images-idx3-ubyte'
136 |     test_label = '../../mnist_data/t10k-labels-idx1-ubyte'
137 |     mlp.load_data(test_data, test_label)
138 |     mlp.load_model(model_path)
139 |
140 |     for i in range(10):
141 |         mlp.evaluate()
142 |
143 |
144 | if __name__ == '__main__':
145 |     run_mnist()
146 |
--------------------------------------------------------------------------------
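A note on the index arguments in load_model() above: loadParams() targets layers by creation order, and the ReLU layers occupy the odd slots, so only indices 0, 2 and 4 carry weights. The mapping, written out as a small dict for reference (mirroring the creation order in build_model):

layer_index = {
    'fc1': 0,      # loadParams(0, ...)
    'relu1': 1,
    'fc2': 2,      # loadParams(2, ...)
    'relu2': 3,
    'fc3': 4,      # loadParams(4, ...)
    'softmax': 5,
}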
/exp_2_2_mnist_mlp_dlp/stu_upload/weight.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_2_2_mnist_mlp_dlp/stu_upload/weight.npy
--------------------------------------------------------------------------------
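weight.npy above holds a plain Python dict of arrays (w1/b1 through w3/b3, as written by save_model in experiment 2-1). Under the pinned numpy~=1.19.4, such pickled object arrays only load with allow_pickle=True; a minimal roundtrip sketch with toy shapes:

import numpy as np

params = {'w1': np.zeros((784, 32)), 'b1': np.zeros((1, 32))}  # toy shapes
np.save('demo_weight.npy', params)                    # pickles the dict
loaded = np.load('demo_weight.npy', allow_pickle=True).item()
assert loaded['w1'].shape == (784, 32)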
/exp_2_2_mnist_mlp_dlp/test_cpu.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from stu_upload.layers_1 import FullyConnectedLayer, ReLULayer, SoftmaxLossLayer
4 | from stu_upload.mnist_mlp_cpu import MNIST_MLP, build_mnist_mlp
5 | import numpy as np
6 | import struct
7 | import time
8 |
9 |
10 | def evaluate(mlp):
11 |     pred_results = np.zeros([mlp.test_data.shape[0]])
12 |     for idx in range(mlp.test_data.shape[0] // mlp.batch_size):
13 |         batch_images = mlp.test_data[idx * mlp.batch_size:(idx + 1) * mlp.batch_size, :-1]
14 |         start = time.time()
15 |         prob = mlp.forward(batch_images)
16 |         end = time.time()
17 |         print("inferencing time: %f" % (end - start))
18 |         pred_labels = np.argmax(prob, axis=1)
19 |         pred_results[idx * mlp.batch_size:(idx + 1) * mlp.batch_size] = pred_labels
20 |     if mlp.test_data.shape[0] % mlp.batch_size > 0:
21 |         last_batch = mlp.test_data.shape[0] // mlp.batch_size * mlp.batch_size
22 |         batch_images = mlp.test_data[last_batch:, :-1]
23 |         prob = mlp.forward(batch_images)
24 |         pred_labels = np.argmax(prob, axis=1)
25 |         pred_results[last_batch:] = pred_labels
26 |     accuracy = np.mean(pred_results == mlp.test_data[:, -1])
27 |     print('Accuracy in test set: %f' % accuracy)
28 |
29 |
30 | def run_test():
31 |     mlp = build_mnist_mlp('stu_upload/weight.npy')
32 |     evaluate(mlp)
33 |
34 |
35 | if __name__ == '__main__':
36 |     run_test()
37 |
--------------------------------------------------------------------------------
/exp_3_1_vgg/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea
--------------------------------------------------------------------------------
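computeMse() in main_exp_3_1.py below accumulates squared errors in explicit Python loops; for reference, the same quantity as a single numpy expression (a standalone helper, equivalent for flat float inputs):

import numpy as np

def compute_mse_vectorized(data1, data2):
    a = np.asarray(data1, dtype=np.float64)
    b = np.asarray(data2, dtype=np.float64)
    return np.mean((a - b) ** 2)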
/exp_3_1_vgg/main_exp_3_1.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | from stu_upload.vgg_cpu import VGG19
3 | import numpy as np
4 | import struct
5 | import os
6 | import scipy.io
7 | import time
8 |
9 |
10 | def computeMse(data1, data2):
11 |     errors = []
12 |     for i in range(len(data1)):
13 |         errors.append(data1[i] - data2[i])
14 |
15 |     squared_error = []
16 |     for val in errors:
17 |         squared_error.append(pow(val, 2))
18 |
19 |     return sum(squared_error) / len(squared_error)
20 |
21 |
22 | def forward(vgg):
23 |     print('Inferencing...')
24 |     start_time = time.time()
25 |     current = vgg.input_image
26 |     pool5 = np.array([])
27 |     for idx in range(len(vgg.param_layer_name)):
28 |         print('Inferencing layer: ' + vgg.param_layer_name[idx])
29 |         current = vgg.layers[vgg.param_layer_name[idx]].forward(current)
30 |         if 'pool5' in vgg.param_layer_name[idx]:
31 |             pool5 = current
32 |     print('Inference time: %f' % (time.time() - start_time))
33 |     return current, pool5
34 |
35 |
36 | def check_pool5(stu_pool5):
37 |     data = np.load('pool5_dump.npy')
38 |     pool5_mse = computeMse(stu_pool5.flatten(), data.flatten())
39 |     print('test pool5 mse: %f' % pool5_mse)
40 |
41 |     if pool5_mse < 0.003:
42 |         print('CHECK POOL5 PASS.')
43 |     else:
44 |         print('CHECK POOL5 FAILED.')
45 |         exit()
46 |
47 |
48 | def evaluate(vgg):
49 |     prob, pool5 = forward(vgg)
50 |     top1 = np.argmax(prob[0])
51 |     print('Classification result: id = %d, prob = %f' % (top1, prob[0, top1]))
52 |     return pool5
53 |
54 |
55 | if __name__ == '__main__':
56 |     vgg = VGG19(param_path='../imagenet-vgg-verydeep-19.mat')
57 |     vgg.build_model()
58 |     vgg.init_model()
59 |     vgg.load_model()
60 |     vgg.load_image('../cat1.jpg')
61 |     pool5 = evaluate(vgg)
62 |     print('-------------------------------')
63 |     check_pool5(pool5)
64 |
--------------------------------------------------------------------------------
/exp_3_1_vgg/pool5_dump.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_3_1_vgg/pool5_dump.npy
--------------------------------------------------------------------------------
/exp_3_1_vgg/readme.txt:
--------------------------------------------------------------------------------
1 | Complete the layers_1.py, layers_2.py and vgg_cpu.py files in stu_upload, then run main_exp_3_1.py to run the experiment.
--------------------------------------------------------------------------------
/exp_3_1_vgg/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy~=1.19.4
2 | scipy~=1.5.4
--------------------------------------------------------------------------------
/exp_3_1_vgg/stu_upload/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_3_1_vgg/stu_upload/__init__.py
--------------------------------------------------------------------------------
/exp_3_1_vgg/stu_upload/layers_1.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import numpy as np
3 | import struct
4 | import os
5 | import time
6 |
7 |
8 | class FullyConnectedLayer(object):
9 |     def __init__(self, num_input, num_output):  # 全连接层初始化
10 |         self.num_input = num_input
11 |         self.num_output = num_output
12 |         print('\tFully connected layer with input %d, output %d.'
% (self.num_input, self.num_output)) 13 | 14 | def init_param(self, std=0.01): # 参数初始化 15 | self.weight = np.random.normal(loc=0.0, scale=std, size=(self.num_input, self.num_output)) 16 | self.bias = np.zeros([1, self.num_output]) 17 | 18 | def forward(self, input): # 前向传播计算 19 | start_time = time.time() 20 | self.input = input 21 | # TODO:全连接层的前向传播,计算输出结果 22 | self.output = ________________ 23 | return self.output 24 | 25 | def backward(self, top_diff): # 反向传播的计算 26 | # TODO:全连接层的反向传播,计算参数梯度和本层损失 27 | self.d_weight = ________________ 28 | self.d_bias = ________________ 29 | bottom_diff = ________________ 30 | return bottom_diff 31 | 32 | def update_param(self, lr): # 参数更新 33 | # TODO:对全连接层参数利用参数进行更新 34 | self.weight = ________________ 35 | self.bias = ________________ 36 | 37 | def load_param(self, weight, bias): # 参数加载 38 | assert self.weight.shape == weight.shape 39 | assert self.bias.shape == bias.shape 40 | self.weight = weight 41 | self.bias = bias 42 | 43 | def save_param(self): # 参数保存 44 | return self.weight, self.bias 45 | 46 | 47 | class ReLULayer(object): 48 | def __init__(self): 49 | print('\tReLU layer.') 50 | 51 | def forward(self, input): # 前向传播的计算 52 | start_time = time.time() 53 | self.input = input 54 | # TODO:ReLU层的前向传播,计算输出结果 55 | output = ________________ 56 | return output 57 | 58 | def backward(self, top_diff): # 反向传播的计算 59 | # TODO:ReLU层的反向传播,计算本层损失 60 | bottom_diff = ________________ 61 | return bottom_diff 62 | 63 | 64 | class SoftmaxLossLayer(object): 65 | def __init__(self): 66 | print('\tSoftmax loss layer.') 67 | 68 | def forward(self, input): # 前向传播的计算 69 | # TODO:softmax 损失层的前向传播,计算输出结果 70 | input_max = np.max(input, axis=1, keepdims=True) 71 | input_exp = np.exp(input - input_max) 72 | self.prob = ________________ 73 | return self.prob 74 | 75 | def get_loss(self, label): # 计算损失 76 | self.batch_size = self.prob.shape[0] 77 | self.label_onehot = np.zeros_like(self.prob) 78 | self.label_onehot[np.arange(self.batch_size), label] = 1.0 79 | loss = -np.sum(np.log(self.prob) * self.label_onehot) / self.batch_size 80 | return loss 81 | 82 | def backward(self): # 反向传播的计算 83 | # TODO:softmax 损失层的反向传播,计算本层损失 84 | bottom_diff = ________________ 85 | return bottom_diff 86 | -------------------------------------------------------------------------------- /exp_3_1_vgg/stu_upload/layers_2.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import numpy as np 3 | import struct 4 | import os 5 | import time 6 | 7 | 8 | def show_matrix(mat, name): 9 | # print(name + str(mat.shape) + ' mean %f, std %f' % (mat.mean(), mat.std())) 10 | pass 11 | 12 | 13 | def show_time(time, name): 14 | # print(name + str(time)) 15 | pass 16 | 17 | 18 | class ConvolutionalLayer(object): 19 | def __init__(self, kernel_size, channel_in, channel_out, padding, stride): 20 | # 卷积层的初始化 21 | self.kernel_size = kernel_size 22 | self.channel_in = channel_in 23 | self.channel_out = channel_out 24 | self.padding = padding 25 | self.stride = stride 26 | print('\tConvolutional layer with kernel size %d, input channel %d, output channel %d.' 
% ( 27 | self.kernel_size, self.channel_in, self.channel_out)) 28 | 29 | def init_param(self, std=0.01): # 参数初始化 30 | self.weight = np.random.normal(loc=0.0, scale=std, 31 | size=(self.channel_in, self.kernel_size, self.kernel_size, self.channel_out)) 32 | self.bias = np.zeros([self.channel_out]) 33 | 34 | def forward(self, input): # 前向传播的计算 35 | start_time = time.time() 36 | self.input = input # [N, C, H, W] 37 | height = self.input.shape[2] + self.padding * 2 38 | width = self.input.shape[3] + self.padding * 2 39 | self.input_pad = np.zeros([self.input.shape[0], self.input.shape[1], height, width]) 40 | self.input_pad[:, :, self.padding:self.padding + self.input.shape[2], 41 | self.padding:self.padding + self.input.shape[3]] = self.input 42 | height_out = (height - self.kernel_size) / self.stride + 1 43 | width_out = (width - self.kernel_size) / self.stride + 1 44 | self.output = np.zeros([self.input.shape[0], self.channel_out, height_out, width_out]) 45 | for idxn in range(self.input.shape[0]): 46 | for idxc in range(self.channel_out): 47 | for idxh in range(height_out): 48 | for idxw in range(width_out): 49 | # TODO: 计算卷积层的前向传播,特征图与卷积核的内积再加偏置 50 | self.output[idxn, idxc, idxh, idxw] = _______________________ 51 | return self.output 52 | 53 | def load_param(self, weight, bias): # 参数加载 54 | assert self.weight.shape == weight.shape 55 | assert self.bias.shape == bias.shape 56 | self.weight = weight 57 | self.bias = bias 58 | 59 | 60 | class MaxPoolingLayer(object): 61 | def __init__(self, kernel_size, stride): # 最大池化层的初始化 62 | self.kernel_size = kernel_size 63 | self.stride = stride 64 | print('\tMax pooling layer with kernel size %d, stride %d.' % (self.kernel_size, self.stride)) 65 | 66 | def forward(self, input): # 前向传播的计算 67 | start_time = time.time() 68 | self.input = input # [N, C, H, W] 69 | self.max_index = np.zeros(self.input.shape) 70 | height_out = (self.input.shape[2] - self.kernel_size) / self.stride + 1 71 | width_out = (self.input.shape[3] - self.kernel_size) / self.stride + 1 72 | self.output = np.zeros([self.input.shape[0], self.input.shape[1], height_out, width_out]) 73 | for idxn in range(self.input.shape[0]): 74 | for idxc in range(self.input.shape[1]): 75 | for idxh in range(height_out): 76 | for idxw in range(width_out): 77 | # TODO: 计算最大池化层的前向传播, 取池化窗口内的最大值 78 | self.output[idxn, idxc, idxh, idxw] = _______________________ 79 | return self.output 80 | 81 | 82 | class FlattenLayer(object): 83 | def __init__(self, input_shape, output_shape): # 扁平化层的初始化 84 | self.input_shape = input_shape 85 | self.output_shape = output_shape 86 | assert np.prod(self.input_shape) == np.prod(self.output_shape) 87 | print('\tFlatten layer with input shape %s, output shape %s.' 
% (str(self.input_shape), str(self.output_shape))) 88 | 89 | def forward(self, input): # 前向传播的计算 90 | assert list(input.shape[1:]) == list(self.input_shape) 91 | # matconvnet feature map dim: [N, height, width, channel] 92 | # ours feature map dim: [N, channel, height, width] 93 | self.input = np.transpose(input, [0, 2, 3, 1]) 94 | self.output = self.input.reshape([self.input.shape[0]] + list(self.output_shape)) 95 | show_matrix(self.output, 'flatten out ') 96 | return self.output 97 | -------------------------------------------------------------------------------- /exp_3_1_vgg/stu_upload/vgg_cpu.py: -------------------------------------------------------------------------------- 1 | # coding:utf-8 2 | import numpy as np 3 | import struct 4 | import os 5 | import scipy.io 6 | import time 7 | 8 | from layers_1 import FullyConnectedLayer, ReLULayer, SoftmaxLossLayer 9 | from layers_2 import ConvolutionalLayer, MaxPoolingLayer, FlattenLayer 10 | 11 | 12 | def show_matrix(mat, name): 13 | # print(name + str(mat.shape) + ' mean %f, std %f' % (mat.mean(), mat.std())) 14 | pass 15 | 16 | 17 | class VGG19(object): 18 | def __init__(self, param_path='../../imagenet-vgg-verydeep-19.mat'): 19 | self.param_path = param_path 20 | self.param_layer_name = ( 21 | 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 22 | 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 23 | 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 24 | 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 25 | 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5', 26 | 'flatten', 'fc6', 'relu6', 'fc7', 'relu7', 'fc8', 'softmax' 27 | ) 28 | 29 | def build_model(self): 30 | # TODO:定义VGG19 的网络结构 31 | print('Building vgg-19 model...') 32 | 33 | self.layers = {} 34 | self.layers['conv1_1'] = ConvolutionalLayer(3, 3, 64, 1, 1) 35 | self.layers['relu1_1'] = ReLULayer() 36 | self.layers['conv1_2'] = ConvolutionalLayer(3, 64, 64, 1, 1) 37 | self.layers['relu1_2'] = ReLULayer() 38 | self.layers['pool1'] = MaxPoolingLayer(2, 2) 39 | 40 | _______________________ 41 | 42 | self.layers['conv5_4'] = ConvolutionalLayer(3, 512, 512, 1, 1) 43 | self.layers['relu5_4'] = ReLULayer() 44 | self.layers['pool5'] = MaxPoolingLayer(2, 2) 45 | 46 | self.layers['flatten'] = FlattenLayer([512, 7, 7], [512 * 7 * 7]) 47 | self.layers['fc6'] = FullyConnectedLayer(512 * 7 * 7, 4096) 48 | self.layers['relu6'] = ReLULayer() 49 | 50 | _______________________ 51 | 52 | self.layers['fc8'] = FullyConnectedLayer(4096, 1000) 53 | 54 | self.layers['softmax'] = SoftmaxLossLayer() 55 | 56 | self.update_layer_list = [] 57 | for layer_name in self.layers.keys(): 58 | if 'conv' in layer_name or 'fc' in layer_name: 59 | self.update_layer_list.append(layer_name) 60 | 61 | def init_model(self): 62 | print('Initializing parameters of each layer in vgg-19...') 63 | for layer_name in self.update_layer_list: 64 | self.layers[layer_name].init_param() 65 | 66 | def load_model(self): 67 | print('Loading parameters from file ' + self.param_path) 68 | params = scipy.io.loadmat(self.param_path) 69 | self.image_mean = params['normalization'][0][0][0] 70 | self.image_mean = np.mean(self.image_mean, axis=(0, 1)) 71 | print('Get image mean: ' + str(self.image_mean)) 72 | 73 | for idx in range(43): 74 | if 'conv' in self.param_layer_name[idx]: 75 | weight, bias = params['layers'][0][idx][0][0][0][0] 76 | # matconvnet: weights dim [height, width, in_channel, 
out_channel]
77 |                 # ours: weights dim [in_channel, height, width, out_channel]
78 |                 weight = np.transpose(weight, [2, 0, 1, 3])
79 |                 bias = bias.reshape(-1)
80 |                 self.layers[self.param_layer_name[idx]].load_param(weight, bias)
81 |             if idx >= 37 and 'fc' in self.param_layer_name[idx]:
82 |                 weight, bias = params['layers'][0][idx - 1][0][0][0][0]
83 |                 weight = weight.reshape([weight.shape[0] * weight.shape[1] * weight.shape[2], weight.shape[3]])
84 |                 self.layers[self.param_layer_name[idx]].load_param(weight, bias)
85 |
86 |     def load_image(self, image_dir):
87 |         print('Loading and preprocessing image from ' + image_dir)
88 |         self.input_image = scipy.misc.imread(image_dir)
89 |         self.input_image = scipy.misc.imresize(self.input_image, [224, 224, 3])
90 |         self.input_image = np.array(self.input_image).astype(np.float32)
91 |         self.input_image -= self.image_mean
92 |         self.input_image = np.reshape(self.input_image, [1] + list(self.input_image.shape))
93 |         # input dim [N, channel, height, width]
94 |         self.input_image = np.transpose(self.input_image, [0, 3, 1, 2])
95 |
96 |     def forward(self):
97 |         print('Inferencing...')
98 |         start_time = time.time()
99 |         current = self.input_image
100 |         for idx in range(len(self.param_layer_name)):
101 |             print('Inferencing layer: ' + self.param_layer_name[idx])
102 |             current = self.layers[self.param_layer_name[idx]].forward(current)
103 |         print('Inference time: %f' % (time.time() - start_time))
104 |         return current
105 |
106 |     def evaluate(self):
107 |         prob = self.forward()
108 |         top1 = np.argmax(prob[0])
109 |         print('Classification result: id = %d, prob = %f' % (top1, prob[0, top1]))
110 |
111 |
112 | if __name__ == '__main__':
113 |     vgg = VGG19()
114 |     vgg.build_model()
115 |     vgg.init_model()
116 |     vgg.load_model()
117 |     vgg.load_image('../../cat1.jpg')
118 |     prob = vgg.evaluate()
119 |
--------------------------------------------------------------------------------
/exp_3_2_vgg_dlp/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea
--------------------------------------------------------------------------------
/exp_3_2_vgg_dlp/file_list:
--------------------------------------------------------------------------------
1 | ../../cat1.jpg 281
2 | ../../cat1.jpg 281
3 | ../../cat1.jpg 281
4 | ../../cat1.jpg 281
5 | ../../cat1.jpg 281
6 |
--------------------------------------------------------------------------------
/exp_3_2_vgg_dlp/main_exp_3_2.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | from stu_upload.vgg19_demo import VGG19
3 | import time
4 | import numpy as np
5 | import os
6 | import scipy.io
7 |
8 |
9 | def evaluate(vgg):
10 |     start = time.time()
11 |     vgg.forward()
12 |     end = time.time()
13 |     print('inference time: %f' % (end - start))
14 |     result = vgg.net.getOutputData()
15 |     prob = max(result)
16 |     top1 = result.index(prob)
17 |     print('Classification result: id = %d, prob = %f' % (top1, prob))
18 |
19 |
20 | if __name__ == '__main__':
21 |     vgg = VGG19()
22 |     vgg.build_model(param_path='../imagenet-vgg-verydeep-19.mat',
23 |                     quant_param_path='../vgg19_quant_param_new.npz')
24 |     vgg.load_model()
25 |     vgg.load_image('../cat1.jpg')
26 |     for i in range(10):
27 |         evaluate(vgg)
--------------------------------------------------------------------------------
/exp_3_2_vgg_dlp/readme.txt:
--------------------------------------------------------------------------------
1 | Complete the vgg19_demo.py file in stu_upload, then run main_exp_3_2.py to run the experiment.
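The blanks in vgg19_demo.py below repeat one pattern per VGG19 block. A sketch of how the first missing block could be filled in, mirroring the conv1_1/relu1_1 calls already provided; the channel widths follow the standard VGG19 configuration (64-128-256-512), and the pooling call is an assumption, since no pooling-layer API appears anywhere in this repository -- verify the real pycnml method name and signature before use:

def build_conv2_block(net, input_quant_params):
    # pool1 closes the conv1 block; hypothetical pooling API, see note above
    net.createPoolingLayer('pool1', 2, 2)
    # conv2 block: two 128-channel 3x3 convs, same argument pattern as conv1_1
    net.createConvLayer('conv2_1', 128, 3, 1, 1, 1, input_quant_params[2])
    net.createReLuLayer('relu2_1')
    net.createConvLayer('conv2_2', 128, 3, 1, 1, 1, input_quant_params[3])
    net.createReLuLayer('relu2_2')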
-------------------------------------------------------------------------------- /exp_3_2_vgg_dlp/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy~=1.19.4 2 | scipy~=1.5.4 -------------------------------------------------------------------------------- /exp_3_2_vgg_dlp/stu_upload/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_3_2_vgg_dlp/stu_upload/__init__.py -------------------------------------------------------------------------------- /exp_3_2_vgg_dlp/stu_upload/vgg19_demo.py: -------------------------------------------------------------------------------- 1 | # coding:utf-8 2 | import pycnml 3 | import time 4 | import numpy as np 5 | import os 6 | import scipy.io 7 | 8 | 9 | class VGG19(object): 10 | def __init__(self): 11 | # set up net 12 | self.net = pycnml.CnmlNet(16) 13 | self.input_quant_params = [] 14 | self.filter_quant_params = [] 15 | 16 | def build_model(self, 17 | param_path='../../imagenet-vgg-verydeep-19.mat', 18 | quant_param_path='../../vgg19_quant_param_new.npz'): 19 | self.param_path = param_path 20 | 21 | # loading quant params 22 | # before creating layers, you should run through the net with cpu and get positions and scales used for quantizing input data. you can get quant params by using pycnml.QuantTool 23 | # only conv and mlp layer need to be quantized 24 | # in this experiment these quant params have already been created and saved into local files. 25 | params = np.load(quant_param_path) 26 | input_params = params['input'] 27 | filter_params = params['filter'] 28 | for i in range(0, len(input_params), 2): 29 | self.input_quant_params.append(pycnml.QuantParam(int(input_params[i]), float(input_params[i + 1]))) 30 | for i in range(0, len(filter_params), 2): 31 | self.filter_quant_params.append(pycnml.QuantParam(int(filter_params[i]), float(filter_params[i + 1]))) 32 | 33 | # TODO: 使用net的createXXXLayer接口搭建VGG19网络 34 | # creating layers 35 | self.net.setInputShape(1, 3, 224, 224) 36 | # conv1_1 37 | self.net.createConvLayer('conv1_1', 64, 3, 1, 1, 1, self.input_quant_params[0]) 38 | # relu1_1 39 | self.net.createReLuLayer('relu1_1') 40 | # conv1_2 41 | self.net.createConvLayer('conv1_2', 64, 3, 1, 1, 1, self.input_quant_params[1]) 42 | # relu1_2 43 | self.net.createReLuLayer('relu1_2') 44 | # pool1 45 | _______________________ 46 | # conv2_1 47 | _______________________ 48 | # relu2_1 49 | _______________________ 50 | # conv2_2 51 | _______________________ 52 | # relu2_2 53 | _______________________ 54 | # pool2 55 | _______________________ 56 | # conv3_1 57 | _______________________ 58 | # relu3_1 59 | _______________________ 60 | # conv3_2 61 | _______________________ 62 | # relu3_2 63 | _______________________ 64 | # conv3_3 65 | _______________________ 66 | # relu3_3 67 | _______________________ 68 | # conv3_4 69 | _______________________ 70 | # relu3_4 71 | _______________________ 72 | # pool3 73 | _______________________ 74 | # conv4_1 75 | _______________________ 76 | # relu4_1 77 | _______________________ 78 | # conv4_2 79 | _______________________ 80 | # relu4_2 81 | _______________________ 82 | # conv4_3 83 | _______________________ 84 | # relu4_3 85 | _______________________ 86 | # conv4_4 87 | _______________________ 88 | # relu4_4 89 | _______________________ 90 | # pool4 91 | _______________________ 92 | # conv5_1 93 | 
_______________________ 94 | # relu5_1 95 | _______________________ 96 | # conv5_2 97 | _______________________ 98 | # relu5_2 99 | _______________________ 100 | # conv5_3 101 | _______________________ 102 | # relu5_3 103 | _______________________ 104 | # conv5_4 105 | _______________________ 106 | # relu5_4 107 | _______________________ 108 | # pool5 109 | _______________________ 110 | 111 | # flatten 112 | self.net.createFlattenLayer('flatten', [1, 512 * 7 * 7, 1, 1]) 113 | # fc6 114 | _______________________ 115 | # relu6 116 | _______________________ 117 | # fc7 118 | _______________________ 119 | # relu7 120 | _______________________ 121 | # fc8 122 | self.net.createMlpLayer('fc8', 1000, self.input_quant_params[18]) 123 | # softmax 124 | self.net.createSoftmaxLayer('softmax', 1) 125 | 126 | def load_model(self): 127 | # loading params ... 128 | print('Loading parameters from file ' + self.param_path) 129 | params = scipy.io.loadmat(self.param_path) 130 | self.image_mean = params['normalization'][0][0][0] 131 | self.image_mean = np.mean(self.image_mean, axis=(0, 1)) 132 | 133 | count = 0 134 | for idx in range(self.net.size()): 135 | if 'conv' in self.net.getLayerName(idx): 136 | weight, bias = params['layers'][0][idx][0][0][0][0] 137 | # matconvnet: weights dim [height, width, in_channel, out_channel] 138 | # ours: weights dim [out_channel, in_channel, height, width] 139 | weight = np.transpose(weight, [3, 2, 0, 1]).flatten().astype(np.float) 140 | bias = bias.reshape(-1).astype(np.float) 141 | self.net.loadParams(idx, weight, bias, self.filter_quant_params[count]) 142 | count += 1 143 | if 'fc' in self.net.getLayerName(idx): 144 | # Loading params may take quite a while. Please be patient. 145 | weight, bias = params['layers'][0][idx - 1][0][0][0][0] 146 | weight = weight.reshape([weight.shape[0] * weight.shape[1] * weight.shape[2], weight.shape[3]]) 147 | weight = np.transpose(weight, [1, 0]).flatten().astype(np.float) 148 | bias = bias.reshape(-1).astype(np.float) 149 | self.net.loadParams(idx, weight, bias, self.filter_quant_params[count]) 150 | count += 1 151 | 152 | def load_image(self, image_dir): 153 | # loading image 154 | self.image = image_dir 155 | image_mean = np.array([123.68, 116.779, 103.939]) 156 | print('Loading and preprocessing image from ' + image_dir) 157 | input_image = scipy.misc.imread(image_dir) 158 | input_image = scipy.misc.imresize(input_image, [224, 224, 3]) 159 | input_image = np.array(input_image).astype(np.float32) 160 | input_image -= image_mean 161 | input_image = np.reshape(input_image, [1] + list(input_image.shape)) 162 | # input dim [N, channel, height, width] 163 | input_image = np.transpose(input_image, [0, 3, 1, 2]) 164 | input_data = input_image.flatten().astype(np.float) 165 | self.net.setInputData(input_data) 166 | 167 | def forward(self): 168 | return self.net.forward() 169 | 170 | def get_top5(self, label): 171 | start = time.time() 172 | self.forward() 173 | end = time.time() 174 | 175 | result = self.net.getOutputData() 176 | 177 | # loading labels 178 | labels = [] 179 | with open('../synset_words.txt', 'r') as f: 180 | labels = f.readlines() 181 | 182 | # print results 183 | top1 = False 184 | top5 = False 185 | print('------ Top 5 of ' + self.image + ' ------') 186 | prob = sorted(list(result), reverse=True)[:6] 187 | if result.index(prob[0]) == label: 188 | top1 = True 189 | for i in range(5): 190 | top = prob[i] 191 | idx = result.index(top) 192 | if idx == label: 193 | top5 = True 194 | print('%f - ' % top + labels[idx].strip()) 

        print('inference time: %f' % (end - start))
        return top1, top5

    def evaluate(self, file_list):
        top1_num = 0
        top5_num = 0
        total_num = 0

        start = time.time()
        with open(file_list, 'r') as f:
            image_list = f.readlines()
            total_num = len(image_list)
            for line in image_list:
                image = line.split()[0].strip()
                label = int(line.split()[1].strip())
                self.load_image(image)  # use self, not the module-level vgg instance
                top1, top5 = self.get_top5(label)
                if top1:
                    top1_num += 1
                if top5:
                    top5_num += 1
        end = time.time()

        print('Global accuracy : ')
        print('accuracy1: %f (%d/%d) ' % (float(top1_num) / float(total_num), top1_num, total_num))
        print('accuracy5: %f (%d/%d) ' % (float(top5_num) / float(total_num), top5_num, total_num))
        print('Total execution time: %f' % (end - start))


if __name__ == '__main__':
    vgg = VGG19()
    vgg.build_model()
    vgg.load_model()
    vgg.evaluate('../file_list')
-------------------------------------------------------------------------------- 
/exp_3_2_vgg_dlp/synset_words.txt: 
-------------------------------------------------------------------------------- 
1 | n01440764 tench, Tinca tinca 2 | n01443537 goldfish, Carassius auratus 3 | n01484850 great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias 4 | n01491361 tiger shark, Galeocerdo cuvieri 5 | n01494475 hammerhead, hammerhead shark 6 | n01496331 electric ray, crampfish, numbfish, torpedo 7 | n01498041 stingray 8 | n01514668 cock 9 | n01514859 hen 10 | n01518878 ostrich, Struthio camelus 11 | n01530575 brambling, Fringilla montifringilla 12 | n01531178 goldfinch, Carduelis carduelis 13 | n01532829 house finch, linnet, Carpodacus mexicanus 14 | n01534433 junco, snowbird 15 | n01537544 indigo bunting, indigo finch, indigo bird, Passerina cyanea 16 | n01558993 robin, American robin, Turdus migratorius 17 | n01560419 bulbul 18 | n01580077 jay 19 | n01582220 magpie 20 | n01592084 chickadee 21 | n01601694 water ouzel, dipper 22 | n01608432 kite 23 | n01614925 bald eagle, American eagle, Haliaeetus leucocephalus 24 | n01616318 vulture 25 | n01622779 great grey owl, great gray owl, Strix nebulosa 26 | n01629819 European fire salamander, Salamandra salamandra 27 | n01630670 common newt, Triturus vulgaris 28 | n01631663 eft 29 | n01632458 spotted salamander, Ambystoma maculatum 30 | n01632777 axolotl, mud puppy, Ambystoma mexicanum 31 | n01641577 bullfrog, Rana catesbeiana 32 | n01644373 tree frog, tree-frog 33 | n01644900 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui 34 | n01664065 loggerhead, loggerhead turtle, Caretta caretta 35 | n01665541 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea 36 | n01667114 mud turtle 37 | n01667778 terrapin 38 | n01669191 box turtle, box tortoise 39 | n01675722 banded gecko 40 | n01677366 common iguana, iguana, Iguana iguana 41 | n01682714 American chameleon, anole, Anolis carolinensis 42 | n01685808 whiptail, whiptail lizard 43 | n01687978 agama 44 | n01688243 frilled lizard, Chlamydosaurus kingi 45 | n01689811 alligator lizard 46 | n01692333 Gila monster, Heloderma suspectum 47 | n01693334 green lizard, Lacerta viridis 48 | n01694178 African chameleon, Chamaeleo chamaeleon 49 | n01695060 Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis 50 | n01697457 African crocodile, Nile crocodile, Crocodylus niloticus 51 | n01698640 American alligator, Alligator 
mississipiensis 52 | n01704323 triceratops 53 | n01728572 thunder snake, worm snake, Carphophis amoenus 54 | n01728920 ringneck snake, ring-necked snake, ring snake 55 | n01729322 hognose snake, puff adder, sand viper 56 | n01729977 green snake, grass snake 57 | n01734418 king snake, kingsnake 58 | n01735189 garter snake, grass snake 59 | n01737021 water snake 60 | n01739381 vine snake 61 | n01740131 night snake, Hypsiglena torquata 62 | n01742172 boa constrictor, Constrictor constrictor 63 | n01744401 rock python, rock snake, Python sebae 64 | n01748264 Indian cobra, Naja naja 65 | n01749939 green mamba 66 | n01751748 sea snake 67 | n01753488 horned viper, cerastes, sand viper, horned asp, Cerastes cornutus 68 | n01755581 diamondback, diamondback rattlesnake, Crotalus adamanteus 69 | n01756291 sidewinder, horned rattlesnake, Crotalus cerastes 70 | n01768244 trilobite 71 | n01770081 harvestman, daddy longlegs, Phalangium opilio 72 | n01770393 scorpion 73 | n01773157 black and gold garden spider, Argiope aurantia 74 | n01773549 barn spider, Araneus cavaticus 75 | n01773797 garden spider, Aranea diademata 76 | n01774384 black widow, Latrodectus mactans 77 | n01774750 tarantula 78 | n01775062 wolf spider, hunting spider 79 | n01776313 tick 80 | n01784675 centipede 81 | n01795545 black grouse 82 | n01796340 ptarmigan 83 | n01797886 ruffed grouse, partridge, Bonasa umbellus 84 | n01798484 prairie chicken, prairie grouse, prairie fowl 85 | n01806143 peacock 86 | n01806567 quail 87 | n01807496 partridge 88 | n01817953 African grey, African gray, Psittacus erithacus 89 | n01818515 macaw 90 | n01819313 sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita 91 | n01820546 lorikeet 92 | n01824575 coucal 93 | n01828970 bee eater 94 | n01829413 hornbill 95 | n01833805 hummingbird 96 | n01843065 jacamar 97 | n01843383 toucan 98 | n01847000 drake 99 | n01855032 red-breasted merganser, Mergus serrator 100 | n01855672 goose 101 | n01860187 black swan, Cygnus atratus 102 | n01871265 tusker 103 | n01872401 echidna, spiny anteater, anteater 104 | n01873310 platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus 105 | n01877812 wallaby, brush kangaroo 106 | n01882714 koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus 107 | n01883070 wombat 108 | n01910747 jellyfish 109 | n01914609 sea anemone, anemone 110 | n01917289 brain coral 111 | n01924916 flatworm, platyhelminth 112 | n01930112 nematode, nematode worm, roundworm 113 | n01943899 conch 114 | n01944390 snail 115 | n01945685 slug 116 | n01950731 sea slug, nudibranch 117 | n01955084 chiton, coat-of-mail shell, sea cradle, polyplacophore 118 | n01968897 chambered nautilus, pearly nautilus, nautilus 119 | n01978287 Dungeness crab, Cancer magister 120 | n01978455 rock crab, Cancer irroratus 121 | n01980166 fiddler crab 122 | n01981276 king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica 123 | n01983481 American lobster, Northern lobster, Maine lobster, Homarus americanus 124 | n01984695 spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish 125 | n01985128 crayfish, crawfish, crawdad, crawdaddy 126 | n01986214 hermit crab 127 | n01990800 isopod 128 | n02002556 white stork, Ciconia ciconia 129 | n02002724 black stork, Ciconia nigra 130 | n02006656 spoonbill 131 | n02007558 flamingo 132 | n02009229 little blue heron, Egretta caerulea 133 | n02009912 American egret, great white heron, Egretta albus 134 | n02011460 bittern 135 | n02012849 crane 136 | 
n02013706 limpkin, Aramus pictus 137 | n02017213 European gallinule, Porphyrio porphyrio 138 | n02018207 American coot, marsh hen, mud hen, water hen, Fulica americana 139 | n02018795 bustard 140 | n02025239 ruddy turnstone, Arenaria interpres 141 | n02027492 red-backed sandpiper, dunlin, Erolia alpina 142 | n02028035 redshank, Tringa totanus 143 | n02033041 dowitcher 144 | n02037110 oystercatcher, oyster catcher 145 | n02051845 pelican 146 | n02056570 king penguin, Aptenodytes patagonica 147 | n02058221 albatross, mollymawk 148 | n02066245 grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus 149 | n02071294 killer whale, killer, orca, grampus, sea wolf, Orcinus orca 150 | n02074367 dugong, Dugong dugon 151 | n02077923 sea lion 152 | n02085620 Chihuahua 153 | n02085782 Japanese spaniel 154 | n02085936 Maltese dog, Maltese terrier, Maltese 155 | n02086079 Pekinese, Pekingese, Peke 156 | n02086240 Shih-Tzu 157 | n02086646 Blenheim spaniel 158 | n02086910 papillon 159 | n02087046 toy terrier 160 | n02087394 Rhodesian ridgeback 161 | n02088094 Afghan hound, Afghan 162 | n02088238 basset, basset hound 163 | n02088364 beagle 164 | n02088466 bloodhound, sleuthhound 165 | n02088632 bluetick 166 | n02089078 black-and-tan coonhound 167 | n02089867 Walker hound, Walker foxhound 168 | n02089973 English foxhound 169 | n02090379 redbone 170 | n02090622 borzoi, Russian wolfhound 171 | n02090721 Irish wolfhound 172 | n02091032 Italian greyhound 173 | n02091134 whippet 174 | n02091244 Ibizan hound, Ibizan Podenco 175 | n02091467 Norwegian elkhound, elkhound 176 | n02091635 otterhound, otter hound 177 | n02091831 Saluki, gazelle hound 178 | n02092002 Scottish deerhound, deerhound 179 | n02092339 Weimaraner 180 | n02093256 Staffordshire bullterrier, Staffordshire bull terrier 181 | n02093428 American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier 182 | n02093647 Bedlington terrier 183 | n02093754 Border terrier 184 | n02093859 Kerry blue terrier 185 | n02093991 Irish terrier 186 | n02094114 Norfolk terrier 187 | n02094258 Norwich terrier 188 | n02094433 Yorkshire terrier 189 | n02095314 wire-haired fox terrier 190 | n02095570 Lakeland terrier 191 | n02095889 Sealyham terrier, Sealyham 192 | n02096051 Airedale, Airedale terrier 193 | n02096177 cairn, cairn terrier 194 | n02096294 Australian terrier 195 | n02096437 Dandie Dinmont, Dandie Dinmont terrier 196 | n02096585 Boston bull, Boston terrier 197 | n02097047 miniature schnauzer 198 | n02097130 giant schnauzer 199 | n02097209 standard schnauzer 200 | n02097298 Scotch terrier, Scottish terrier, Scottie 201 | n02097474 Tibetan terrier, chrysanthemum dog 202 | n02097658 silky terrier, Sydney silky 203 | n02098105 soft-coated wheaten terrier 204 | n02098286 West Highland white terrier 205 | n02098413 Lhasa, Lhasa apso 206 | n02099267 flat-coated retriever 207 | n02099429 curly-coated retriever 208 | n02099601 golden retriever 209 | n02099712 Labrador retriever 210 | n02099849 Chesapeake Bay retriever 211 | n02100236 German short-haired pointer 212 | n02100583 vizsla, Hungarian pointer 213 | n02100735 English setter 214 | n02100877 Irish setter, red setter 215 | n02101006 Gordon setter 216 | n02101388 Brittany spaniel 217 | n02101556 clumber, clumber spaniel 218 | n02102040 English springer, English springer spaniel 219 | n02102177 Welsh springer spaniel 220 | n02102318 cocker spaniel, English cocker spaniel, cocker 221 | n02102480 Sussex spaniel 222 | n02102973 Irish water spaniel 223 | 
n02104029 kuvasz 224 | n02104365 schipperke 225 | n02105056 groenendael 226 | n02105162 malinois 227 | n02105251 briard 228 | n02105412 kelpie 229 | n02105505 komondor 230 | n02105641 Old English sheepdog, bobtail 231 | n02105855 Shetland sheepdog, Shetland sheep dog, Shetland 232 | n02106030 collie 233 | n02106166 Border collie 234 | n02106382 Bouvier des Flandres, Bouviers des Flandres 235 | n02106550 Rottweiler 236 | n02106662 German shepherd, German shepherd dog, German police dog, alsatian 237 | n02107142 Doberman, Doberman pinscher 238 | n02107312 miniature pinscher 239 | n02107574 Greater Swiss Mountain dog 240 | n02107683 Bernese mountain dog 241 | n02107908 Appenzeller 242 | n02108000 EntleBucher 243 | n02108089 boxer 244 | n02108422 bull mastiff 245 | n02108551 Tibetan mastiff 246 | n02108915 French bulldog 247 | n02109047 Great Dane 248 | n02109525 Saint Bernard, St Bernard 249 | n02109961 Eskimo dog, husky 250 | n02110063 malamute, malemute, Alaskan malamute 251 | n02110185 Siberian husky 252 | n02110341 dalmatian, coach dog, carriage dog 253 | n02110627 affenpinscher, monkey pinscher, monkey dog 254 | n02110806 basenji 255 | n02110958 pug, pug-dog 256 | n02111129 Leonberg 257 | n02111277 Newfoundland, Newfoundland dog 258 | n02111500 Great Pyrenees 259 | n02111889 Samoyed, Samoyede 260 | n02112018 Pomeranian 261 | n02112137 chow, chow chow 262 | n02112350 keeshond 263 | n02112706 Brabancon griffon 264 | n02113023 Pembroke, Pembroke Welsh corgi 265 | n02113186 Cardigan, Cardigan Welsh corgi 266 | n02113624 toy poodle 267 | n02113712 miniature poodle 268 | n02113799 standard poodle 269 | n02113978 Mexican hairless 270 | n02114367 timber wolf, grey wolf, gray wolf, Canis lupus 271 | n02114548 white wolf, Arctic wolf, Canis lupus tundrarum 272 | n02114712 red wolf, maned wolf, Canis rufus, Canis niger 273 | n02114855 coyote, prairie wolf, brush wolf, Canis latrans 274 | n02115641 dingo, warrigal, warragal, Canis dingo 275 | n02115913 dhole, Cuon alpinus 276 | n02116738 African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus 277 | n02117135 hyena, hyaena 278 | n02119022 red fox, Vulpes vulpes 279 | n02119789 kit fox, Vulpes macrotis 280 | n02120079 Arctic fox, white fox, Alopex lagopus 281 | n02120505 grey fox, gray fox, Urocyon cinereoargenteus 282 | n02123045 tabby, tabby cat 283 | n02123159 tiger cat 284 | n02123394 Persian cat 285 | n02123597 Siamese cat, Siamese 286 | n02124075 Egyptian cat 287 | n02125311 cougar, puma, catamount, mountain lion, painter, panther, Felis concolor 288 | n02127052 lynx, catamount 289 | n02128385 leopard, Panthera pardus 290 | n02128757 snow leopard, ounce, Panthera uncia 291 | n02128925 jaguar, panther, Panthera onca, Felis onca 292 | n02129165 lion, king of beasts, Panthera leo 293 | n02129604 tiger, Panthera tigris 294 | n02130308 cheetah, chetah, Acinonyx jubatus 295 | n02132136 brown bear, bruin, Ursus arctos 296 | n02133161 American black bear, black bear, Ursus americanus, Euarctos americanus 297 | n02134084 ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus 298 | n02134418 sloth bear, Melursus ursinus, Ursus ursinus 299 | n02137549 mongoose 300 | n02138441 meerkat, mierkat 301 | n02165105 tiger beetle 302 | n02165456 ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle 303 | n02167151 ground beetle, carabid beetle 304 | n02168699 long-horned beetle, longicorn, longicorn beetle 305 | n02169497 leaf beetle, chrysomelid 306 | n02172182 dung beetle 307 | n02174001 rhinoceros beetle 308 | n02177972 weevil 309 | n02190166 
fly 310 | n02206856 bee 311 | n02219486 ant, emmet, pismire 312 | n02226429 grasshopper, hopper 313 | n02229544 cricket 314 | n02231487 walking stick, walkingstick, stick insect 315 | n02233338 cockroach, roach 316 | n02236044 mantis, mantid 317 | n02256656 cicada, cicala 318 | n02259212 leafhopper 319 | n02264363 lacewing, lacewing fly 320 | n02268443 dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk 321 | n02268853 damselfly 322 | n02276258 admiral 323 | n02277742 ringlet, ringlet butterfly 324 | n02279972 monarch, monarch butterfly, milkweed butterfly, Danaus plexippus 325 | n02280649 cabbage butterfly 326 | n02281406 sulphur butterfly, sulfur butterfly 327 | n02281787 lycaenid, lycaenid butterfly 328 | n02317335 starfish, sea star 329 | n02319095 sea urchin 330 | n02321529 sea cucumber, holothurian 331 | n02325366 wood rabbit, cottontail, cottontail rabbit 332 | n02326432 hare 333 | n02328150 Angora, Angora rabbit 334 | n02342885 hamster 335 | n02346627 porcupine, hedgehog 336 | n02356798 fox squirrel, eastern fox squirrel, Sciurus niger 337 | n02361337 marmot 338 | n02363005 beaver 339 | n02364673 guinea pig, Cavia cobaya 340 | n02389026 sorrel 341 | n02391049 zebra 342 | n02395406 hog, pig, grunter, squealer, Sus scrofa 343 | n02396427 wild boar, boar, Sus scrofa 344 | n02397096 warthog 345 | n02398521 hippopotamus, hippo, river horse, Hippopotamus amphibius 346 | n02403003 ox 347 | n02408429 water buffalo, water ox, Asiatic buffalo, Bubalus bubalis 348 | n02410509 bison 349 | n02412080 ram, tup 350 | n02415577 bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis 351 | n02417914 ibex, Capra ibex 352 | n02422106 hartebeest 353 | n02422699 impala, Aepyceros melampus 354 | n02423022 gazelle 355 | n02437312 Arabian camel, dromedary, Camelus dromedarius 356 | n02437616 llama 357 | n02441942 weasel 358 | n02442845 mink 359 | n02443114 polecat, fitch, foulmart, foumart, Mustela putorius 360 | n02443484 black-footed ferret, ferret, Mustela nigripes 361 | n02444819 otter 362 | n02445715 skunk, polecat, wood pussy 363 | n02447366 badger 364 | n02454379 armadillo 365 | n02457408 three-toed sloth, ai, Bradypus tridactylus 366 | n02480495 orangutan, orang, orangutang, Pongo pygmaeus 367 | n02480855 gorilla, Gorilla gorilla 368 | n02481823 chimpanzee, chimp, Pan troglodytes 369 | n02483362 gibbon, Hylobates lar 370 | n02483708 siamang, Hylobates syndactylus, Symphalangus syndactylus 371 | n02484975 guenon, guenon monkey 372 | n02486261 patas, hussar monkey, Erythrocebus patas 373 | n02486410 baboon 374 | n02487347 macaque 375 | n02488291 langur 376 | n02488702 colobus, colobus monkey 377 | n02489166 proboscis monkey, Nasalis larvatus 378 | n02490219 marmoset 379 | n02492035 capuchin, ringtail, Cebus capucinus 380 | n02492660 howler monkey, howler 381 | n02493509 titi, titi monkey 382 | n02493793 spider monkey, Ateles geoffroyi 383 | n02494079 squirrel monkey, Saimiri sciureus 384 | n02497673 Madagascar cat, ring-tailed lemur, Lemur catta 385 | n02500267 indri, indris, Indri indri, Indri brevicaudatus 386 | n02504013 Indian elephant, Elephas maximus 387 | n02504458 African elephant, Loxodonta africana 388 | n02509815 lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens 389 | n02510455 giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca 390 | n02514041 barracouta, snoek 391 | n02526121 eel 392 | n02536864 coho, cohoe, coho salmon, blue jack, silver salmon, 
Oncorhynchus kisutch 393 | n02606052 rock beauty, Holocanthus tricolor 394 | n02607072 anemone fish 395 | n02640242 sturgeon 396 | n02641379 gar, garfish, garpike, billfish, Lepisosteus osseus 397 | n02643566 lionfish 398 | n02655020 puffer, pufferfish, blowfish, globefish 399 | n02666196 abacus 400 | n02667093 abaya 401 | n02669723 academic gown, academic robe, judge's robe 402 | n02672831 accordion, piano accordion, squeeze box 403 | n02676566 acoustic guitar 404 | n02687172 aircraft carrier, carrier, flattop, attack aircraft carrier 405 | n02690373 airliner 406 | n02692877 airship, dirigible 407 | n02699494 altar 408 | n02701002 ambulance 409 | n02704792 amphibian, amphibious vehicle 410 | n02708093 analog clock 411 | n02727426 apiary, bee house 412 | n02730930 apron 413 | n02747177 ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin 414 | n02749479 assault rifle, assault gun 415 | n02769748 backpack, back pack, knapsack, packsack, rucksack, haversack 416 | n02776631 bakery, bakeshop, bakehouse 417 | n02777292 balance beam, beam 418 | n02782093 balloon 419 | n02783161 ballpoint, ballpoint pen, ballpen, Biro 420 | n02786058 Band Aid 421 | n02787622 banjo 422 | n02788148 bannister, banister, balustrade, balusters, handrail 423 | n02790996 barbell 424 | n02791124 barber chair 425 | n02791270 barbershop 426 | n02793495 barn 427 | n02794156 barometer 428 | n02795169 barrel, cask 429 | n02797295 barrow, garden cart, lawn cart, wheelbarrow 430 | n02799071 baseball 431 | n02802426 basketball 432 | n02804414 bassinet 433 | n02804610 bassoon 434 | n02807133 bathing cap, swimming cap 435 | n02808304 bath towel 436 | n02808440 bathtub, bathing tub, bath, tub 437 | n02814533 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon 438 | n02814860 beacon, lighthouse, beacon light, pharos 439 | n02815834 beaker 440 | n02817516 bearskin, busby, shako 441 | n02823428 beer bottle 442 | n02823750 beer glass 443 | n02825657 bell cote, bell cot 444 | n02834397 bib 445 | n02835271 bicycle-built-for-two, tandem bicycle, tandem 446 | n02837789 bikini, two-piece 447 | n02840245 binder, ring-binder 448 | n02841315 binoculars, field glasses, opera glasses 449 | n02843684 birdhouse 450 | n02859443 boathouse 451 | n02860847 bobsled, bobsleigh, bob 452 | n02865351 bolo tie, bolo, bola tie, bola 453 | n02869837 bonnet, poke bonnet 454 | n02870880 bookcase 455 | n02871525 bookshop, bookstore, bookstall 456 | n02877765 bottlecap 457 | n02879718 bow 458 | n02883205 bow tie, bow-tie, bowtie 459 | n02892201 brass, memorial tablet, plaque 460 | n02892767 brassiere, bra, bandeau 461 | n02894605 breakwater, groin, groyne, mole, bulwark, seawall, jetty 462 | n02895154 breastplate, aegis, egis 463 | n02906734 broom 464 | n02909870 bucket, pail 465 | n02910353 buckle 466 | n02916936 bulletproof vest 467 | n02917067 bullet train, bullet 468 | n02927161 butcher shop, meat market 469 | n02930766 cab, hack, taxi, taxicab 470 | n02939185 caldron, cauldron 471 | n02948072 candle, taper, wax light 472 | n02950826 cannon 473 | n02951358 canoe 474 | n02951585 can opener, tin opener 475 | n02963159 cardigan 476 | n02965783 car mirror 477 | n02966193 carousel, carrousel, merry-go-round, roundabout, whirligig 478 | n02966687 carpenter's kit, tool kit 479 | n02971356 carton 480 | n02974003 car wheel 481 | n02977058 cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM 482 | n02978881 cassette 483 | 
n02979186 cassette player 484 | n02980441 castle 485 | n02981792 catamaran 486 | n02988304 CD player 487 | n02992211 cello, violoncello 488 | n02992529 cellular telephone, cellular phone, cellphone, cell, mobile phone 489 | n02999410 chain 490 | n03000134 chainlink fence 491 | n03000247 chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour 492 | n03000684 chain saw, chainsaw 493 | n03014705 chest 494 | n03016953 chiffonier, commode 495 | n03017168 chime, bell, gong 496 | n03018349 china cabinet, china closet 497 | n03026506 Christmas stocking 498 | n03028079 church, church building 499 | n03032252 cinema, movie theater, movie theatre, movie house, picture palace 500 | n03041632 cleaver, meat cleaver, chopper 501 | n03042490 cliff dwelling 502 | n03045698 cloak 503 | n03047690 clog, geta, patten, sabot 504 | n03062245 cocktail shaker 505 | n03063599 coffee mug 506 | n03063689 coffeepot 507 | n03065424 coil, spiral, volute, whorl, helix 508 | n03075370 combination lock 509 | n03085013 computer keyboard, keypad 510 | n03089624 confectionery, confectionary, candy store 511 | n03095699 container ship, containership, container vessel 512 | n03100240 convertible 513 | n03109150 corkscrew, bottle screw 514 | n03110669 cornet, horn, trumpet, trump 515 | n03124043 cowboy boot 516 | n03124170 cowboy hat, ten-gallon hat 517 | n03125729 cradle 518 | n03126707 crane 519 | n03127747 crash helmet 520 | n03127925 crate 521 | n03131574 crib, cot 522 | n03133878 Crock Pot 523 | n03134739 croquet ball 524 | n03141823 crutch 525 | n03146219 cuirass 526 | n03160309 dam, dike, dyke 527 | n03179701 desk 528 | n03180011 desktop computer 529 | n03187595 dial telephone, dial phone 530 | n03188531 diaper, nappy, napkin 531 | n03196217 digital clock 532 | n03197337 digital watch 533 | n03201208 dining table, board 534 | n03207743 dishrag, dishcloth 535 | n03207941 dishwasher, dish washer, dishwashing machine 536 | n03208938 disk brake, disc brake 537 | n03216828 dock, dockage, docking facility 538 | n03218198 dogsled, dog sled, dog sleigh 539 | n03220513 dome 540 | n03223299 doormat, welcome mat 541 | n03240683 drilling platform, offshore rig 542 | n03249569 drum, membranophone, tympan 543 | n03250847 drumstick 544 | n03255030 dumbbell 545 | n03259280 Dutch oven 546 | n03271574 electric fan, blower 547 | n03272010 electric guitar 548 | n03272562 electric locomotive 549 | n03290653 entertainment center 550 | n03291819 envelope 551 | n03297495 espresso maker 552 | n03314780 face powder 553 | n03325584 feather boa, boa 554 | n03337140 file, file cabinet, filing cabinet 555 | n03344393 fireboat 556 | n03345487 fire engine, fire truck 557 | n03347037 fire screen, fireguard 558 | n03355925 flagpole, flagstaff 559 | n03372029 flute, transverse flute 560 | n03376595 folding chair 561 | n03379051 football helmet 562 | n03384352 forklift 563 | n03388043 fountain 564 | n03388183 fountain pen 565 | n03388549 four-poster 566 | n03393912 freight car 567 | n03394916 French horn, horn 568 | n03400231 frying pan, frypan, skillet 569 | n03404251 fur coat 570 | n03417042 garbage truck, dustcart 571 | n03424325 gasmask, respirator, gas helmet 572 | n03425413 gas pump, gasoline pump, petrol pump, island dispenser 573 | n03443371 goblet 574 | n03444034 go-kart 575 | n03445777 golf ball 576 | n03445924 golfcart, golf cart 577 | n03447447 gondola 578 | n03447721 gong, tam-tam 579 | n03450230 gown 580 | n03452741 grand piano, grand 581 | n03457902 greenhouse, nursery, glasshouse 582 | n03459775 grille, radiator 
grille 583 | n03461385 grocery store, grocery, food market, market 584 | n03467068 guillotine 585 | n03476684 hair slide 586 | n03476991 hair spray 587 | n03478589 half track 588 | n03481172 hammer 589 | n03482405 hamper 590 | n03483316 hand blower, blow dryer, blow drier, hair dryer, hair drier 591 | n03485407 hand-held computer, hand-held microcomputer 592 | n03485794 handkerchief, hankie, hanky, hankey 593 | n03492542 hard disc, hard disk, fixed disk 594 | n03494278 harmonica, mouth organ, harp, mouth harp 595 | n03495258 harp 596 | n03496892 harvester, reaper 597 | n03498962 hatchet 598 | n03527444 holster 599 | n03529860 home theater, home theatre 600 | n03530642 honeycomb 601 | n03532672 hook, claw 602 | n03534580 hoopskirt, crinoline 603 | n03535780 horizontal bar, high bar 604 | n03538406 horse cart, horse-cart 605 | n03544143 hourglass 606 | n03584254 iPod 607 | n03584829 iron, smoothing iron 608 | n03590841 jack-o'-lantern 609 | n03594734 jean, blue jean, denim 610 | n03594945 jeep, landrover 611 | n03595614 jersey, T-shirt, tee shirt 612 | n03598930 jigsaw puzzle 613 | n03599486 jinrikisha, ricksha, rickshaw 614 | n03602883 joystick 615 | n03617480 kimono 616 | n03623198 knee pad 617 | n03627232 knot 618 | n03630383 lab coat, laboratory coat 619 | n03633091 ladle 620 | n03637318 lampshade, lamp shade 621 | n03642806 laptop, laptop computer 622 | n03649909 lawn mower, mower 623 | n03657121 lens cap, lens cover 624 | n03658185 letter opener, paper knife, paperknife 625 | n03661043 library 626 | n03662601 lifeboat 627 | n03666591 lighter, light, igniter, ignitor 628 | n03670208 limousine, limo 629 | n03673027 liner, ocean liner 630 | n03676483 lipstick, lip rouge 631 | n03680355 Loafer 632 | n03690938 lotion 633 | n03691459 loudspeaker, speaker, speaker unit, loudspeaker system, speaker system 634 | n03692522 loupe, jeweler's loupe 635 | n03697007 lumbermill, sawmill 636 | n03706229 magnetic compass 637 | n03709823 mailbag, postbag 638 | n03710193 mailbox, letter box 639 | n03710637 maillot 640 | n03710721 maillot, tank suit 641 | n03717622 manhole cover 642 | n03720891 maraca 643 | n03721384 marimba, xylophone 644 | n03724870 mask 645 | n03729826 matchstick 646 | n03733131 maypole 647 | n03733281 maze, labyrinth 648 | n03733805 measuring cup 649 | n03742115 medicine chest, medicine cabinet 650 | n03743016 megalith, megalithic structure 651 | n03759954 microphone, mike 652 | n03761084 microwave, microwave oven 653 | n03763968 military uniform 654 | n03764736 milk can 655 | n03769881 minibus 656 | n03770439 miniskirt, mini 657 | n03770679 minivan 658 | n03773504 missile 659 | n03775071 mitten 660 | n03775546 mixing bowl 661 | n03776460 mobile home, manufactured home 662 | n03777568 Model T 663 | n03777754 modem 664 | n03781244 monastery 665 | n03782006 monitor 666 | n03785016 moped 667 | n03786901 mortar 668 | n03787032 mortarboard 669 | n03788195 mosque 670 | n03788365 mosquito net 671 | n03791053 motor scooter, scooter 672 | n03792782 mountain bike, all-terrain bike, off-roader 673 | n03792972 mountain tent 674 | n03793489 mouse, computer mouse 675 | n03794056 mousetrap 676 | n03796401 moving van 677 | n03803284 muzzle 678 | n03804744 nail 679 | n03814639 neck brace 680 | n03814906 necklace 681 | n03825788 nipple 682 | n03832673 notebook, notebook computer 683 | n03837869 obelisk 684 | n03838899 oboe, hautboy, hautbois 685 | n03840681 ocarina, sweet potato 686 | n03841143 odometer, hodometer, mileometer, milometer 687 | n03843555 oil filter 688 | n03854065 organ, pipe organ 689 | 
n03857828 oscilloscope, scope, cathode-ray oscilloscope, CRO 690 | n03866082 overskirt 691 | n03868242 oxcart 692 | n03868863 oxygen mask 693 | n03871628 packet 694 | n03873416 paddle, boat paddle 695 | n03874293 paddlewheel, paddle wheel 696 | n03874599 padlock 697 | n03876231 paintbrush 698 | n03877472 pajama, pyjama, pj's, jammies 699 | n03877845 palace 700 | n03884397 panpipe, pandean pipe, syrinx 701 | n03887697 paper towel 702 | n03888257 parachute, chute 703 | n03888605 parallel bars, bars 704 | n03891251 park bench 705 | n03891332 parking meter 706 | n03895866 passenger car, coach, carriage 707 | n03899768 patio, terrace 708 | n03902125 pay-phone, pay-station 709 | n03903868 pedestal, plinth, footstall 710 | n03908618 pencil box, pencil case 711 | n03908714 pencil sharpener 712 | n03916031 perfume, essence 713 | n03920288 Petri dish 714 | n03924679 photocopier 715 | n03929660 pick, plectrum, plectron 716 | n03929855 pickelhaube 717 | n03930313 picket fence, paling 718 | n03930630 pickup, pickup truck 719 | n03933933 pier 720 | n03935335 piggy bank, penny bank 721 | n03937543 pill bottle 722 | n03938244 pillow 723 | n03942813 ping-pong ball 724 | n03944341 pinwheel 725 | n03947888 pirate, pirate ship 726 | n03950228 pitcher, ewer 727 | n03954731 plane, carpenter's plane, woodworking plane 728 | n03956157 planetarium 729 | n03958227 plastic bag 730 | n03961711 plate rack 731 | n03967562 plow, plough 732 | n03970156 plunger, plumber's helper 733 | n03976467 Polaroid camera, Polaroid Land camera 734 | n03976657 pole 735 | n03977966 police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria 736 | n03980874 poncho 737 | n03982430 pool table, billiard table, snooker table 738 | n03983396 pop bottle, soda bottle 739 | n03991062 pot, flowerpot 740 | n03992509 potter's wheel 741 | n03995372 power drill 742 | n03998194 prayer rug, prayer mat 743 | n04004767 printer 744 | n04005630 prison, prison house 745 | n04008634 projectile, missile 746 | n04009552 projector 747 | n04019541 puck, hockey puck 748 | n04023962 punching bag, punch bag, punching ball, punchball 749 | n04026417 purse 750 | n04033901 quill, quill pen 751 | n04033995 quilt, comforter, comfort, puff 752 | n04037443 racer, race car, racing car 753 | n04039381 racket, racquet 754 | n04040759 radiator 755 | n04041544 radio, wireless 756 | n04044716 radio telescope, radio reflector 757 | n04049303 rain barrel 758 | n04065272 recreational vehicle, RV, R.V. 
759 | n04067472 reel 760 | n04069434 reflex camera 761 | n04070727 refrigerator, icebox 762 | n04074963 remote control, remote 763 | n04081281 restaurant, eating house, eating place, eatery 764 | n04086273 revolver, six-gun, six-shooter 765 | n04090263 rifle 766 | n04099969 rocking chair, rocker 767 | n04111531 rotisserie 768 | n04116512 rubber eraser, rubber, pencil eraser 769 | n04118538 rugby ball 770 | n04118776 rule, ruler 771 | n04120489 running shoe 772 | n04125021 safe 773 | n04127249 safety pin 774 | n04131690 saltshaker, salt shaker 775 | n04133789 sandal 776 | n04136333 sarong 777 | n04141076 sax, saxophone 778 | n04141327 scabbard 779 | n04141975 scale, weighing machine 780 | n04146614 school bus 781 | n04147183 schooner 782 | n04149813 scoreboard 783 | n04152593 screen, CRT screen 784 | n04153751 screw 785 | n04154565 screwdriver 786 | n04162706 seat belt, seatbelt 787 | n04179913 sewing machine 788 | n04192698 shield, buckler 789 | n04200800 shoe shop, shoe-shop, shoe store 790 | n04201297 shoji 791 | n04204238 shopping basket 792 | n04204347 shopping cart 793 | n04208210 shovel 794 | n04209133 shower cap 795 | n04209239 shower curtain 796 | n04228054 ski 797 | n04229816 ski mask 798 | n04235860 sleeping bag 799 | n04238763 slide rule, slipstick 800 | n04239074 sliding door 801 | n04243546 slot, one-armed bandit 802 | n04251144 snorkel 803 | n04252077 snowmobile 804 | n04252225 snowplow, snowplough 805 | n04254120 soap dispenser 806 | n04254680 soccer ball 807 | n04254777 sock 808 | n04258138 solar dish, solar collector, solar furnace 809 | n04259630 sombrero 810 | n04263257 soup bowl 811 | n04264628 space bar 812 | n04265275 space heater 813 | n04266014 space shuttle 814 | n04270147 spatula 815 | n04273569 speedboat 816 | n04275548 spider web, spider's web 817 | n04277352 spindle 818 | n04285008 sports car, sport car 819 | n04286575 spotlight, spot 820 | n04296562 stage 821 | n04310018 steam locomotive 822 | n04311004 steel arch bridge 823 | n04311174 steel drum 824 | n04317175 stethoscope 825 | n04325704 stole 826 | n04326547 stone wall 827 | n04328186 stopwatch, stop watch 828 | n04330267 stove 829 | n04332243 strainer 830 | n04335435 streetcar, tram, tramcar, trolley, trolley car 831 | n04336792 stretcher 832 | n04344873 studio couch, day bed 833 | n04346328 stupa, tope 834 | n04347754 submarine, pigboat, sub, U-boat 835 | n04350905 suit, suit of clothes 836 | n04355338 sundial 837 | n04355933 sunglass 838 | n04356056 sunglasses, dark glasses, shades 839 | n04357314 sunscreen, sunblock, sun blocker 840 | n04366367 suspension bridge 841 | n04367480 swab, swob, mop 842 | n04370456 sweatshirt 843 | n04371430 swimming trunks, bathing trunks 844 | n04371774 swing 845 | n04372370 switch, electric switch, electrical switch 846 | n04376876 syringe 847 | n04380533 table lamp 848 | n04389033 tank, army tank, armored combat vehicle, armoured combat vehicle 849 | n04392985 tape player 850 | n04398044 teapot 851 | n04399382 teddy, teddy bear 852 | n04404412 television, television system 853 | n04409515 tennis ball 854 | n04417672 thatch, thatched roof 855 | n04418357 theater curtain, theatre curtain 856 | n04423845 thimble 857 | n04428191 thresher, thrasher, threshing machine 858 | n04429376 throne 859 | n04435653 tile roof 860 | n04442312 toaster 861 | n04443257 tobacco shop, tobacconist shop, tobacconist 862 | n04447861 toilet seat 863 | n04456115 torch 864 | n04458633 totem pole 865 | n04461696 tow truck, tow car, wrecker 866 | n04462240 toyshop 867 | n04465501 tractor 868 | 
n04467665 trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi 869 | n04476259 tray 870 | n04479046 trench coat 871 | n04482393 tricycle, trike, velocipede 872 | n04483307 trimaran 873 | n04485082 tripod 874 | n04486054 triumphal arch 875 | n04487081 trolleybus, trolley coach, trackless trolley 876 | n04487394 trombone 877 | n04493381 tub, vat 878 | n04501370 turnstile 879 | n04505470 typewriter keyboard 880 | n04507155 umbrella 881 | n04509417 unicycle, monocycle 882 | n04515003 upright, upright piano 883 | n04517823 vacuum, vacuum cleaner 884 | n04522168 vase 885 | n04523525 vault 886 | n04525038 velvet 887 | n04525305 vending machine 888 | n04532106 vestment 889 | n04532670 viaduct 890 | n04536866 violin, fiddle 891 | n04540053 volleyball 892 | n04542943 waffle iron 893 | n04548280 wall clock 894 | n04548362 wallet, billfold, notecase, pocketbook 895 | n04550184 wardrobe, closet, press 896 | n04552348 warplane, military plane 897 | n04553703 washbasin, handbasin, washbowl, lavabo, wash-hand basin 898 | n04554684 washer, automatic washer, washing machine 899 | n04557648 water bottle 900 | n04560804 water jug 901 | n04562935 water tower 902 | n04579145 whiskey jug 903 | n04579432 whistle 904 | n04584207 wig 905 | n04589890 window screen 906 | n04590129 window shade 907 | n04591157 Windsor tie 908 | n04591713 wine bottle 909 | n04592741 wing 910 | n04596742 wok 911 | n04597913 wooden spoon 912 | n04599235 wool, woolen, woollen 913 | n04604644 worm fence, snake fence, snake-rail fence, Virginia fence 914 | n04606251 wreck 915 | n04612504 yawl 916 | n04613696 yurt 917 | n06359193 web site, website, internet site, site 918 | n06596364 comic book 919 | n06785654 crossword puzzle, crossword 920 | n06794110 street sign 921 | n06874185 traffic light, traffic signal, stoplight 922 | n07248320 book jacket, dust cover, dust jacket, dust wrapper 923 | n07565083 menu 924 | n07579787 plate 925 | n07583066 guacamole 926 | n07584110 consomme 927 | n07590611 hot pot, hotpot 928 | n07613480 trifle 929 | n07614500 ice cream, icecream 930 | n07615774 ice lolly, lolly, lollipop, popsicle 931 | n07684084 French loaf 932 | n07693725 bagel, beigel 933 | n07695742 pretzel 934 | n07697313 cheeseburger 935 | n07697537 hotdog, hot dog, red hot 936 | n07711569 mashed potato 937 | n07714571 head cabbage 938 | n07714990 broccoli 939 | n07715103 cauliflower 940 | n07716358 zucchini, courgette 941 | n07716906 spaghetti squash 942 | n07717410 acorn squash 943 | n07717556 butternut squash 944 | n07718472 cucumber, cuke 945 | n07718747 artichoke, globe artichoke 946 | n07720875 bell pepper 947 | n07730033 cardoon 948 | n07734744 mushroom 949 | n07742313 Granny Smith 950 | n07745940 strawberry 951 | n07747607 orange 952 | n07749582 lemon 953 | n07753113 fig 954 | n07753275 pineapple, ananas 955 | n07753592 banana 956 | n07754684 jackfruit, jak, jack 957 | n07760859 custard apple 958 | n07768694 pomegranate 959 | n07802026 hay 960 | n07831146 carbonara 961 | n07836838 chocolate sauce, chocolate syrup 962 | n07860988 dough 963 | n07871810 meat loaf, meatloaf 964 | n07873807 pizza, pizza pie 965 | n07875152 potpie 966 | n07880968 burrito 967 | n07892512 red wine 968 | n07920052 espresso 969 | n07930864 cup 970 | n07932039 eggnog 971 | n09193705 alp 972 | n09229709 bubble 973 | n09246464 cliff, drop, drop-off 974 | n09256479 coral reef 975 | n09288635 geyser 976 | n09332890 lakeside, lakeshore 977 | n09399592 promontory, headland, head, foreland 978 | n09421951 sandbar, sand bar 979 | n09428293 seashore, 
coast, seacoast, sea-coast 980 | n09468604 valley, vale 981 | n09472597 volcano 982 | n09835506 ballplayer, baseball player 983 | n10148035 groom, bridegroom 984 | n10565667 scuba diver 985 | n11879895 rapeseed 986 | n11939491 daisy 987 | n12057211 yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum 988 | n12144580 corn 989 | n12267677 acorn 990 | n12620546 hip, rose hip, rosehip 991 | n12768682 buckeye, horse chestnut, conker 992 | n12985857 coral fungus 993 | n12998815 agaric 994 | n13037406 gyromitra 995 | n13040303 stinkhorn, carrion fungus 996 | n13044778 earthstar 997 | n13052670 hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa 998 | n13054560 bolete 999 | n13133613 ear, spike, capitulum 1000 | n15075141 toilet tissue, toilet paper, bathroom tissue 1001 | -------------------------------------------------------------------------------- /exp_3_3_style_transfer/.gitignore: -------------------------------------------------------------------------------- 1 | /.idea -------------------------------------------------------------------------------- /exp_3_3_style_transfer/main_exp_3_3.py: -------------------------------------------------------------------------------- 1 | # coding:utf-8 2 | from stu_upload.exp_3_3_style_transfer import * 3 | from stu_upload.layers_2 import ConvolutionalLayer, MaxPoolingLayer 4 | import numpy as np 5 | import struct 6 | import os 7 | import scipy.io 8 | import time 9 | 10 | 11 | def computeMse(data1, data2): 12 | errors = [] 13 | for i in range(len(data1)): 14 | errors.append(data1[i] - data2[i]) 15 | 16 | squared_error = [] 17 | for val in errors: 18 | squared_error.append(pow(val, 2)) 19 | 20 | return sum(squared_error) / len(squared_error) 21 | 22 | 23 | def test_speed_up(): 24 | test_data = np.random.rand(1, 256, 24, 40) 25 | test_dloss = np.random.rand(1, 256, 24, 40) 26 | test_filter = np.random.rand(256, 3, 3, 256) 27 | test_bias = np.random.rand(256) 28 | 29 | conv = ConvolutionalLayer(3, 256, 256, 1, 1) 30 | conv.init_param() 31 | conv.load_param(test_filter, test_bias) 32 | stamp = time.time() 33 | conv_forward_result = conv.forward(test_data) 34 | conv_forward_time = time.time() - stamp 35 | print('conv forward raw time: %f ms' % (conv_forward_time * 1000)) 36 | stamp = time.time() 37 | conv_backward_result = conv.backward(test_dloss) 38 | conv_backward_time = time.time() - stamp 39 | print('conv backward raw time: %f ms' % (conv_backward_time * 1000)) 40 | 41 | speedup_conv = ConvolutionalLayer(3, 256, 256, 1, 1, 1) 42 | speedup_conv.init_param() 43 | speedup_conv.load_param(test_filter, test_bias) 44 | stamp = time.time() 45 | speedup_conv_forward_result = speedup_conv.forward(test_data) 46 | speedup_conv_forward_time = time.time() - stamp 47 | print('conv forward speedup time: %f ms' % (speedup_conv_forward_time * 1000)) 48 | stamp = time.time() 49 | speedup_conv_backward_result = speedup_conv.backward(test_dloss) 50 | speedup_conv_backward_time = time.time() - stamp 51 | print('conv backward speedup time: %f ms' % (speedup_conv_backward_time * 1000)) 52 | 53 | speedup_conv_forward_mse = computeMse(conv_forward_result.flatten(), speedup_conv_forward_result.flatten()) 54 | speedup_conv_backward_mse = computeMse(conv_backward_result.flatten(), speedup_conv_backward_result.flatten()) 55 | if speedup_conv_forward_mse < 0.003 and speedup_conv_backward_mse < 0.003: 56 | print('SPEEDUP CONV TEST PASS.') 57 | else: 58 | print('SPEEDUP CONV TEST FAILED.') 59 | exit() 60 | 61 | print('CONV 
FORWARD SPEEDUP RATIO: %f' % (conv_forward_time / speedup_conv_forward_time)) 62 | print('CONV BACKWARD SPEEDUP RATIO: %f' % (conv_backward_time / speedup_conv_backward_time)) 63 | 64 | 65 | if __name__ == '__main__': 66 | np.random.seed(1234) 67 | test_speed_up() 68 | print('-------------------------') 69 | CONTENT_LOSS_LAYERS = ['relu4_2'] 70 | STYLE_LOSS_LAYERS = ['relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1'] 71 | NOISE = 0.5 72 | ALPHA, BETA = 1, 500 73 | TRAIN_STEP = 100 74 | LEARNING_RATE = 1.0 75 | IMAGE_HEIGHT, IMAGE_WIDTH = 192, 320 76 | 77 | vgg = VGG19(param_path='../imagenet-vgg-verydeep-19.mat') 78 | vgg.build_model() 79 | vgg.init_model() 80 | vgg.load_model() 81 | content_loss_layer = ContentLossLayer() 82 | style_loss_layer = StyleLossLayer() 83 | adam_optimizer = AdamOptimizer(1.0, [1, 3, IMAGE_HEIGHT, IMAGE_WIDTH]) 84 | 85 | content_image, content_shape = vgg.load_image('../weinisi.jpg', IMAGE_HEIGHT, IMAGE_WIDTH) 86 | style_image, _ = vgg.load_image('../style.jpg', IMAGE_HEIGHT, IMAGE_WIDTH) 87 | content_layers = vgg.forward(content_image, CONTENT_LOSS_LAYERS) 88 | style_layers = vgg.forward(style_image, STYLE_LOSS_LAYERS) 89 | transfer_image = get_random_img(content_image, NOISE) 90 | 91 | start = time.time() 92 | for step in range(TRAIN_STEP): 93 | transfer_layers = vgg.forward(transfer_image, CONTENT_LOSS_LAYERS + STYLE_LOSS_LAYERS) 94 | content_loss = np.array([]) 95 | style_loss = np.array([]) 96 | content_diff = np.zeros(transfer_image.shape) 97 | style_diff = np.zeros(transfer_image.shape) 98 | for layer in CONTENT_LOSS_LAYERS: 99 | current_loss = content_loss_layer.forward(transfer_layers[layer], content_layers[layer]) 100 | content_loss = np.append(content_loss, current_loss) 101 | dloss = content_loss_layer.backward(transfer_layers[layer], content_layers[layer]) 102 | content_diff += vgg.backward(dloss, layer) 103 | for layer in STYLE_LOSS_LAYERS: 104 | current_loss = style_loss_layer.forward(transfer_layers[layer], style_layers[layer]) 105 | style_loss = np.append(style_loss, current_loss) 106 | dloss = style_loss_layer.backward(transfer_layers[layer], style_layers[layer]) 107 | style_diff += vgg.backward(dloss, layer) 108 | total_loss = ALPHA * np.mean(content_loss) + BETA * np.mean(style_loss) 109 | image_diff = ALPHA * content_diff / len(CONTENT_LOSS_LAYERS) + BETA * style_diff / len(STYLE_LOSS_LAYERS) 110 | transfer_image = adam_optimizer.update(transfer_image, image_diff) 111 | if step % 1 == 0: 112 | print('Step %d, loss = %f' % (step, total_loss), content_loss, style_loss) 113 | print('cost time: %f' % (time.time() - start)) 114 | vgg.save_image(transfer_image, content_shape, 'output/output_' + str(step) + '.jpg') 115 | start = time.time() 116 | -------------------------------------------------------------------------------- /exp_3_3_style_transfer/output/output_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_3_3_style_transfer/output/output_0.jpg -------------------------------------------------------------------------------- /exp_3_3_style_transfer/output/output_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_3_3_style_transfer/output/output_1.jpg -------------------------------------------------------------------------------- 
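
The training loop in main_exp_3_3.py above calls style_loss_layer.forward/backward from stu_upload/layers_3.py, whose implementation is left to the student. For orientation only, here is a minimal sketch of the usual Gram-matrix style loss of Gatys et al., with the same two-argument interface inferred from the calls above; it assumes both feature maps share the shape [N, C, H, W], and the name StyleLossLayerSketch is ours, not the course's:

import numpy as np

class StyleLossLayerSketch(object):
    def forward(self, input_layer, style_layer):
        # flatten [N, C, H, W] feature maps to [N, C, M] with M = H * W
        N, C = input_layer.shape[0], input_layer.shape[1]
        self.M = input_layer.shape[2] * input_layer.shape[3]
        self.input_flat = input_layer.reshape(N, C, self.M)
        style_flat = style_layer.reshape(N, C, -1)
        # Gram matrices, shape [N, C, C]
        self.gram_input = np.matmul(self.input_flat, self.input_flat.transpose(0, 2, 1))
        gram_style = np.matmul(style_flat, style_flat.transpose(0, 2, 1))
        self.gram_diff = self.gram_input - gram_style
        # squared Gram difference, normalized as in Gatys et al.
        loss = np.sum(np.square(self.gram_diff)) / (4.0 * C * C * self.M * self.M * N)
        return loss

    def backward(self, input_layer, style_layer):
        N, C = input_layer.shape[0], input_layer.shape[1]
        # dE/dF = (G - A) F / (C^2 M^2 N), reshaped back to [N, C, H, W]
        bottom_diff = np.matmul(self.gram_diff, self.input_flat) / (C * C * self.M * self.M * N)
        return bottom_diff.reshape(input_layer.shape)

Because the Gram matrix G = F F^T is symmetric, the gradient of the squared difference with respect to the flattened features F is (G - A) F up to the normalization constant, which is what the single matmul in backward computes.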
/exp_3_3_style_transfer/output/output_2.jpg: 
-------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_3_3_style_transfer/output/output_2.jpg 
-------------------------------------------------------------------------------- 
/exp_3_3_style_transfer/output/output_3.jpg: 
-------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_3_3_style_transfer/output/output_3.jpg 
-------------------------------------------------------------------------------- 
/exp_3_3_style_transfer/output/output_4.jpg: 
-------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_3_3_style_transfer/output/output_4.jpg 
-------------------------------------------------------------------------------- 
/exp_3_3_style_transfer/readme.txt: 
-------------------------------------------------------------------------------- 
Complete the layers_1.py, layers_2.py, layers_3.py and exp_3_3_style_transfer.py files in stu_upload, then execute main_exp_3_3.py to run the experiment.
-------------------------------------------------------------------------------- 
/exp_3_3_style_transfer/requirements.txt: 
-------------------------------------------------------------------------------- 
numpy~=1.19.4
scipy~=1.5.4
-------------------------------------------------------------------------------- 
/exp_3_3_style_transfer/stu_upload/__init__.py: 
-------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/exp_3_3_style_transfer/stu_upload/__init__.py 
-------------------------------------------------------------------------------- 
/exp_3_3_style_transfer/stu_upload/exp_3_3_style_transfer.py: 
-------------------------------------------------------------------------------- 
# coding:utf-8
import numpy as np
import struct
import os
import scipy.io
import scipy.misc  # scipy.misc.imread/imresize/imsave require scipy < 1.2 with Pillow installed
import time

try:
    # imported as a package by main_exp_3_3.py
    from .layers_1 import FullyConnectedLayer, ReLULayer, SoftmaxLossLayer
    from .layers_2 import ConvolutionalLayer, MaxPoolingLayer, FlattenLayer
    from .layers_3 import ContentLossLayer, StyleLossLayer
except ImportError:
    # run directly from inside stu_upload/
    from layers_1 import FullyConnectedLayer, ReLULayer, SoftmaxLossLayer
    from layers_2 import ConvolutionalLayer, MaxPoolingLayer, FlattenLayer
    from layers_3 import ContentLossLayer, StyleLossLayer


class VGG19(object):
    def __init__(self, param_path='../../imagenet-vgg-verydeep-19.mat'):
        self.param_path = param_path
        self.param_layer_name = [
            'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
            'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
            'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
            'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
            'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5'
        ]

    def build_model(self):
        # Build the VGG19 network structure. You can set type=1 to use the optimized
        # convolution and pooling layers, e.g. ConvolutionalLayer(3, 3, 64, 1, 1, type=1).
        print('Building vgg-19 model...')

        self.layers = {}
        self.layers['conv1_1'] = ConvolutionalLayer(3, 3, 64, 1, 1)
        self.layers['relu1_1'] = ReLULayer()
        self.layers['conv1_2'] = ConvolutionalLayer(3, 64, 64, 1, 1)
        self.layers['relu1_2'] = ReLULayer()
        self.layers['pool1'] = MaxPoolingLayer(2, 2)

        # (completed) conv2_1 .. relu5_3 follow the same pattern; the channel widths
        # are the standard VGG19 ones given by param_layer_name above
        self.layers['conv2_1'] = ConvolutionalLayer(3, 64, 128, 1, 1)
        self.layers['relu2_1'] = ReLULayer()
        self.layers['conv2_2'] = ConvolutionalLayer(3, 128, 128, 1, 1)
        self.layers['relu2_2'] = ReLULayer()
        self.layers['pool2'] = MaxPoolingLayer(2, 2)
        self.layers['conv3_1'] = ConvolutionalLayer(3, 128, 256, 1, 1)
        self.layers['relu3_1'] = ReLULayer()
        self.layers['conv3_2'] = ConvolutionalLayer(3, 256, 256, 1, 1)
        self.layers['relu3_2'] = ReLULayer()
        self.layers['conv3_3'] = ConvolutionalLayer(3, 256, 256, 1, 1)
        self.layers['relu3_3'] = ReLULayer()
        self.layers['conv3_4'] = ConvolutionalLayer(3, 256, 256, 1, 1)
        self.layers['relu3_4'] = ReLULayer()
        self.layers['pool3'] = MaxPoolingLayer(2, 2)
        self.layers['conv4_1'] = ConvolutionalLayer(3, 256, 512, 1, 1)
        self.layers['relu4_1'] = ReLULayer()
        self.layers['conv4_2'] = ConvolutionalLayer(3, 512, 512, 1, 1)
        self.layers['relu4_2'] = ReLULayer()
        self.layers['conv4_3'] = ConvolutionalLayer(3, 512, 512, 1, 1)
        self.layers['relu4_3'] = ReLULayer()
        self.layers['conv4_4'] = ConvolutionalLayer(3, 512, 512, 1, 1)
        self.layers['relu4_4'] = ReLULayer()
        self.layers['pool4'] = MaxPoolingLayer(2, 2)
        self.layers['conv5_1'] = ConvolutionalLayer(3, 512, 512, 1, 1)
        self.layers['relu5_1'] = ReLULayer()
        self.layers['conv5_2'] = ConvolutionalLayer(3, 512, 512, 1, 1)
        self.layers['relu5_2'] = ReLULayer()
        self.layers['conv5_3'] = ConvolutionalLayer(3, 512, 512, 1, 1)
        self.layers['relu5_3'] = ReLULayer()

        self.layers['conv5_4'] = ConvolutionalLayer(3, 512, 512, 1, 1)
        self.layers['relu5_4'] = ReLULayer()
        self.layers['pool5'] = MaxPoolingLayer(2, 2)

        self.update_layer_list = []
        for layer_name in self.layers.keys():
            if 'conv' in layer_name:
                self.update_layer_list.append(layer_name)

    def init_model(self):
        print('Initializing parameters of each layer in vgg-19...')
        for layer_name in self.update_layer_list:
            self.layers[layer_name].init_param()

    def load_model(self):
        print('Loading parameters from file ' + self.param_path)
        params = scipy.io.loadmat(self.param_path)
        self.image_mean = params['normalization'][0][0][0]
        self.image_mean = np.mean(self.image_mean, axis=(0, 1))
        print('Get image mean: ' + str(self.image_mean))
        for idx in range(37):
            if 'conv' in self.param_layer_name[idx]:
                weight, bias = params['layers'][0][idx][0][0][0][0]
                # matconvnet: weights dim [height, width, in_channel, out_channel]
                # ours: weights dim [in_channel, height, width, out_channel]
                weight = np.transpose(weight, [2, 0, 1, 3])
                bias = bias.reshape(-1)
                self.layers[self.param_layer_name[idx]].load_param(weight, bias)

    def load_image(self, image_dir, image_height, image_width):
        print('Loading and preprocessing image from ' + image_dir)
        self.input_image = scipy.misc.imread(image_dir)
        image_shape = self.input_image.shape
        self.input_image = scipy.misc.imresize(self.input_image, [image_height, image_width, 3])
        self.input_image = np.array(self.input_image).astype(np.float32)
        self.input_image -= self.image_mean
        self.input_image = np.reshape(self.input_image, [1] + list(self.input_image.shape))
        # input dim [N, channel, height, width]
        self.input_image = np.transpose(self.input_image, [0, 3, 1, 2])
        return self.input_image, image_shape

    def save_image(self, input_image, image_shape, image_dir):
        # print('Save image at ' + image_dir)
        input_image = np.transpose(input_image, [0, 2, 3, 1])
        input_image = input_image[0] + self.image_mean
        input_image = np.clip(input_image, 0, 255).astype(np.uint8)
        input_image = scipy.misc.imresize(input_image, image_shape)
        scipy.misc.imsave(image_dir, input_image)

    def forward(self, input_image, layer_list):
        start_time = time.time()
        current = input_image
        layer_forward = {}
        for idx in range(len(self.param_layer_name)):
            # VGG19 forward pass: feed the current feature map through layer idx
            current = self.layers[self.param_layer_name[idx]].forward(current)
            if self.param_layer_name[idx] in layer_list:
                layer_forward[self.param_layer_name[idx]] = current
        # print('Forward time: %f' % (time.time()-start_time))
        return layer_forward

    def backward(self, dloss, layer_name):
        start_time = time.time()
        layer_idx = self.param_layer_name.index(layer_name)
        for idx in range(layer_idx, -1, -1):
            # VGG19 backward pass: propagate the diff from layer_name down to the input image
            dloss = self.layers[self.param_layer_name[idx]].backward(dloss)

        # print('Backward time: %f' % (time.time()-start_time))
        return dloss


def get_random_img(content_image, noise):
    noise_image = np.random.uniform(-20, 20, content_image.shape)
    random_img = noise_image * noise + content_image * (1 - noise)
    return random_img


class AdamOptimizer(object):
    def __init__(self, lr, diff_shape):
        self.beta1 = 0.9
        self.beta2 = 0.999
        self.eps = 1e-8
        self.lr = lr
        self.mt = np.zeros(diff_shape)
        self.vt = np.zeros(diff_shape)
        self.step = 0

    def update(self, input, grad):
        self.step += 1
        self.mt = self.beta1 * self.mt + (1 - self.beta1) * grad
        self.vt = self.beta2 * self.vt + (1 - self.beta2) * np.square(grad)
        mt_hat = self.mt / (1 - self.beta1 ** self.step)
        vt_hat = self.vt / (1 - self.beta2 ** self.step)
        # update the style-transfer image with the bias-corrected first and second moment estimates
        output = input - self.lr * mt_hat / (np.sqrt(vt_hat) + self.eps)
        return output


if __name__ == '__main__':

    CONTENT_LOSS_LAYERS = ['relu4_2']
    STYLE_LOSS_LAYERS = ['relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1']
    NOISE = 0.5
    ALPHA, BETA = 1, 500
    TRAIN_STEP = 100
    LEARNING_RATE = 1.0
    IMAGE_HEIGHT, IMAGE_WIDTH = 192, 320

    vgg = VGG19()
    vgg.build_model()
    vgg.init_model()
    vgg.load_model()
    content_loss_layer = ContentLossLayer()
    style_loss_layer = StyleLossLayer()
    adam_optimizer = AdamOptimizer(1.0, [1, 3, IMAGE_HEIGHT, IMAGE_WIDTH])

    content_image, content_shape = vgg.load_image('../../weinisi.jpg', IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image, _ = vgg.load_image('../../style.jpg', IMAGE_HEIGHT, IMAGE_WIDTH)
    content_layers = vgg.forward(content_image, CONTENT_LOSS_LAYERS)
    style_layers = vgg.forward(style_image, STYLE_LOSS_LAYERS)
    transfer_image = get_random_img(content_image, NOISE)

    for step in range(TRAIN_STEP):
        transfer_layers = vgg.forward(transfer_image, CONTENT_LOSS_LAYERS + STYLE_LOSS_LAYERS)
        content_loss = np.array([])
        style_loss = np.array([])
        content_diff = np.zeros(transfer_image.shape)
        style_diff = np.zeros(transfer_image.shape)
        for layer in CONTENT_LOSS_LAYERS:
            # content loss forward
            current_loss = content_loss_layer.forward(transfer_layers[layer], content_layers[layer])
            content_loss = np.append(content_loss, current_loss)
            # content loss backward, propagated through VGG down to the image
            dloss = content_loss_layer.backward(transfer_layers[layer], content_layers[layer])
            content_diff += vgg.backward(dloss, layer)
        for layer in STYLE_LOSS_LAYERS:
            # style loss forward
            current_loss = style_loss_layer.forward(transfer_layers[layer], style_layers[layer])
            style_loss = np.append(style_loss, current_loss)
            # style loss backward, propagated through VGG down to the image
            dloss = style_loss_layer.backward(transfer_layers[layer], style_layers[layer])
            style_diff += vgg.backward(dloss, layer)
        total_loss = ALPHA * np.mean(content_loss) + BETA * np.mean(style_loss)
        image_diff = ALPHA * content_diff / len(CONTENT_LOSS_LAYERS) + BETA * style_diff / len(STYLE_LOSS_LAYERS)
        # update the style-transfer image with the Adam optimizer
        transfer_image = adam_optimizer.update(transfer_image, image_diff)
        if step % 20 == 0:
            print('Step %d, loss = %f' % (step, total_loss), content_loss, style_loss)
            vgg.save_image(transfer_image, content_shape, '../output/output_' + str(step) + '.jpg')
-------------------------------------------------------------------------------- 
/exp_3_3_style_transfer/stu_upload/layers_1.py: 
-------------------------------------------------------------------------------- 
# coding=utf-8
import numpy as np
import struct
import os
import time


class FullyConnectedLayer(object):
    def __init__(self, num_input, num_output):  # initialize the fully connected layer
        self.num_input = num_input
        self.num_output = num_output
        print('\tFully connected layer with input %d, output %d.' % (self.num_input, self.num_output))
% (self.num_input, self.num_output)) 13 | 14 | def init_param(self, std=0.01): # 参数初始化 15 | self.weight = np.random.normal(loc=0.0, scale=std, size=(self.num_input, self.num_output)) 16 | self.bias = np.zeros([1, self.num_output]) 17 | 18 | def forward(self, input): # 前向传播计算 19 | start_time = time.time() 20 | self.input = input 21 | # TODO:全连接层的前向传播,计算输出结果 22 | self.output = ________________ 23 | return self.output 24 | 25 | def backward(self, top_diff): # 反向传播的计算 26 | # TODO:全连接层的反向传播,计算参数梯度和本层损失 27 | self.d_weight = ________________ 28 | self.d_bias = ________________ 29 | bottom_diff = ________________ 30 | return bottom_diff 31 | 32 | def update_param(self, lr): # 参数更新 33 | # TODO:对全连接层参数利用参数进行更新 34 | self.weight = ________________ 35 | self.bias = ________________ 36 | 37 | def load_param(self, weight, bias): # 参数加载 38 | assert self.weight.shape == weight.shape 39 | assert self.bias.shape == bias.shape 40 | self.weight = weight 41 | self.bias = bias 42 | 43 | def save_param(self): # 参数保存 44 | return self.weight, self.bias 45 | 46 | 47 | class ReLULayer(object): 48 | def __init__(self): 49 | print('\tReLU layer.') 50 | 51 | def forward(self, input): # 前向传播的计算 52 | start_time = time.time() 53 | self.input = input 54 | # TODO:ReLU层的前向传播,计算输出结果 55 | output = ________________ 56 | return output 57 | 58 | def backward(self, top_diff): # 反向传播的计算 59 | # TODO:ReLU层的反向传播,计算本层损失 60 | bottom_diff = ________________ 61 | return bottom_diff 62 | 63 | 64 | class SoftmaxLossLayer(object): 65 | def __init__(self): 66 | print('\tSoftmax loss layer.') 67 | 68 | def forward(self, input): # 前向传播的计算 69 | # TODO:softmax 损失层的前向传播,计算输出结果 70 | input_max = np.max(input, axis=1, keepdims=True) 71 | input_exp = np.exp(input - input_max) 72 | self.prob = ________________ 73 | return self.prob 74 | 75 | def get_loss(self, label): # 计算损失 76 | self.batch_size = self.prob.shape[0] 77 | self.label_onehot = np.zeros_like(self.prob) 78 | self.label_onehot[np.arange(self.batch_size), label] = 1.0 79 | loss = -np.sum(np.log(self.prob) * self.label_onehot) / self.batch_size 80 | return loss 81 | 82 | def backward(self): # 反向传播的计算 83 | # TODO:softmax 损失层的反向传播,计算本层损失 84 | bottom_diff = ________________ 85 | return bottom_diff 86 | -------------------------------------------------------------------------------- /exp_3_3_style_transfer/stu_upload/layers_2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import struct 3 | import os 4 | import time 5 | 6 | 7 | class ConvolutionalLayer(object): 8 | def __init__(self, kernel_size, channel_in, channel_out, padding, stride, type=0): 9 | self.kernel_size = kernel_size 10 | self.channel_in = channel_in 11 | self.channel_out = channel_out 12 | self.padding = padding 13 | self.stride = stride 14 | self.forward = self.forward_raw 15 | self.backward = self.backward_raw 16 | if type == 1: # type 设为 1 时,使用优化后的 foward 和 backward 函数 17 | self.forward = self.forward_speedup 18 | self.backward = self.backward_speedup 19 | print('\tConvolutional layer with kernel size %d, input channel %d, output channel %d.' 
/exp_3_3_style_transfer/stu_upload/layers_2.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import struct
3 | import os
4 | import time
5 | 
6 | 
7 | class ConvolutionalLayer(object):
8 |     def __init__(self, kernel_size, channel_in, channel_out, padding, stride, type=0):
9 |         self.kernel_size = kernel_size
10 |         self.channel_in = channel_in
11 |         self.channel_out = channel_out
12 |         self.padding = padding
13 |         self.stride = stride
14 |         self.forward = self.forward_raw
15 |         self.backward = self.backward_raw
16 |         if type == 1:  # when type is 1, use the speedup forward and backward functions
17 |             self.forward = self.forward_speedup
18 |             self.backward = self.backward_speedup
19 |         print('\tConvolutional layer with kernel size %d, input channel %d, output channel %d.' % (
20 |             self.kernel_size, self.channel_in, self.channel_out))
21 | 
22 |     def init_param(self, std=0.01):
23 |         self.weight = np.random.normal(loc=0.0, scale=std,
24 |                                        size=(self.channel_in, self.kernel_size, self.kernel_size, self.channel_out))
25 |         self.bias = np.zeros([self.channel_out])
26 | 
27 |     def forward_raw(self, input):
28 |         start_time = time.time()
29 |         self.input = input  # [N, C, H, W]
30 |         height = self.input.shape[2] + self.padding * 2
31 |         width = self.input.shape[3] + self.padding * 2
32 |         self.input_pad = np.zeros([self.input.shape[0], self.input.shape[1], height, width])
33 |         self.input_pad[:, :, self.padding:self.padding + self.input.shape[2],
34 |                        self.padding:self.padding + self.input.shape[3]] = self.input
35 |         height_out = (height - self.kernel_size) // self.stride + 1
36 |         width_out = (width - self.kernel_size) // self.stride + 1
37 |         self.output = np.zeros([self.input.shape[0], self.channel_out, height_out, width_out])
38 |         for idxn in range(self.input.shape[0]):
39 |             for idxc in range(self.channel_out):
40 |                 for idxh in range(height_out):
41 |                     for idxw in range(width_out):
42 |                         # Convolution forward: inner product of the receptive field and the
43 |                         # kernel of this output channel, plus the bias
44 |                         self.output[idxn, idxc, idxh, idxw] = np.sum(
45 |                             self.weight[:, :, :, idxc] *
46 |                             self.input_pad[idxn, :,
47 |                                            idxh * self.stride:idxh * self.stride + self.kernel_size,
48 |                                            idxw * self.stride:idxw * self.stride + self.kernel_size]) + self.bias[idxc]
49 |         self.forward_time = time.time() - start_time
50 |         return self.output
51 | 
52 |     def forward_speedup(self, input):
53 |         # One possible speedup (a sketch): im2col unrolls every receptive field into a
54 |         # row, turning the convolution into a single matrix multiplication
55 |         start_time = time.time()
56 |         self.input = input  # [N, C, H, W]
57 |         N, C = input.shape[0], input.shape[1]
58 |         self.input_pad = np.pad(input, ((0, 0), (0, 0), (self.padding, self.padding),
59 |                                         (self.padding, self.padding)))
60 |         self.height_out = (self.input_pad.shape[2] - self.kernel_size) // self.stride + 1
61 |         self.width_out = (self.input_pad.shape[3] - self.kernel_size) // self.stride + 1
62 |         self.col = np.zeros([N, self.height_out * self.width_out, C * self.kernel_size * self.kernel_size])
63 |         for idxh in range(self.height_out):
64 |             for idxw in range(self.width_out):
65 |                 self.col[:, idxh * self.width_out + idxw, :] = self.input_pad[
66 |                     :, :, idxh * self.stride:idxh * self.stride + self.kernel_size,
67 |                     idxw * self.stride:idxw * self.stride + self.kernel_size].reshape(N, -1)
68 |         output = np.matmul(self.col, self.weight.reshape(-1, self.channel_out)) + self.bias
69 |         self.output = output.reshape(N, self.height_out, self.width_out, self.channel_out).transpose(0, 3, 1, 2)
70 |         self.forward_time = time.time() - start_time
71 |         return self.output
72 | 
73 |     def backward_speedup(self, top_diff):
74 |         # Matching speedup backward (a sketch): one matmul for the gradients, then a
75 |         # col2im scatter of the column diffs back onto the padded input
76 |         start_time = time.time()
77 |         N, C = self.input.shape[0], self.input.shape[1]
78 |         top_diff_col = top_diff.transpose(0, 2, 3, 1).reshape(N, -1, self.channel_out)
79 |         self.d_weight = np.tensordot(self.col, top_diff_col, axes=([0, 1], [0, 1])).reshape(self.weight.shape)
80 |         self.d_bias = np.sum(top_diff_col, axis=(0, 1))
81 |         col_diff = np.matmul(top_diff_col, self.weight.reshape(-1, self.channel_out).T)
82 |         bottom_diff_pad = np.zeros(self.input_pad.shape)
83 |         for idxh in range(self.height_out):
84 |             for idxw in range(self.width_out):
85 |                 bottom_diff_pad[:, :, idxh * self.stride:idxh * self.stride + self.kernel_size,
86 |                                 idxw * self.stride:idxw * self.stride + self.kernel_size] += \
87 |                     col_diff[:, idxh * self.width_out + idxw, :].reshape(N, C, self.kernel_size, self.kernel_size)
88 |         bottom_diff = bottom_diff_pad[:, :, self.padding:self.padding + self.input.shape[2],
89 |                                       self.padding:self.padding + self.input.shape[3]]
90 |         self.backward_time = time.time() - start_time
91 |         return bottom_diff
92 | 
93 |     def backward_raw(self, top_diff):
94 |         start_time = time.time()
95 |         self.d_weight = np.zeros(self.weight.shape)
96 |         self.d_bias = np.zeros(self.bias.shape)
97 |         bottom_diff = np.zeros(self.input_pad.shape)
98 |         for idxn in range(top_diff.shape[0]):
99 |             for idxc in range(top_diff.shape[1]):
100 |                 for idxh in range(top_diff.shape[2]):
101 |                     for idxw in range(top_diff.shape[3]):
102 |                         # Convolution backward: accumulate the weight and bias gradients,
103 |                         # and scatter the diff back onto the padded input
104 |                         self.d_weight[:, :, :, idxc] += top_diff[idxn, idxc, idxh, idxw] * \
105 |                             self.input_pad[idxn, :,
106 |                                            idxh * self.stride:idxh * self.stride + self.kernel_size,
107 |                                            idxw * self.stride:idxw * self.stride + self.kernel_size]
108 |                         self.d_bias[idxc] += top_diff[idxn, idxc, idxh, idxw]
109 |                         bottom_diff[idxn, :, idxh * self.stride:idxh * self.stride + self.kernel_size,
110 |                                     idxw * self.stride:idxw * self.stride + self.kernel_size] += \
111 |                             top_diff[idxn, idxc, idxh, idxw] * self.weight[:, :, :, idxc]
112 |         bottom_diff = bottom_diff[:, :, self.padding:self.padding + self.input.shape[2],
113 |                                   self.padding:self.padding + self.input.shape[3]]
114 |         self.backward_time = time.time() - start_time
115 |         return bottom_diff
116 | 
117 |     def get_gradient(self):
118 |         return self.d_weight, self.d_bias
119 | 
120 |     def update_param(self, lr):
121 |         self.weight += - lr * self.d_weight
122 |         self.bias += - lr * self.d_bias
123 | 
124 |     def load_param(self, weight, bias):
125 |         assert self.weight.shape == weight.shape
126 |         assert self.bias.shape == bias.shape
127 |         self.weight = weight
128 |         self.bias = bias
129 | 
130 |     def get_forward_time(self):
131 |         return self.forward_time
132 | 
133 |     def get_backward_time(self):
134 |         return self.backward_time
135 | 
136 | 
137 | class MaxPoolingLayer(object):
138 |     def __init__(self, kernel_size, stride, type=0):
139 |         self.kernel_size = kernel_size
140 |         self.stride = stride
141 |         self.forward = self.forward_raw
142 |         self.backward = self.backward_raw_book
143 |         if type == 1:  # when type is 1, use the speedup forward and backward functions
144 |             self.forward = self.forward_speedup
145 |             self.backward = self.backward_speedup
146 |         print('\tMax pooling layer with kernel size %d, stride %d.' % (self.kernel_size, self.stride))
147 | 
148 |     def forward_raw(self, input):
149 |         start_time = time.time()
150 |         self.input = input  # [N, C, H, W]
151 |         self.max_index = np.zeros(self.input.shape)
152 |         height_out = (self.input.shape[2] - self.kernel_size) // self.stride + 1
153 |         width_out = (self.input.shape[3] - self.kernel_size) // self.stride + 1
154 |         self.output = np.zeros([self.input.shape[0], self.input.shape[1], height_out, width_out])
155 |         for idxn in range(self.input.shape[0]):
156 |             for idxc in range(self.input.shape[1]):
157 |                 for idxh in range(height_out):
158 |                     for idxw in range(width_out):
159 |                         # Max pooling forward: take the maximum inside the pooling window
160 |                         self.output[idxn, idxc, idxh, idxw] = np.max(
161 |                             self.input[idxn, idxc,
162 |                                        idxh * self.stride:idxh * self.stride + self.kernel_size,
163 |                                        idxw * self.stride:idxw * self.stride + self.kernel_size])
164 |                         current_max_index = np.argmax(
165 |                             self.input[idxn, idxc,
166 |                                        idxh * self.stride:idxh * self.stride + self.kernel_size,
167 |                                        idxw * self.stride:idxw * self.stride + self.kernel_size])
168 |                         current_max_index = np.unravel_index(current_max_index,
169 |                                                              [self.kernel_size, self.kernel_size])
170 |                         self.max_index[idxn, idxc,
171 |                                        idxh * self.stride + current_max_index[0],
172 |                                        idxw * self.stride + current_max_index[1]] = 1
173 |         return self.output
174 | 
175 |     def forward_speedup(self, input):
176 |         # One possible speedup (a sketch): reshape into non-overlapping blocks and
177 |         # reduce; assumes stride == kernel_size and spatial dims divisible by the
178 |         # kernel size, which holds for the VGG19 pooling layers used here
179 |         start_time = time.time()
180 |         self.input = input  # [N, C, H, W]
181 |         N, C, H, W = input.shape
182 |         k = self.kernel_size
183 |         height_out, width_out = H // k, W // k
184 |         blocks = input.reshape(N, C, height_out, k, width_out, k).transpose(0, 1, 2, 4, 3, 5)
185 |         self.blocks = blocks.reshape(N, C, height_out, width_out, k * k)
186 |         self.block_max_index = np.argmax(self.blocks, axis=4)
187 |         self.output = np.max(self.blocks, axis=4)
188 |         self.forward_time = time.time() - start_time
189 |         return self.output
190 | 
191 |     def backward_speedup(self, top_diff):
192 |         # Matching speedup backward: route each diff to the recorded argmax in its block
193 |         N, C, height_out, width_out = top_diff.shape
194 |         k = self.kernel_size
195 |         block_diff = np.zeros([N, C, height_out, width_out, k * k])
196 |         idxn, idxc, idxh, idxw = np.indices(top_diff.shape)
197 |         block_diff[idxn, idxc, idxh, idxw, self.block_max_index] = top_diff
198 |         bottom_diff = block_diff.reshape(N, C, height_out, width_out, k, k) \
199 |             .transpose(0, 1, 2, 4, 3, 5).reshape(N, C, height_out * k, width_out * k)
200 |         return bottom_diff
201 | 
202 |     def backward_raw_book(self, top_diff):
203 |         bottom_diff = np.zeros(self.input.shape)
204 |         for idxn in range(top_diff.shape[0]):
205 |             for idxc in range(top_diff.shape[1]):
206 |                 for idxh in range(top_diff.shape[2]):
207 |                     for idxw in range(top_diff.shape[3]):
208 |                         # Max pooling backward: find the argmax inside the window and
209 |                         # pass the diff through to that position only
210 |                         max_index = np.unravel_index(
211 |                             np.argmax(self.input[idxn, idxc,
212 |                                                  idxh * self.stride:idxh * self.stride + self.kernel_size,
213 |                                                  idxw * self.stride:idxw * self.stride + self.kernel_size]),
214 |                             [self.kernel_size, self.kernel_size])
215 |                         bottom_diff[idxn, idxc,
216 |                                     idxh * self.stride + max_index[0],
217 |                                     idxw * self.stride + max_index[1]] = top_diff[idxn, idxc, idxh, idxw]
218 |         return bottom_diff
219 | 
220 | 
221 | class FlattenLayer(object):
222 |     def __init__(self, input_shape, output_shape):
223 |         self.input_shape = input_shape
224 |         self.output_shape = output_shape
225 |         assert np.prod(self.input_shape) == np.prod(self.output_shape)
226 |         print('\tFlatten layer with input shape %s, output shape %s.'
227 |               % (str(self.input_shape), str(self.output_shape)))
228 | 
229 |     def forward(self, input):
230 |         assert list(input.shape[1:]) == list(self.input_shape)
231 |         # matconvnet feature map dim: [N, height, width, channel]
232 |         # ours feature map dim: [N, channel, height, width]
233 |         self.input = np.transpose(input, [0, 2, 3, 1])
234 |         self.output = self.input.reshape([self.input.shape[0]] + list(self.output_shape))
235 |         return self.output
236 | 
237 |     def backward(self, top_diff):
238 |         assert list(top_diff.shape[1:]) == list(self.output_shape)
239 |         top_diff = np.transpose(top_diff, [0, 3, 1, 2])
240 |         bottom_diff = top_diff.reshape([top_diff.shape[0]] + list(self.input_shape))
241 |         return bottom_diff
--------------------------------------------------------------------------------
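A minimal equivalence check for the speedup paths sketched above (assumes it is run from inside stu_upload/ so that layers_2.py is importable): the im2col forward and the reshape-based pooling should reproduce the naive loops to floating-point precision.

import numpy as np
from layers_2 import ConvolutionalLayer, MaxPoolingLayer

np.random.seed(0)
x = np.random.randn(2, 3, 8, 8)

# Convolution: same parameters in both layers, raw vs. speedup path.
conv_raw = ConvolutionalLayer(3, 3, 4, 1, 1, type=0)
conv_fast = ConvolutionalLayer(3, 3, 4, 1, 1, type=1)
conv_raw.init_param()
conv_fast.init_param()
conv_fast.load_param(conv_raw.weight, conv_raw.bias)
print(np.max(np.abs(conv_raw.forward(x) - conv_fast.forward(x))))  # ~1e-15

# Max pooling: stride == kernel_size, as in VGG19.
pool_raw = MaxPoolingLayer(2, 2, type=0)
pool_fast = MaxPoolingLayer(2, 2, type=1)
print(np.max(np.abs(pool_raw.forward(x) - pool_fast.forward(x))))  # 0.0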
/exp_3_3_style_transfer/stu_upload/layers_3.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import numpy as np
3 | import struct
4 | import os
5 | import scipy.io
6 | import time
7 | 
8 | 
9 | class ContentLossLayer(object):
10 |     def __init__(self):
11 |         print('\tContent loss layer.')
12 | 
13 |     def forward(self, input_layer, content_layer):
14 |         # Content loss between the transfer features and the content features,
15 |         # normalized by the feature map size: L = sum((F - P)^2) / (2 N C H W)
16 |         N, C, H, W = input_layer.shape
17 |         loss = np.sum(np.square(input_layer - content_layer)) / (2.0 * N * C * H * W)
18 |         return loss
19 | 
20 |     def backward(self, input_layer, content_layer):
21 |         # Backward pass of the content loss: dL/dF = (F - P) / (N C H W)
22 |         N, C, H, W = input_layer.shape
23 |         bottom_diff = (input_layer - content_layer) / (1.0 * N * C * H * W)
24 |         return bottom_diff
25 | 
26 | 
27 | class StyleLossLayer(object):
28 |     def __init__(self):
29 |         print('\tStyle loss layer.')
30 | 
31 |     def forward(self, input_layer, style_layer):
32 |         # Gram matrices of the transfer features and the style features
33 |         style_layer_reshape = np.reshape(style_layer, [style_layer.shape[0], style_layer.shape[1], -1])
34 |         self.gram_style = np.matmul(style_layer_reshape, np.transpose(style_layer_reshape, [0, 2, 1]))
35 |         self.input_layer_reshape = np.reshape(input_layer, [input_layer.shape[0], input_layer.shape[1], -1])
36 |         self.gram_input = np.zeros([input_layer.shape[0], input_layer.shape[1], input_layer.shape[1]])
37 |         for idxn in range(input_layer.shape[0]):
38 |             self.gram_input[idxn, :, :] = np.matmul(self.input_layer_reshape[idxn, :, :],
39 |                                                     self.input_layer_reshape[idxn, :, :].T)
40 | 
41 |         M = input_layer.shape[2] * input_layer.shape[3]
42 |         N = input_layer.shape[1]
43 |         self.div = M * M * N * N
44 |         # Style loss between the Gram matrices, following the common
45 |         # 1 / (4 N^2 M^2) convention, averaged over the batch
46 |         style_diff = np.sum(np.square(self.gram_input - self.gram_style), axis=(1, 2))
47 |         loss = np.sum(style_diff) / (input_layer.shape[0] * 4.0 * self.div)
48 |         return loss
49 | 
50 |     def backward(self, input_layer, style_layer):
51 |         bottom_diff = np.zeros(
52 |             [input_layer.shape[0], input_layer.shape[1], input_layer.shape[2] * input_layer.shape[3]])
53 |         for idxn in range(input_layer.shape[0]):
54 |             # Backward pass of the style loss: dL/dF = (G - A) F / (N^2 M^2), with
55 |             # the same batch averaging as the forward pass
56 |             bottom_diff[idxn, :, :] = np.matmul(self.gram_input[idxn, :, :] - self.gram_style[idxn, :, :],
57 |                                                 self.input_layer_reshape[idxn, :, :]) / (input_layer.shape[0] * self.div)
58 |         bottom_diff = np.reshape(bottom_diff, input_layer.shape)
59 |         return bottom_diff
--------------------------------------------------------------------------------
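The Gram matrix is the heart of the style loss above: G[i, j] is the inner product of flattened channels i and j, so it records which channels fire together while discarding where they fire. A toy example worked by hand (standalone, 1 image, 2 channels, 2x2 spatial):

import numpy as np

# Channel 0 is [0, 1, 2, 3] and channel 1 is [4, 5, 6, 7] after flattening.
features = np.arange(8, dtype=np.float64).reshape(1, 2, 2, 2)
f = features.reshape(1, 2, -1)               # [N, C, H*W]
gram = np.matmul(f, f.transpose(0, 2, 1))    # [N, C, C]
print(gram[0])
# [[ 14.  38.]     G[0,0] = 0+1+4+9,  G[0,1] = 0+5+12+21,
#  [ 38. 126.]]    G[1,1] = 16+25+36+49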
/mnist_data/t10k-images-idx3-ubyte:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/mnist_data/t10k-images-idx3-ubyte
--------------------------------------------------------------------------------
/mnist_data/t10k-labels-idx1-ubyte:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/mnist_data/t10k-labels-idx1-ubyte
--------------------------------------------------------------------------------
/mnist_data/train-images-idx3-ubyte:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/mnist_data/train-images-idx3-ubyte
--------------------------------------------------------------------------------
/mnist_data/train-labels-idx1-ubyte:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/mnist_data/train-labels-idx1-ubyte
--------------------------------------------------------------------------------
/智能计算实验PDF/2.1实验PDF.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/智能计算实验PDF/2.1实验PDF.pdf
--------------------------------------------------------------------------------
/智能计算实验PDF/2.2实验报告.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/智能计算实验PDF/2.2实验报告.pdf
--------------------------------------------------------------------------------
/智能计算实验PDF/3.1实验报告.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/智能计算实验PDF/3.1实验报告.pdf
--------------------------------------------------------------------------------
/智能计算实验PDF/3.2实验报告.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/智能计算实验PDF/3.2实验报告.pdf
--------------------------------------------------------------------------------
/智能计算实验PDF/3.3实验报告.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/智能计算实验PDF/3.3实验报告.pdf
--------------------------------------------------------------------------------
/课件/ch1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/课件/ch1.pdf
--------------------------------------------------------------------------------
/课件/ch2.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/课件/ch2.pdf
--------------------------------------------------------------------------------
/课件/ch3.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/课件/ch3.pdf
--------------------------------------------------------------------------------
/课件/ch4.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/课件/ch4.pdf
--------------------------------------------------------------------------------
/课件/ch5.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/课件/ch5.pdf
--------------------------------------------------------------------------------
/课件/ch6.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/课件/ch6.pdf
--------------------------------------------------------------------------------
/课件/ch7.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/课件/ch7.pdf
--------------------------------------------------------------------------------
/课件/ch8.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ismdeep/AI-Computing-Systems/2e003bc8ffe9a55a5d7ad722e1e1da8d4e0d3a96/课件/ch8.pdf
--------------------------------------------------------------------------------