├── README.md ├── 实验1-1190201215-冯开来 ├── main.py ├── requirements.txt └── 实验1-1190201215-冯开来.pdf ├── 实验2-1190201215-冯开来 ├── main.py ├── model │ └── net.png ├── requirements.txt ├── runs │ └── May26_11-50-15_LAPTOP-EMMOR93D │ │ └── events.out.tfevents.1653537015.LAPTOP-EMMOR93D.38396.0 └── 实验2-1190201215-冯开来.pdf ├── 实验3-1190201215-冯开来-1190201225-韩庸平 ├── codes │ ├── ResNet.py │ ├── SE_ResNet.py │ ├── VGG.py │ ├── blocks.py │ ├── data_path.py │ ├── main.py │ ├── model.py │ ├── train.py │ └── writer.py ├── results │ └── ResNet_Adam_augmentation.csv └── 实验报告3.pdf ├── 实验4-1190201215-冯开来 ├── codes │ ├── datasets.py │ ├── engine.py │ ├── main.py │ ├── models.py │ └── requirements.txt ├── models │ └── lab4曾经保存的模型.jpg ├── results │ ├── 文本分类 │ │ ├── Bi-lstm pricious recal f1.jpg │ │ ├── Bi-lstm-loss.png │ │ ├── GRU-loss.png │ │ ├── GRU-precious&recall&f1.jpg │ │ ├── RNN-accuracy&recall&f1.jpg │ │ ├── RNN-loss.png │ │ ├── lstm-accuracy&recall&f1.jpg │ │ ├── lstm-loss and accuracy.jpg │ │ └── lstm-loss.png │ └── 温度预测 │ │ ├── GRU-climate-loss.png │ │ ├── pred-true-0.jpg │ │ ├── pred-true-1.jpg │ │ ├── pred-true-2.jpg │ │ ├── pred-true-3.jpg │ │ ├── pred-true-4.jpg │ │ ├── pred-true-5.jpg │ │ ├── pred-true-6.jpg │ │ ├── pred-true-7.jpg │ │ ├── pred-true-8.jpg │ │ ├── pred-true-9.jpg │ │ ├── test-climate-result.jpg │ │ └── test-climate-running.jpg └── 实验4-1190201215-冯开来.pdf ├── 实验5-1190201215-冯开来 ├── SeFa生成的动图 │ └── pggan_celebahq1024_N5_K5_seed0.mov ├── results │ ├── GAN-adam │ │ ├── epoch-120.jpg │ │ ├── epoch-150.jpg │ │ ├── epoch-180.jpg │ │ ├── epoch-210.jpg │ │ ├── epoch-240.jpg │ │ ├── epoch-270.jpg │ │ ├── epoch-30.jpg │ │ ├── epoch-300.jpg │ │ ├── epoch-330.jpg │ │ ├── epoch-360.jpg │ │ ├── epoch-390.jpg │ │ ├── epoch-420.jpg │ │ ├── epoch-450.jpg │ │ ├── epoch-480.jpg │ │ ├── epoch-510.jpg │ │ ├── epoch-540.jpg │ │ ├── epoch-570.jpg │ │ ├── epoch-60.jpg │ │ ├── epoch-600.jpg │ │ ├── epoch-630.jpg │ │ ├── epoch-660.jpg │ │ ├── epoch-690.jpg │ │ ├── epoch-720.jpg │ │ ├── epoch-750.jpg │ │ ├── epoch-780.jpg │ │ ├── epoch-810.jpg │ │ ├── epoch-840.jpg │ │ ├── epoch-870.jpg │ │ ├── epoch-90.jpg │ │ ├── epoch-900.jpg │ │ ├── epoch-930.jpg │ │ ├── epoch-960.jpg │ │ └── epoch-990.jpg │ ├── GAN-sgd │ │ ├── epoch-120.jpg │ │ ├── epoch-150.jpg │ │ ├── epoch-180.jpg │ │ ├── epoch-210.jpg │ │ ├── epoch-240.jpg │ │ ├── epoch-270.jpg │ │ ├── epoch-30.jpg │ │ ├── epoch-300.jpg │ │ ├── epoch-330.jpg │ │ ├── epoch-360.jpg │ │ ├── epoch-390.jpg │ │ ├── epoch-420.jpg │ │ ├── epoch-450.jpg │ │ ├── epoch-480.jpg │ │ ├── epoch-510.jpg │ │ ├── epoch-540.jpg │ │ ├── epoch-570.jpg │ │ ├── epoch-60.jpg │ │ ├── epoch-600.jpg │ │ ├── epoch-630.jpg │ │ ├── epoch-660.jpg │ │ ├── epoch-690.jpg │ │ ├── epoch-720.jpg │ │ ├── epoch-750.jpg │ │ ├── epoch-780.jpg │ │ ├── epoch-810.jpg │ │ ├── epoch-840.jpg │ │ ├── epoch-870.jpg │ │ ├── epoch-90.jpg │ │ ├── epoch-900.jpg │ │ ├── epoch-930.jpg │ │ ├── epoch-960.jpg │ │ └── epoch-990.jpg │ ├── GAN │ │ ├── epoch-120.jpg │ │ ├── epoch-150.jpg │ │ ├── epoch-180.jpg │ │ ├── epoch-210.jpg │ │ ├── epoch-240.jpg │ │ ├── epoch-270.jpg │ │ ├── epoch-30.jpg │ │ ├── epoch-300.jpg │ │ ├── epoch-330.jpg │ │ ├── epoch-360.jpg │ │ ├── epoch-390.jpg │ │ ├── epoch-420.jpg │ │ ├── epoch-450.jpg │ │ ├── epoch-480.jpg │ │ ├── epoch-510.jpg │ │ ├── epoch-540.jpg │ │ ├── epoch-570.jpg │ │ ├── epoch-60.jpg │ │ ├── epoch-600.jpg │ │ ├── epoch-630.jpg │ │ ├── epoch-660.jpg │ │ ├── epoch-690.jpg │ │ ├── epoch-720.jpg │ │ ├── epoch-750.jpg │ │ ├── epoch-780.jpg │ │ ├── epoch-810.jpg │ │ ├── epoch-840.jpg │ │ 
├── epoch-870.jpg │ │ ├── epoch-90.jpg │ │ ├── epoch-900.jpg │ │ ├── epoch-930.jpg │ │ ├── epoch-960.jpg │ │ └── epoch-990.jpg │ ├── WGAN-GP │ │ ├── epoch-120.jpg │ │ ├── epoch-150.jpg │ │ ├── epoch-180.jpg │ │ ├── epoch-210.jpg │ │ ├── epoch-240.jpg │ │ ├── epoch-270.jpg │ │ ├── epoch-30.jpg │ │ ├── epoch-300.jpg │ │ ├── epoch-330.jpg │ │ ├── epoch-360.jpg │ │ ├── epoch-390.jpg │ │ ├── epoch-420.jpg │ │ ├── epoch-450.jpg │ │ ├── epoch-480.jpg │ │ ├── epoch-510.jpg │ │ ├── epoch-540.jpg │ │ ├── epoch-570.jpg │ │ ├── epoch-60.jpg │ │ ├── epoch-600.jpg │ │ ├── epoch-630.jpg │ │ ├── epoch-660.jpg │ │ ├── epoch-690.jpg │ │ ├── epoch-720.jpg │ │ ├── epoch-750.jpg │ │ ├── epoch-780.jpg │ │ ├── epoch-810.jpg │ │ ├── epoch-840.jpg │ │ ├── epoch-870.jpg │ │ ├── epoch-90.jpg │ │ ├── epoch-900.jpg │ │ ├── epoch-930.jpg │ │ ├── epoch-960.jpg │ │ └── epoch-990.jpg │ ├── WGAN │ │ ├── epoch-120.jpg │ │ ├── epoch-150.jpg │ │ ├── epoch-180.jpg │ │ ├── epoch-210.jpg │ │ ├── epoch-240.jpg │ │ ├── epoch-270.jpg │ │ ├── epoch-30.jpg │ │ ├── epoch-300.jpg │ │ ├── epoch-330.jpg │ │ ├── epoch-360.jpg │ │ ├── epoch-390.jpg │ │ ├── epoch-420.jpg │ │ ├── epoch-450.jpg │ │ ├── epoch-480.jpg │ │ ├── epoch-510.jpg │ │ ├── epoch-540.jpg │ │ ├── epoch-570.jpg │ │ ├── epoch-60.jpg │ │ ├── epoch-600.jpg │ │ ├── epoch-630.jpg │ │ ├── epoch-660.jpg │ │ ├── epoch-690.jpg │ │ ├── epoch-720.jpg │ │ ├── epoch-750.jpg │ │ ├── epoch-780.jpg │ │ ├── epoch-810.jpg │ │ ├── epoch-840.jpg │ │ ├── epoch-870.jpg │ │ ├── epoch-90.jpg │ │ ├── epoch-900.jpg │ │ ├── epoch-930.jpg │ │ ├── epoch-960.jpg │ │ └── epoch-990.jpg │ └── loss │ │ ├── GAN.jpg │ │ ├── WGAN-GP.jpg │ │ └── WGAN.jpg ├── sefa.py ├── 实验5-1190201215-冯开来.pdf ├── 对抗生成式网络代码 │ ├── datasets.py │ ├── draw.py │ ├── main.py │ ├── models.py │ └── requirements.txt └── 拟合分布的动图 │ ├── GAN-adam.gif │ ├── GAN-rmsprop.gif │ ├── GAN-sgd.gif │ ├── WGAN-GP.gif │ └── WGAN.gif └── 实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇 ├── Lab6_code ├── __pycache__ │ ├── darknet.cpython-38.pyc │ └── utils.cpython-38.pyc ├── cfg │ └── yolov3.cfg ├── darknet.py ├── data │ └── coco.names ├── det │ ├── 000000000139.jpg │ ├── 000000000632.jpg │ ├── 000000000785.jpg │ ├── 000000000885.jpg │ ├── 000000001532.jpg │ ├── 000000001761.jpg │ ├── 000000002149.jpg │ ├── 000000002157.jpg │ ├── 000000007108.jpg │ └── 000000007816.jpg ├── imgs │ ├── 000000000139.jpg │ ├── 000000000632.jpg │ ├── 000000000785.jpg │ ├── 000000000885.jpg │ ├── 000000001532.jpg │ ├── 000000001761.jpg │ ├── 000000002149.jpg │ ├── 000000002157.jpg │ ├── 000000007108.jpg │ └── 000000007816.jpg ├── main.py ├── pallete └── utils.py ├── log.txt └── 实验报告6.docx /README.md: -------------------------------------------------------------------------------- 1 | # HIT2022PatternRecognition-DeepLearning 2 | 哈工大2022春模式识别与深度学习 3 | 4 | 仅供学习参考,不建议直接套用搬运 5 | 6 | 各位学弟学妹clone后star下啦~ 7 | 8 | 有问题欢迎联系carlo_fkl@163.com 9 | -------------------------------------------------------------------------------- /实验1-1190201215-冯开来/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Python 3.8 3 | torch 1.11 4 | torchvision 0.12 5 | device: cpu 6 | """ 7 | import datetime 8 | import random 9 | import time 10 | import torch 11 | import torch.nn.functional as F 12 | import torchvision.transforms as transforms 13 | import numpy as np 14 | import argparse 15 | import matplotlib.pyplot as plt 16 | from torch.utils.data import DataLoader 17 | from torchvision import datasets 18 | from pathlib import Path 19 | import os 20 | 21 | 
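# Note on the MLP defined below: torch.nn.CrossEntropyLoss applies log-softmax internally
# and expects raw logits, while the forward() further down returns F.softmax(...). Training
# still works, but gradients are weaker than with plain logits. A minimal logits-only
# variant (a sketch; the submitted code keeps the softmax) would be:
#
#     def forward(self, x):
#         x = self.Flatten(x)
#         out = F.relu(self.fc1(x))
#         out = F.relu(self.fc2(out))
#         return self.fc3(out)  # raw logits; apply softmax only when probabilities are needed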
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' 22 | 23 | 24 | class MLP(torch.nn.Module): 25 | def __init__(self): 26 | super(MLP, self).__init__() # 27 | self.Flatten = torch.nn.Flatten() 28 | # 初始化三层神经网络 两个全连接的隐藏层,一个输出层 29 | self.fc1 = torch.nn.Linear(784, 512) # 隐含层 30 | self.fc2 = torch.nn.Linear(512, 128) # 隐含层 31 | self.fc3 = torch.nn.Linear(128, 10) # 输出层 32 | 33 | def forward(self, x): 34 | # 前向传播,输入值:x,返回值out 35 | x = self.Flatten(x) # 将一个多行的Tensor,拼接成一行 36 | out = F.relu(self.fc1(x)) # 使用 relu 激活函数 37 | out = F.relu(self.fc2(out)) 38 | out = F.softmax(self.fc3(out), dim=1) # 输出层使用softmax 39 | # 784×1的张量最后输出为10×1的张量, 40 | # 每个值为0-9类别的概率分布,最后选取概率最大的作为预测值输出 41 | return out 42 | 43 | 44 | def main(args): 45 | 46 | # device = args.device 47 | output_dir = Path(args.output_dir) 48 | 49 | # fix the seed for reproducibility 50 | seed = args.seed 51 | torch.manual_seed(seed) 52 | np.random.seed(seed) 53 | random.seed(seed) 54 | 55 | # Build the model 56 | model = MLP() 57 | model.to('cpu') 58 | 59 | # Loss 60 | lossfunc = torch.nn.CrossEntropyLoss() 61 | 62 | # Set up optimizers 63 | if args.optimizer == 'sgd': 64 | optimizer = torch.optim.SGD(params=model.parameters(), lr=0.01, momentum=0.9) 65 | elif args.optimizer in ["adam", "adamw"]: 66 | optimizer = torch.optim.AdamW(params=model.parameters(), lr=0.0001) 67 | 68 | # Dataset 69 | dataset_train = datasets.MNIST(root='./dataset/mnist', train=True, 70 | download=True, transform=transforms.ToTensor()) 71 | dataset_test = datasets.MNIST(root='./data/mnist', train=False, 72 | download=True, transform=transforms.ToTensor()) 73 | data_loader_train = DataLoader(dataset_train, args.batch_size) 74 | data_loader_test = DataLoader(dataset_test, args.batch_size) 75 | 76 | # epoch 77 | print("Start training\n") 78 | start_time = time.time() 79 | train_loss_pic = [] 80 | accuracy_pic = [] 81 | 82 | for epoch in range(args.epochs): 83 | print(f"Starting epoch {epoch+1}") 84 | train_loss = 0.0 85 | best_metric = 0.0 86 | 87 | print("train_one_epoch begin") 88 | 89 | # train 90 | for data, target in data_loader_train: 91 | optimizer.zero_grad() # 清空上一步的残余,更新参数值 92 | output = model(data) # 得到预测值 93 | loss = lossfunc(output, target) # 计算两者的误差 94 | loss.backward() # 误差反向传播, 计算参数更新值 95 | optimizer.step() # 将参数更新值施加到 net 的 parameters 上 96 | train_loss += loss.item() * data.size(0) 97 | 98 | train_loss = train_loss / len(data_loader_train.dataset) 99 | train_loss_pic.append(train_loss) 100 | print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch + 1, train_loss)) 101 | 102 | # val 103 | correct = 0 104 | total = 0 105 | with torch.no_grad(): 106 | for data in data_loader_test: 107 | images, labels = data 108 | outputs = model(images) 109 | _, predicted = torch.max(outputs.data, 1) 110 | total += labels.size(0) 111 | correct += (predicted == labels).sum().item() 112 | accuracy = 100 * correct / total 113 | accuracy_pic.append(accuracy) 114 | print('Accuracy on the test images: {:.2%}'.format(correct / total)) 115 | if args.output_dir and accuracy > best_metric: 116 | best_metric = accuracy 117 | checkpoint_path = output_dir / "BEST_MLP.pth" 118 | torch.save( 119 | { 120 | "model": model.state_dict(), 121 | "accuracy": best_metric, 122 | "optimizer": optimizer.state_dict(), 123 | "epoch": epoch+1, 124 | "args": args 125 | }, 126 | checkpoint_path, 127 | ) 128 | print("train_one_epoch ends\n") 129 | 130 | total_time = time.time() - start_time 131 | total_time_str = str(datetime.timedelta(seconds=int(total_time))) 132 | print("Training time 
{}".format(total_time_str)) 133 | 134 | x = np.arange(args.epochs) 135 | y1 = train_loss_pic 136 | y2 = accuracy_pic 137 | fig = plt.figure(2, figsize=(16, 8), dpi=50) 138 | ax1 = fig.add_subplot(1, 2, 1) 139 | ax2 = fig.add_subplot(1, 2, 2) 140 | ax1.set_xlabel('epoch') 141 | ax2.set_xlabel('epoch') 142 | ax1.plot(x, y1, 'r', label='train_loss') 143 | ax2.plot(x, y2, 'g--', label='accuracy') 144 | ax1.legend(loc='upper right') 145 | ax2.legend(loc='upper left') 146 | plt.show() 147 | 148 | 149 | if __name__ == '__main__': 150 | parser = argparse.ArgumentParser(description='MLP') 151 | parser.add_argument('--epochs', default=10, type=int) 152 | parser.add_argument('--batch_size', default=20, type=int) 153 | parser.add_argument('--output-dir', default="models") 154 | parser.add_argument('--seed', default=2, type=int) 155 | parser.add_argument('--optimizer', default="adam", type=str) 156 | args = parser.parse_args() 157 | 158 | print(args) 159 | 160 | main(args) 161 | 162 | 163 | -------------------------------------------------------------------------------- /实验1-1190201215-冯开来/requirements.txt: -------------------------------------------------------------------------------- 1 | 2 | # pip install -r requirements.txt 3 | numpy>=1.18.5 4 | torch>=1.7.0 5 | torchvision>=0.8.1 6 | matplotlib~=3.5.1 7 | # conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch 8 | 9 | 10 | -------------------------------------------------------------------------------- /实验1-1190201215-冯开来/实验1-1190201215-冯开来.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验1-1190201215-冯开来/实验1-1190201215-冯开来.pdf -------------------------------------------------------------------------------- /实验2-1190201215-冯开来/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import random 4 | import torch.nn as nn 5 | import torch.optim 6 | import numpy as np 7 | from PIL import Image 8 | from torch.utils.data import DataLoader, Dataset, random_split 9 | from torchvision.transforms import transforms 10 | from torch.optim.lr_scheduler import MultiStepLR 11 | from pathlib import Path 12 | from torch.utils.tensorboard import SummaryWriter 13 | 14 | 15 | class AlexNet(nn.Module): 16 | 17 | def __init__(self): 18 | super(AlexNet, self).__init__() 19 | 20 | self.net = nn.Sequential( 21 | # 输入 (3, 109, 109) 22 | # 输出 (8, 55, 55) 23 | nn.Conv2d(in_channels=3, out_channels=8, kernel_size=(5, 5), stride=(2, 2), padding=2), # (109-5+4)/2+1=55 24 | nn.ReLU(), 25 | nn.MaxPool2d(kernel_size=3, stride=2), # (55-3+0)/2+1=27 26 | # 输入 (8, 27, 27) 27 | # 输出 (16, 27, 27) 28 | nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=1), # (27-3+2)/1+1=27 29 | nn.ReLU(), 30 | nn.MaxPool2d(kernel_size=3, stride=2), # (27-3+0)/2+1=13 31 | # 输入 (16, 13, 13) 32 | # 输出 (32, 13, 13) 33 | nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=1), # (13-3+2)/1+1=13 34 | nn.ReLU(), 35 | # 输入 (32, 13, 13) 36 | # 输出 (64, 13, 13) 37 | nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=1), # (13-3+2)/1+1=13 38 | nn.ReLU(), 39 | # 输入 (64, 13, 13) 40 | # 输出 (128, 6, 6) 41 | nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=1), 42 | nn.ReLU(), 43 | nn.MaxPool2d(kernel_size=3, stride=2) # 
(13-3+0)/2+1=6 44 | ) 45 | 46 | self.classifier = nn.Sequential( 47 | nn.Flatten(), 48 | nn.Dropout(p=0.5), 49 | nn.Linear(128 * 6 * 6, 512), 50 | nn.ReLU(), 51 | nn.Dropout(p=0.5), 52 | nn.Linear(512, 256), 53 | nn.ReLU(), 54 | nn.Linear(256, 101) 55 | ) 56 | 57 | self.init_weights() # initialize bias 58 | 59 | def init_weights(self): 60 | for layer in self.net: 61 | if isinstance(layer, nn.Conv2d): 62 | nn.init.normal_(layer.weight, mean=0, std=0.01) 63 | nn.init.constant_(layer.bias, 0) 64 | nn.init.constant_(self.net[3].bias, 1) 65 | nn.init.constant_(self.net[8].bias, 1) 66 | nn.init.constant_(self.net[10].bias, 1) 67 | 68 | def forward(self, x): 69 | x = self.net(x) 70 | return self.classifier(x) 71 | 72 | 73 | class MyDataset(Dataset): 74 | 75 | def __init__(self, args): 76 | self.imgs = [] 77 | self.labels = [] 78 | self.transforms = transforms.Compose([ 79 | transforms.Resize((args.img_size, args.img_size)), 80 | transforms.ToTensor(), 81 | ]) 82 | 83 | img_dir = os.path.join(args.data_path, '101_ObjectCategories/') 84 | label = -1 85 | for path in os.listdir(img_dir): 86 | if path == 'BACKGROUND_Google': 87 | continue 88 | path_name = os.path.join(img_dir, path) 89 | label += 1 90 | for name in os.listdir(path_name): 91 | file_name = os.path.join(path_name, name) 92 | self.labels.append(label) 93 | img = self.transforms(Image.open(file_name).convert('RGB')) 94 | self.imgs.append(img) 95 | 96 | def __getitem__(self, idx): 97 | return {"img": self.imgs[idx], "label": self.labels[idx]} 98 | 99 | def __len__(self): 100 | return len(self.labels) 101 | 102 | 103 | def main(args): 104 | 105 | device = args.device 106 | 107 | # fix the seed for reproducibility 108 | seed = args.seed 109 | torch.manual_seed(seed) 110 | np.random.seed(seed) 111 | random.seed(seed) 112 | 113 | # dataset 114 | my_dataset = MyDataset(args) 115 | full_size = my_dataset.__len__() 116 | train_size = int(0.8 * full_size) 117 | val_size = int(0.1 * full_size) 118 | test_size = full_size - train_size - val_size 119 | train_dataset, rest_dataset = random_split(my_dataset, [train_size, val_size + test_size]) 120 | val_dataset, test_dataset = random_split(rest_dataset, [val_size, test_size]) 121 | train_loader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch_size) 122 | val_loader = DataLoader(val_dataset, batch_size=args.batch_size) 123 | test_loader = DataLoader(test_dataset, batch_size=args.batch_size) 124 | 125 | # model 126 | alexnet = AlexNet() 127 | writer = SummaryWriter() 128 | input_img = torch.randn(30, 3, 109, 109) 129 | writer.add_graph(alexnet, input_img) 130 | alexnet.to(device) 131 | 132 | # Set up optimizer and loss function 133 | optimizer = torch.optim.Adam(alexnet.parameters(), lr=args.learning_rate) 134 | scheduler = MultiStepLR(optimizer, [20, 25], 0.1) 135 | lossfunc = nn.CrossEntropyLoss() 136 | 137 | # Only run on test 138 | if args.test: 139 | 140 | print("*****Starting testing*****\n") 141 | 142 | test_num = 0 143 | test_accur = 0.0 144 | load_dir = Path(args.output_path) / "AlexNet.pth" 145 | checkpoint = torch.load(load_dir) 146 | alexnet.load_state_dict(checkpoint['net']) 147 | optimizer.load_state_dict(checkpoint['optimizer']) 148 | alexnet.eval() 149 | with torch.no_grad(): 150 | for data in test_loader: 151 | img, label = data['img'].to(device), data['label'].to(device) 152 | outputs = alexnet(img) 153 | _, pred = torch.max(outputs.data, dim=1) 154 | test_num += label.size(0) 155 | test_accur += (pred == label).sum().item() 156 | test_accur /= test_num 157 | print('Test 
Accuracy: {:.2%}'.format(test_accur)) 158 | 159 | print("\n*****Testing ends*****") 160 | return 161 | 162 | # start train and val 163 | epochs = args.epochs 164 | train_loss_all = [] 165 | val_accur_all = [] 166 | best_accur = 0.0 167 | 168 | print("*****Starting training*****\n") 169 | 170 | for epoch in range(epochs): 171 | print("Epoch [{}/{}]".format(epoch+1, epochs)) 172 | train_loss = 0.0 173 | train_num = 0.0 174 | val_accur = 0.0 175 | val_num = 0.0 176 | 177 | # train 178 | alexnet.train() 179 | for idx, data in enumerate(train_loader): 180 | img, label = data['img'].to(device), data['label'].to(device) 181 | optimizer.zero_grad() 182 | outputs = alexnet(img) 183 | loss = lossfunc(outputs, label) 184 | loss.backward() 185 | optimizer.step() 186 | 187 | train_loss += loss.item() * img.size(0) 188 | train_num += img.size(0) 189 | train_loss /= train_num 190 | train_loss_all.append(train_loss) 191 | scheduler.step() 192 | 193 | # val 194 | alexnet.eval() 195 | with torch.no_grad(): 196 | for data in val_loader: 197 | img, label = data['img'].to(device), data['label'].to(device) 198 | outputs = alexnet(img) 199 | _, pred = torch.max(outputs.data, dim=1) 200 | 201 | val_num += label.size(0) 202 | val_accur += (pred == label).sum().item() 203 | val_accur = val_accur / val_num 204 | val_accur_all.append(val_accur) 205 | 206 | if val_accur > best_accur and args.output_path: 207 | best_accur = val_accur 208 | output_dir = Path(args.output_path) / "AlexNet.pth" 209 | state = { 210 | 'net': alexnet.state_dict(), 211 | 'optimizer': optimizer.state_dict(), 212 | 'epoch': epoch+1 213 | } 214 | torch.save(state, output_dir) 215 | 216 | print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Accuracy: {:.2%}' 217 | .format(epoch + 1, train_loss, val_accur)) 218 | 219 | for x in range(epochs): 220 | writer.add_scalar('train_loss', train_loss_all[x], x) 221 | writer.add_scalar('val_accuracy', val_accur_all[x], x) 222 | writer.close() 223 | 224 | print("\n*****Training ends*****") 225 | 226 | 227 | # Press the green button in the gutter to run the script. 
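# A small helper (a sketch, not used by the script) for the conv/pool size arithmetic quoted
# in the AlexNet comments above: out = floor((in + 2*padding - kernel) / stride) + 1.
def _conv_out_size(size, kernel, stride, padding=0):
    """Spatial output size of a Conv2d / MaxPool2d layer with dilation 1."""
    return (size + 2 * padding - kernel) // stride + 1

# The stride-1, padding-1 convolutions keep the spatial size; the size-changing layers give
# _conv_out_size(109, 5, 2, 2) -> 55, then three MaxPool2d(3, 2) steps: 55 -> 27 -> 13 -> 6,
# which matches the classifier's 128 * 6 * 6 input features.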
228 | 229 | if __name__ == '__main__': 230 | parser = argparse.ArgumentParser("AlexNet") 231 | parser.add_argument("--device", default="cuda") 232 | parser.add_argument("--batch-size", default=30) 233 | parser.add_argument("--epochs", default=30, type=int) 234 | parser.add_argument("--seed", default=2) 235 | parser.add_argument("--learning-rate", default=0.001) 236 | parser.add_argument("--output-path", default="./model") 237 | parser.add_argument("--data-path", default="./caltech-101/") 238 | parser.add_argument("--img-size", default=109) 239 | parser.add_argument("--test", action="store_true", help="Only run test") 240 | 241 | args = parser.parse_args() 242 | print(args) 243 | 244 | main(args) 245 | 246 | # See PyCharm help at https://www.jetbrains.com/help/pycharm/ 247 | -------------------------------------------------------------------------------- /实验2-1190201215-冯开来/model/net.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验2-1190201215-冯开来/model/net.png -------------------------------------------------------------------------------- /实验2-1190201215-冯开来/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验2-1190201215-冯开来/requirements.txt -------------------------------------------------------------------------------- /实验2-1190201215-冯开来/runs/May26_11-50-15_LAPTOP-EMMOR93D/events.out.tfevents.1653537015.LAPTOP-EMMOR93D.38396.0: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验2-1190201215-冯开来/runs/May26_11-50-15_LAPTOP-EMMOR93D/events.out.tfevents.1653537015.LAPTOP-EMMOR93D.38396.0 -------------------------------------------------------------------------------- /实验2-1190201215-冯开来/实验2-1190201215-冯开来.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验2-1190201215-冯开来/实验2-1190201215-冯开来.pdf -------------------------------------------------------------------------------- /实验3-1190201215-冯开来-1190201225-韩庸平/codes/ResNet.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | import blocks as Block 3 | 4 | 5 | class ResNet(nn.Module): 6 | def __init__(self): 7 | super(ResNet, self).__init__() 8 | self.conv1 = nn.Sequential( 9 | nn.Conv2d(3, 64, 7, 2, 3), 10 | nn.BatchNorm2d(64), 11 | nn.ReLU(), 12 | nn.MaxPool2d(3, 2, 1) 13 | ) 14 | self.conv2 = nn.Sequential( 15 | Block.CommonBlock(64, 64), 16 | Block.CommonBlock(64, 64) 17 | ) 18 | self.conv3 = nn.Sequential( 19 | Block.SpecialBlock(64, 128, 2), # stride != 1 and in_channel != out_channel,需要下采样 20 | Block.CommonBlock(128, 128) 21 | ) 22 | self.conv4 = nn.Sequential( 23 | Block.SpecialBlock(128, 256, 2), 24 | Block.CommonBlock(256, 256) 25 | ) 26 | self.conv5 = nn.Sequential( 27 | Block.SpecialBlock(256, 512, 2), 28 | Block.CommonBlock(512, 512) 29 | ) 30 | self.dense = nn.Sequential( # 最后用于分类的全连接层,根据需要灵活变化 31 | nn.AdaptiveAvgPool2d(output_size=(1, 1)), # 自适应平均池化 32 | nn.Flatten(), 33 | nn.Linear(512, 12) 34 | ) 35 | 36 | def forward(self, 
image): 37 | img = self.conv1(image) 38 | img = self.conv2(img) # 四个卷积单元 39 | img = self.conv3(img) 40 | img = self.conv4(img) 41 | img = self.conv5(img) 42 | img = self.dense(img) # 全连接 43 | return img 44 | -------------------------------------------------------------------------------- /实验3-1190201215-冯开来-1190201225-韩庸平/codes/SE_ResNet.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | import blocks as Block 3 | 4 | 5 | class SEResNet(nn.Module): 6 | def __init__(self): 7 | super(SEResNet, self).__init__() 8 | self.conv1 = nn.Sequential( 9 | nn.Conv2d(3, 64, 7, 2, 3), 10 | nn.BatchNorm2d(64), 11 | nn.ReLU(), 12 | nn.MaxPool2d(3, 2, 1) 13 | ) 14 | self.conv2 = nn.Sequential( 15 | Block.SECommonBlock(64, 64), 16 | Block.SECommonBlock(64, 64) 17 | ) 18 | self.conv3 = nn.Sequential( 19 | Block.SESpecialBlock(64, 128, 2), # stride != 1 and in_channel != out_channel,需要下采样 20 | Block.SECommonBlock(128, 128) 21 | ) 22 | self.conv4 = nn.Sequential( 23 | Block.SESpecialBlock(128, 256, 2), 24 | Block.SECommonBlock(256, 256) 25 | ) 26 | self.conv5 = nn.Sequential( 27 | Block.SESpecialBlock(256, 512, 2), 28 | Block.SECommonBlock(512, 512) 29 | ) 30 | self.dense = nn.Sequential( # 最后用于分类的全连接层,根据需要灵活变化 31 | nn.AdaptiveAvgPool2d(output_size=(1, 1)), # 自适应平均池化 32 | nn.Flatten(), 33 | nn.Linear(512, 12) 34 | ) 35 | 36 | def forward(self, image): 37 | img = self.conv1(image) 38 | img = self.conv2(img) # 四个卷积单元 39 | img = self.conv3(img) 40 | img = self.conv4(img) 41 | img = self.conv5(img) 42 | img = self.dense(img) # 全连接 43 | return img 44 | -------------------------------------------------------------------------------- /实验3-1190201215-冯开来-1190201225-韩庸平/codes/VGG.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class VGG(torch.nn.Module): # 继承 torch 的 Module 5 | def __init__(self): 6 | super(VGG, self).__init__() # 7 | self.module = torch.nn.Sequential( 8 | # 1 9 | torch.nn.Conv2d(3, 64, kernel_size=3, padding=1), 10 | torch.nn.ReLU(inplace=True), 11 | torch.nn.MaxPool2d(kernel_size=2, stride=2), 12 | # 2 13 | torch.nn.Conv2d(64, 128, kernel_size=3, padding=1), 14 | torch.nn.ReLU(inplace=True), 15 | torch.nn.MaxPool2d(kernel_size=2, stride=2), 16 | # 3 17 | torch.nn.Conv2d(128, 256, kernel_size=3, padding=1), 18 | torch.nn.ReLU(inplace=True), 19 | torch.nn.Conv2d(256, 256, kernel_size=3, padding=1), 20 | torch.nn.ReLU(inplace=True), 21 | torch.nn.MaxPool2d(kernel_size=2, stride=2), 22 | # 4 23 | torch.nn.Conv2d(256, 512, kernel_size=3, padding=1), 24 | torch.nn.ReLU(inplace=True), 25 | torch.nn.Conv2d(512, 512, kernel_size=3, padding=1), 26 | torch.nn.ReLU(inplace=True), 27 | torch.nn.MaxPool2d(kernel_size=2, stride=2), 28 | # 5 29 | torch.nn.Conv2d(512, 512, kernel_size=3, padding=1), 30 | torch.nn.ReLU(inplace=True), 31 | torch.nn.Conv2d(512, 512, kernel_size=3, padding=1), 32 | torch.nn.ReLU(inplace=True), 33 | torch.nn.MaxPool2d(kernel_size=2, stride=2) 34 | ) 35 | self.classify = torch.nn.Sequential( 36 | torch.nn.Flatten(), 37 | torch.nn.Linear(25088, 4096), 38 | torch.nn.ReLU(inplace=True), 39 | torch.nn.Dropout(0.5), 40 | torch.nn.Linear(4096, 4096), 41 | torch.nn.ReLU(inplace=True), 42 | torch.nn.Dropout(0.5), 43 | torch.nn.Linear(4096, 12) 44 | ) 45 | 46 | def forward(self, x): 47 | x = self.module(x) 48 | x = self.classify(x) 49 | return x 50 | -------------------------------------------------------------------------------- 
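# A quick shape check for the VGG class above (a sketch, assuming the 3x224x224 crops that
# main.py's RandomResizedCrop(224) produces): five MaxPool2d(2, 2) stages give 224 / 2**5 = 7,
# so the flattened feature is 512 * 7 * 7 = 25088, matching the first Linear layer.
if __name__ == '__main__':
    import torch
    net = VGG()
    dummy = torch.randn(2, 3, 224, 224)   # a fake batch of two RGB crops
    print(net(dummy).shape)               # expected: torch.Size([2, 12])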
/实验3-1190201215-冯开来-1190201225-韩庸平/codes/blocks.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from torch.nn import functional as F 3 | 4 | 5 | class CommonBlock(nn.Module): 6 | def __init__(self, in_channel, out_channel): # 普通Block简单完成两次卷积操作 7 | super(CommonBlock, self).__init__() 8 | self.conv1 = nn.Sequential( 9 | nn.Conv2d(in_channel, out_channel, 3, 1, 1), 10 | nn.BatchNorm2d(out_channel), 11 | nn.ReLU() 12 | ) 13 | self.conv2 = nn.Sequential( 14 | nn.Conv2d(out_channel, out_channel, 3, 1, 1), 15 | nn.BatchNorm2d(out_channel) 16 | ) 17 | 18 | def forward(self, image): 19 | identity = image # shortcut 20 | out = self.conv1(image) 21 | out = self.conv2(out) 22 | out += identity # 两路相加 23 | return F.relu(out) 24 | 25 | 26 | class SpecialBlock(nn.Module): # 特殊Block完成两次卷积操作,以及一次下采样 27 | def __init__(self, in_channel, out_channel, stride): 28 | super(SpecialBlock, self).__init__() 29 | self.down_sample = nn.Sequential( # 负责升维下采样的卷积网络change_channel 30 | nn.Conv2d(in_channel, out_channel, 1, stride), 31 | nn.BatchNorm2d(out_channel) 32 | ) 33 | self.conv1 = nn.Sequential( 34 | nn.Conv2d(in_channel, out_channel, 3, stride, 1), 35 | nn.BatchNorm2d(out_channel), 36 | nn.ReLU() 37 | ) 38 | self.conv2 = nn.Sequential( 39 | nn.Conv2d(out_channel, out_channel, 3, 1, 1), 40 | nn.BatchNorm2d(out_channel) 41 | ) 42 | 43 | def forward(self, image): 44 | identity = self.down_sample(image) # 下采样,为后面相加做准备 45 | out = self.conv1(image) 46 | out = self.conv2(out) # 完成残差部分的卷积 47 | out += identity 48 | return F.relu(out) # 输出卷积单元 49 | 50 | 51 | class SEBlock(nn.Module): 52 | def __init__(self, in_channel, reduction=16): 53 | super(SEBlock, self).__init__() 54 | self.global_pooling = nn.AdaptiveAvgPool2d(output_size=(1, 1)) # 全局池化 55 | self.fc = nn.Sequential( 56 | nn.Linear(in_channel, in_channel // reduction), 57 | nn.ReLU(), 58 | nn.Linear(in_channel // reduction, in_channel), 59 | nn.Sigmoid() 60 | ) 61 | 62 | def forward(self, image): 63 | b, c, _, _ = image.size() 64 | y = self.global_pooling(image).view(b, c) # 得到B*C*1*1,然后转成B*C,才能送入到FC层中 65 | y = self.fc(y).view(b, c, 1, 1) # 得到B*C的向量,C个值就表示C个通道的权重。把B*C变为B*C*1*1是为了与四维的x运算。 66 | return image * y.expand_as(image) # 先把B*C*1*1变成B*C*H*W大小,其中每个通道上的H*W个值都相等。*表示对应位置相乘。 67 | 68 | 69 | class SECommonBlock(nn.Module): 70 | def __init__(self, in_channel, out_channel): # 普通Block简单完成两次卷积操作 71 | super(SECommonBlock, self).__init__() 72 | self.conv1 = nn.Sequential( 73 | nn.Conv2d(in_channel, out_channel, 3, 1, 1), 74 | nn.BatchNorm2d(out_channel), 75 | nn.ReLU() 76 | ) 77 | self.conv2 = nn.Sequential( 78 | nn.Conv2d(out_channel, out_channel, 3, 1, 1), 79 | nn.BatchNorm2d(out_channel) 80 | ) 81 | self.se = SEBlock(out_channel) 82 | 83 | def forward(self, image): 84 | identity = image # shortcut 85 | out = self.conv1(image) 86 | out = self.conv2(out) 87 | out = self.se(out) # SE模块 88 | out += identity # 两路相加 89 | return F.relu(out) 90 | 91 | 92 | class SESpecialBlock(nn.Module): # 特殊Block完成两次卷积操作,以及一次下采样 93 | def __init__(self, in_channel, out_channel, stride): 94 | super(SESpecialBlock, self).__init__() 95 | self.down_sample = nn.Sequential( # 负责升维下采样的卷积网络change_channel 96 | nn.Conv2d(in_channel, out_channel, 1, stride), 97 | nn.BatchNorm2d(out_channel) 98 | ) 99 | self.conv1 = nn.Sequential( 100 | nn.Conv2d(in_channel, out_channel, 3, stride, 1), 101 | nn.BatchNorm2d(out_channel), 102 | nn.ReLU() 103 | ) 104 | self.conv2 = nn.Sequential( 105 | nn.Conv2d(out_channel, out_channel, 3, 1, 1), 106 | 
nn.BatchNorm2d(out_channel) 107 | ) 108 | self.se = SEBlock(out_channel) 109 | 110 | def forward(self, image): 111 | identity = self.down_sample(image) # 下采样,为后面相加做准备 112 | out = self.conv1(image) 113 | out = self.conv2(out) # 完成残差部分的卷积 114 | out = self.se(out) # SE模块 115 | out += identity 116 | return F.relu(out) # 输出卷积单元 117 | -------------------------------------------------------------------------------- /实验3-1190201215-冯开来-1190201225-韩庸平/codes/data_path.py: -------------------------------------------------------------------------------- 1 | import os 2 | import math 3 | 4 | 5 | def get_data(): 6 | root = 'train/' 7 | path = os.listdir(root) 8 | path.sort() 9 | images_set = [] # 记录每个文件夹中图片的路径 10 | # 遍历所有文件夹 11 | for line in path: 12 | child_path = root + line 13 | image_set = [] # 记录一个文件夹中图片的路径 14 | images = os.listdir(child_path) 15 | # 遍历一个文件夹中的所有图片,获得相应的数目和路径 16 | for image in images: 17 | image_set.append(child_path + '/' + image) 18 | images_set.append(image_set) 19 | # 将每个文件夹中的图片划分出训练集、开发集和测试集 20 | train_data = [] # 训练集的路径 21 | labels_index = [] # 所有类别的label 22 | for img in images_set: 23 | train_data.extend(img) 24 | # 获得所有类别的label 25 | for data_path in train_data: 26 | label = data_path[6:-14] 27 | if label not in labels_index: 28 | labels_index.append(label) 29 | return train_data, labels_index 30 | 31 | 32 | def get_test(): 33 | root = 'test/' 34 | path = os.listdir(root) 35 | path.sort() 36 | test_set = [] # 记录一个文件夹中图片的路径 37 | for line in path: 38 | test_set.append(root + line) 39 | return test_set 40 | -------------------------------------------------------------------------------- /实验3-1190201215-冯开来-1190201225-韩庸平/codes/main.py: -------------------------------------------------------------------------------- 1 | import VGG 2 | import model 3 | import torch 4 | import writer 5 | import ResNet 6 | import argparse 7 | import SE_ResNet 8 | import data_path as dp 9 | from torchvision import transforms 10 | from torchvision import datasets 11 | from torch.utils.data import dataloader 12 | 13 | 14 | epoch = 40 15 | batch_size = 32 16 | train_data, labels_index = dp.get_data() # 获取所有数据的路径,labels_index存放所有类别的label 17 | 18 | data_augmentation = { 19 | "yes": transforms.Compose([transforms.RandomResizedCrop(224), 20 | transforms.RandomHorizontalFlip(), 21 | transforms.RandomRotation(45), 22 | transforms.RandomAffine(degrees=0, translate=(0, 0.2)), 23 | transforms.ToTensor(), 24 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), 25 | "no": transforms.Compose([transforms.RandomResizedCrop(224), 26 | transforms.ToTensor(), 27 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) 28 | } 29 | 30 | 31 | if __name__ == '__main__': 32 | parser = argparse.ArgumentParser() 33 | parser.add_argument('--device', type=str, default='GPU') # 选择GPU还是CPU 34 | parser.add_argument('--net', type=str, default=None) # 选择使用的网络 35 | parser.add_argument('--aug', type=str, default='yes') # 选择是否进行数据增强 36 | parser.add_argument('--optim', type=str, default='Adam') # 选择使用的优化器 37 | opt = parser.parse_args() 38 | # 运算设备,默认选择GPU 39 | if opt.device == 'GPU': 40 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 41 | else: 42 | device = torch.device("cpu") 43 | print("设备: " + torch.cuda.get_device_name(device)) 44 | # 选择是否进行数据增强,默认选择yes 45 | if opt.aug == 'yes': 46 | augment = '_AUGMENTATION' 47 | print("data augmentation...") 48 | train_set = datasets.ImageFolder(root='train', transform=data_augmentation["yes"]) 49 | else: 50 | augment = '_NO_AUGMENTATION' 51 | train_set = 
datasets.ImageFolder(root='train', transform=data_augmentation["no"]) 52 | train_loader = dataloader.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True) 53 | # 选择想要使用的网络 54 | net = None 55 | if opt.net == 'VGG': 56 | net = VGG.VGG().to(device) 57 | print("VGG loading...") 58 | elif opt.net == 'ResNet': 59 | net = ResNet.ResNet().to(device) 60 | print("ResNet loading...") 61 | elif opt.net == 'SEResNet': 62 | net = SE_ResNet.SEResNet().to(device) 63 | print("SE_ResNet loading...") 64 | modelPath = 'models/' + opt.net + '_' + opt.optim + augment + '.pkl' 65 | csvPath = 'result/' + opt.net + '_' + opt.optim + augment + '.csv' 66 | model.train(net, train_loader, device, epoch, opt, modelPath) 67 | writer.write_to_csv(net, data_augmentation["no"], device, labels_index, csvPath) 68 | -------------------------------------------------------------------------------- /实验3-1190201215-冯开来-1190201225-韩庸平/codes/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from tqdm import tqdm 4 | from torch import optim 5 | from torch.utils.tensorboard import SummaryWriter 6 | 7 | 8 | def train(net, train_loader, device, epoch, opt, model_path): 9 | print('training.....') 10 | net.train() 11 | Swriter = SummaryWriter(log_dir='./vision') 12 | loss_func = nn.CrossEntropyLoss() # 交叉熵损失函数 13 | if opt.optim == 'Adam': 14 | print("optimizer: Adam") 15 | optimizer = optim.Adam(net.parameters(), lr=0.0001) # adam优化器 16 | else: 17 | print("optimizer: SGD") 18 | optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9) # SGD优化器 19 | for i in range(epoch): 20 | train_loss = 0 21 | tran_data = tqdm(train_loader) 22 | for j, (image, label) in enumerate(tran_data): 23 | image = image.to(device) 24 | label = label.to(device) 25 | optimizer.zero_grad() 26 | out = net(image) 27 | loss = loss_func(out, label).to(device) 28 | train_loss += loss.data 29 | loss.backward() 30 | optimizer.step() 31 | print('epoch {}/{}, Loss: {:.6f}'.format(i + 1, epoch, train_loss / len(train_loader))) 32 | Swriter.add_scalar('loss_' + opt.net + '_' + opt.optim + '_' + opt.aug, 33 | train_loss / len(train_loader), global_step=i+1) 34 | torch.save(net, model_path) 35 | print('training completed') 36 | -------------------------------------------------------------------------------- /实验3-1190201215-冯开来-1190201225-韩庸平/codes/train.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import time 3 | 4 | import cv2 as cv 5 | import os 6 | import torch 7 | from torch import nn, optim 8 | from torch.optim.lr_scheduler import MultiStepLR 9 | from torch.utils.data import DataLoader, Dataset, random_split 10 | from torchvision import transforms 11 | from tqdm import tqdm 12 | import json 13 | 14 | cuda = torch.cuda.is_available() 15 | 16 | 17 | class my_dataset(Dataset): 18 | 19 | def __init__(self, imgs, labels): 20 | self.imgs = imgs 21 | self.labels = labels 22 | self.len = len(labels) 23 | 24 | def __getitem__(self, idx): 25 | return self.imgs[idx], self.labels[idx] 26 | 27 | def __len__(self): 28 | return self.len 29 | 30 | 31 | class AlexNet(nn.Module): 32 | def __init__(self): 33 | super(AlexNet, self).__init__() 34 | 35 | self.conv1 = nn.Sequential( 36 | nn.Conv2d(3, 96, kernel_size=(11, 11), stride=(4, 4), padding=3), 37 | nn.ReLU()) 38 | self.pool1 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)) 39 | self.conv2 = nn.Sequential( 40 | nn.Conv2d(96, 256, kernel_size=(5, 5), stride = (1, 1), 
padding=2), 41 | nn.ReLU()) 42 | self.pool2 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)) 43 | self.conv3 = nn.Sequential( 44 | nn.Conv2d(256, 384, kernel_size=(3, 3), stride=(1, 1), padding=1), 45 | nn.ReLU()) 46 | self.conv4 = nn.Sequential( 47 | nn.Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=1), 48 | nn.ReLU()) 49 | self.conv5 = nn.Sequential( 50 | nn.Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=1), 51 | nn.ReLU()) 52 | self.pool3 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)) 53 | self.l1 = nn.Linear(6*6*128*2, 4096) 54 | self.l2 = nn.Linear(4096, 4096) 55 | self.l3 = nn.Linear(4096, 101) 56 | 57 | def forward(self, x): 58 | x = self.conv1(x) 59 | x = self.pool1(x) 60 | x = self.conv2(x) 61 | x = self.pool2(x) 62 | x = self.conv3(x) 63 | x = self.conv4(x) 64 | x = self.conv5(x) 65 | x = self.pool3(x) 66 | 67 | x = x.view(x.shape[0], -1) 68 | x = torch.relu(self.l1(x)) 69 | x = torch.relu(self.l2(x)) 70 | return self.l3(x) 71 | 72 | 73 | def load_data(): 74 | 75 | with open('label_dic.json', 'r') as f: 76 | label_dic = json.load(f) 77 | f.close() 78 | 79 | # transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))]) 80 | transform = transforms.ToTensor() 81 | # transform.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)) 82 | 83 | img_paths = glob.glob('../dataset/caltech-101/101_ObjectCategories/*/*.jpg') 84 | data = [] 85 | labels = [] 86 | for img_path in tqdm(img_paths): 87 | 88 | label = img_path.split(os.path.sep)[-2] 89 | if label == 'BACKGROUND_Google': 90 | continue 91 | 92 | img = cv.imread(img_path) 93 | img = cv.resize(img, (224, 224)) 94 | img = cv.cvtColor(img, cv.COLOR_BGR2RGB) 95 | img = transform(img) 96 | 97 | data.append(img) 98 | labels.append(label_dic[label]) 99 | 100 | return data, labels 101 | 102 | 103 | def train(epoch): 104 | model.train() 105 | loss_sum = 0.0 106 | print('Beginning of epoch %d, lr = %f' % (epoch + 1, optimizer.state_dict()['param_groups'][0]['lr'])) 107 | tic = time.time() 108 | for idx, data in enumerate(train_dataloader, 1): 109 | input, target = data 110 | if cuda: 111 | input, target = input.cuda(), target.cuda() 112 | optimizer.zero_grad() 113 | output = model(input) 114 | loss = criterion(output, target) 115 | loss.backward() 116 | optimizer.step() 117 | 118 | loss_sum += loss.item() 119 | if idx % 20 == 0: 120 | print('[epoch %d, batch %d] loss: %.3f' % (epoch + 1, idx, loss_sum / 20)) 121 | loss_sum = 0.0 122 | toc = time.time() 123 | print('Epoch %d finished, took %.2fs' % (epoch + 1, toc - tic)) 124 | 125 | 126 | def validation(): 127 | model.eval() 128 | correct_cnt = 0 # 预测正确的数量 129 | total_cnt = 0 # 样本总数 130 | with torch.no_grad(): 131 | for data in val_dataloader: # 每次取出一个mini-batch 132 | input, labels = data 133 | if cuda: 134 | input, labels = input.cuda(), labels.cuda() 135 | output = model(input) 136 | _, pred = torch.max(output.data, dim=1) # 选取数值最大的一维对应的标签作为预测标签 137 | total_cnt += labels.size(0) 138 | correct_cnt += (pred == labels).sum().item() 139 | print('Accuracy on validation set: %.2f%%\n' % (100.0 * correct_cnt / total_cnt)) 140 | 141 | 142 | if __name__ == '__main__': 143 | batch_size = 64 144 | epochs = 20 145 | lr = 0.001 146 | data, labels = load_data() 147 | train_val_dataset = my_dataset(data, labels) 148 | train_dataset, val_dataset = random_split(train_val_dataset, 149 | [8000, 677], 150 | generator=torch.Generator().manual_seed(1)) 151 | train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) 
152 | val_dataloader = DataLoader(val_dataset, shuffle=False, batch_size=batch_size) 153 | 154 | model = AlexNet() 155 | criterion = nn.CrossEntropyLoss() 156 | # optimizer = optim.SGD(model.parameters(), lr = lr, momentum=0.3) 157 | optimizer = optim.Adam(model.parameters(), lr = lr) 158 | scheduler = MultiStepLR(optimizer, milestones=[10, 20], gamma=0.1) 159 | 160 | if cuda: 161 | model.cuda() 162 | criterion = criterion.cuda() 163 | 164 | for epoch in range(epochs): 165 | scheduler.step() 166 | train(epoch) 167 | validation() 168 | 169 | torch.save(model, '../models/model.pth') 170 | 171 | -------------------------------------------------------------------------------- /实验3-1190201215-冯开来-1190201225-韩庸平/codes/writer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import pandas as pd 3 | from PIL import Image 4 | import data_path as dp 5 | 6 | 7 | def write_to_csv(net, trans, device, labels, csv_path): 8 | label_list = [] 9 | name_list = [] 10 | test_path = dp.get_test() 11 | 12 | for i in test_path: 13 | net.eval() 14 | image = Image.open(i).convert('RGB') 15 | img = trans(image) 16 | img = torch.unsqueeze(img, dim=0) 17 | img = img.to(device) 18 | output = net(img) 19 | name = i.split('/')[1] 20 | _, prediction = torch.max(output.data, dim=1) 21 | label = labels[prediction] 22 | name_list.append(name) 23 | label_list.append(label) 24 | 25 | finally_result = pd.DataFrame({'file': name_list, 'species': label_list}) 26 | print("writing to " + csv_path + '...') 27 | finally_result.to_csv(csv_path, index=False) 28 | -------------------------------------------------------------------------------- /实验3-1190201215-冯开来-1190201225-韩庸平/实验报告3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验3-1190201215-冯开来-1190201225-韩庸平/实验报告3.pdf -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/codes/datasets.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import jieba 3 | import torch 4 | import numpy as np 5 | from torch.utils.data import DataLoader, Dataset 6 | from gensim.models.word2vec import Word2Vec 7 | from sklearn.preprocessing import MinMaxScaler 8 | 9 | 10 | shopping_cats = {'书籍': 0, '平板': 1, '手机': 2, '水果': 3, '洗发水': 4, '热水器': 5, '蒙牛': 6, '衣服': 7, '计算机': 8, '酒店': 9} # 全部类别 11 | 12 | 13 | class Shopping(Dataset): 14 | def __init__(self, my_list): 15 | self.cats = [] 16 | self.reviews = [] 17 | for i in range(len(my_list)): 18 | self.cats.append(my_list[i]['cat']) 19 | self.reviews.append(my_list[i]['review']) 20 | 21 | def __getitem__(self, idx): 22 | # return {"cat": self.cats[idx], "review": self.reviews[idx]} 23 | return {'cat': self.cats[idx], 'review': torch.tensor(np.array(self.reviews[idx]))} 24 | 25 | def __len__(self): 26 | return len(self.cats) 27 | 28 | 29 | class Climate(Dataset): 30 | def __init__(self, my_data, my_label): 31 | sentence = [] 32 | for i in range(my_data.shape[0]): 33 | word = [] 34 | for j in range(0, 5): 35 | word.append(int(my_data.iloc[i, j])) 36 | sentence.append(word) 37 | self.data = np.array(sentence) 38 | self.label = my_label 39 | 40 | def __getitem__(self, idx): 41 | attr = self.data[idx] 42 | label = float(self.label.iloc[idx]) 43 | return {'cat': label, 'review': attr} # 和上面保持一致 44 | 45 | def __len__(self): 46 | return 
len(self.label) 47 | 48 | 49 | def build_shopping(args): 50 | df = pd.read_csv("./dataset/online_shopping_10_cats.csv") 51 | val_list = [] 52 | test_list = [] 53 | train_list = [] 54 | 55 | reviews = [] 56 | cats = [] 57 | for index, row in df.iterrows(): 58 | if not isinstance(row['review'], str): 59 | continue 60 | cats.append(shopping_cats[row['cat']]) 61 | reviews.append(row['review']) 62 | tokens = [jieba.lcut(i) for i in reviews] # 分词 63 | 64 | model = Word2Vec(tokens, min_count=1, hs=1, window=3, vector_size=args.input_size) 65 | reviews_vector = [[model.wv[word] for word in sentence] for sentence in tokens] # 转换成vector的reviews 66 | 67 | for i in range(62773): 68 | if i % 5 == 4: 69 | val_list.append({'cat': cats[i], 'review': reviews_vector[i]}) 70 | elif i % 5 == 0: 71 | test_list.append({'cat': cats[i], 'review': reviews_vector[i]}) 72 | else: 73 | train_list.append({'cat': cats[i], 'review': reviews_vector[i]}) 74 | 75 | # 因为每句句子长度不同,而且是每个句子中的单词进行训练,所以相当于batch-size只能为1 76 | train_loader = DataLoader(Shopping(train_list), shuffle=True, batch_size=1) 77 | val_loader = DataLoader(Shopping(train_list), shuffle=True, batch_size=1) 78 | test_loader = DataLoader(Shopping(train_list), shuffle=True, batch_size=1) 79 | 80 | return train_loader, val_loader, test_loader 81 | 82 | 83 | def build_climate(args): 84 | # read data 85 | data_path = "./dataset/jena_climate_2009_2016.csv" 86 | dataset = pd.read_csv(data_path, parse_dates=['Date Time'], index_col=['Date Time']) 87 | # insert new index 88 | dataset['year'] = dataset.index.year 89 | dataset['hour'] = dataset.index.hour 90 | # normalize hour 91 | dataset['sin(h)'] = [np.sin(x * (2 * np.pi / 24)) for x in dataset['hour']] 92 | dataset['cos(h)'] = [np.cos(x * (2 * np.pi / 24)) for x in dataset['hour']] 93 | # split train and test 94 | train_set = dataset[dataset['year'].isin(range(2009, 2015))] 95 | test_set = dataset[dataset['year'].isin(range(2015, 2018))] 96 | # determine attributes deciding T 97 | attr = ['H2OC (mmol/mol)', 'rho (g/m**3)', 'sh (g/kg)', 'Tpot (K)', 'VPmax (mbar)'] 98 | # normalize other attributes 99 | for col in attr: 100 | scaler = MinMaxScaler() 101 | if col not in ['sin(h)', 'cos(h)', 'T (degC)']: 102 | dataset[col] = scaler.fit_transform(dataset[col].values.reshape(-1, 1)) 103 | # get train/test data 104 | train_data = train_set[attr] 105 | train_label = train_set['T (degC)'] 106 | 107 | test_data = test_set[attr] 108 | test_label = test_set['T (degC)'] 109 | 110 | # get train/test loader 111 | train_loader = DataLoader(Climate(train_data, train_label), shuffle=True, batch_size=1) 112 | test_loader = DataLoader(Climate(test_data, test_label), shuffle=True, batch_size=1) 113 | return train_loader, test_loader 114 | -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/codes/engine.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | from tqdm import tqdm 5 | import matplotlib.pyplot as plt 6 | 7 | criterion = nn.CrossEntropyLoss() 8 | 9 | 10 | def train(model, data_loader, optimizer, epoch, args): 11 | model.train() 12 | train_loss = 0.0 13 | all_loss = [] 14 | for idx, data in enumerate(data_loader): 15 | label, sentence = data['cat'].to(args.device), data['review'].to(args.device) 16 | print(sentence.shape) 17 | if len(sentence.shape) < 3: 18 | sentence = sentence[None] # expand for batchsz 19 | # print(sentence.shape) # 1, words, 128 20 | output = model(sentence) 
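        # The loss branch just below: the climate task regresses a single temperature, so it
        # uses the absolute error |output - label| (an L1-style loss on one sample), while the
        # shopping task is 10-way classification and uses the module-level CrossEntropyLoss.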
21 | 22 | optimizer.zero_grad() 23 | if args.dataset == 'climate': 24 | loss = abs(output - label) 25 | else: 26 | loss = criterion(output, label) 27 | loss.backward() 28 | optimizer.step() 29 | 30 | train_loss += loss.item() 31 | if idx % 376 == 0 and idx > 0: 32 | all_loss.append(train_loss / 376) 33 | print('[epoch %d, batch %d] loss: %.3f' % (epoch, idx, train_loss / 376)) 34 | train_loss = 0.0 35 | return all_loss 36 | 37 | 38 | def validation(model, data_loader, args): 39 | model.eval() 40 | model.zero_grad() 41 | correct_num = 0 42 | val_num = 0 43 | with torch.no_grad(): 44 | for data in tqdm(data_loader): 45 | label, sentence = data['cat'].to(args.device), data['review'].to(args.device) 46 | output = model(sentence) 47 | pred = torch.argmax(output, dim=1) 48 | correct_num += (pred == label).sum().item() 49 | val_num += 1 50 | print('Accuracy on validation set: %.2f%%\n' % (100.0 * correct_num / val_num)) 51 | return correct_num / val_num 52 | 53 | 54 | def test_climate(model, data_loader, args): 55 | trues = [] 56 | preds = [] 57 | with torch.no_grad(): 58 | for data in tqdm(data_loader): 59 | label, sentence = data['cat'].to(args.device), data['review'].to(args.device) 60 | if len(sentence.shape) < 3: 61 | sentence = sentence[None] 62 | pred = model(sentence) 63 | pred, label = pred.to('cpu').item(), label.to('cpu').item() 64 | trues.append(float(label)) 65 | preds.append(float(pred)) 66 | 67 | for i in range(10): 68 | start = 144 * i 69 | end = 144 * i + 288 70 | loss = [] 71 | for j in range(start, end): 72 | loss.append(abs(trues[j] - preds[j])) 73 | mean_loss = np.mean(loss) 74 | median_loss = np.median(loss) 75 | print('[Week %d] Mean-loss: %.3f, Median-loss: %.3f' 76 | % (i, mean_loss, median_loss)) 77 | 78 | x = np.arange(288) 79 | plt.figure(1, (16, 8), 100) 80 | plt.cla() 81 | plt.plot(x, trues[start: end], 'r') 82 | plt.plot(x, preds[start: end], 'g--') 83 | plt.legend(["true", "predict"], loc='upper left') 84 | plt.savefig('./result/pred-true-' + str(i) + '.jpg') 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/codes/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import random 3 | import time 4 | import os 5 | from sklearn.metrics import classification_report 6 | from datasets import * 7 | from models import * 8 | from engine import * 9 | 10 | 11 | shopping_names = ['书籍', '平板', '手机', '水果', '洗发水', '热水器', '蒙牛', '衣服', '计算机', '酒店'] # 全部类别 12 | 13 | 14 | def main(args): 15 | # device 16 | # global test_loader 17 | device = args.device 18 | 19 | # fix the seed for reproducibility 20 | seed = args.seed 21 | torch.manual_seed(seed) 22 | np.random.seed(seed) 23 | random.seed(seed) 24 | 25 | # build dataset and dataloader 26 | print("\nProcessing " + args.dataset + " dataset...") 27 | if args.dataset == "shopping": 28 | train_loader, val_loader, test_loader = build_shopping(args) 29 | output_size = 10 30 | elif args.dataset == "climate": 31 | train_loader, test_loader = build_climate(args) 32 | val_loader = None 33 | output_size = 1 34 | else: 35 | raise ValueError(f"dataset {args.dataset} not supported") 36 | print("Data processing finished!") 37 | 38 | # build model 39 | print("\nBuilding model " + args.model + "...") 40 | if args.model == "RNN": 41 | my_model = RNN(args, output_size) 42 | elif args.model == "GRU": 43 | my_model = GRU(args, output_size) 44 | elif args.model == "LSTM": 45 | my_model = LSTM(args, output_size) 46 | elif 
args.model == "Bi-LSTM": 47 | my_model = LSTM(args, output_size, bidirectional=True) 48 | else: 49 | raise ValueError(f"model {args.model} not supported") 50 | my_model.to(device) 51 | print("Model building finished!") 52 | 53 | # set up optimizers 54 | if args.optimizer == 'sgd': 55 | optimizer = torch.optim.SGD(params=my_model.parameters(), lr=0.001, momentum=0.9) 56 | elif args.optimizer in ["adam", "adamw"]: 57 | optimizer = torch.optim.AdamW(params=my_model.parameters(), lr=0.0001) 58 | else: 59 | raise ValueError(f"optimizer {args.optimizer} not supported") 60 | 61 | # run model only on test 62 | if args.test and args.dataset == 'climate': 63 | print("Testing temperature by " + args.model + "...") 64 | filename = "best_" + args.model + "_climate.pth" 65 | load_path = os.path.join(args.output_path, "models/", filename) 66 | checkpoint = torch.load(load_path) 67 | my_model.load_state_dict(checkpoint['model']) 68 | optimizer.load_state_dict(checkpoint['optimizer']) 69 | my_model.eval() 70 | test_climate(my_model, test_loader, args) 71 | return 72 | if args.test: 73 | print("Testing accuracy, recall, F1 by " + args.model + "...") 74 | filename = "best_" + args.model + ".pth" 75 | load_path = os.path.join(args.output_path, "models/", filename) 76 | checkpoint = torch.load(load_path) 77 | my_model.load_state_dict(checkpoint['model']) 78 | optimizer.load_state_dict(checkpoint['optimizer']) 79 | my_model.eval() 80 | 81 | preds = [] 82 | true = [] 83 | with torch.no_grad(): 84 | for data in tqdm(test_loader): 85 | label, sentence = data['cat'].to(args.device), data['review'].to(args.device) 86 | output = my_model(sentence) 87 | pred = torch.argmax(output, dim=1) 88 | pred, label = pred.to('cpu').item(), label.to('cpu').item() 89 | preds.append(pred) 90 | true.append(label) 91 | result = classification_report(true, preds, target_names=shopping_names) 92 | print(result) 93 | return 94 | 95 | # start train and validation 96 | all_loss = [] 97 | best_accuracy = 0.0 98 | accuracy = 0.0 99 | for epoch in range(1, args.epochs + 1): 100 | start = time.time() 101 | print("Epoch [{}/{}]".format(epoch, args.epochs)) 102 | epoch_loss = train(my_model, train_loader, optimizer, epoch, args) 103 | if val_loader: 104 | accuracy = validation(my_model, val_loader, args) 105 | 106 | all_loss.extend(epoch_loss) 107 | end = time.time() 108 | print('Epoch %d finished, took %.2fs' % (epoch, end - start)) 109 | 110 | if accuracy > best_accuracy: 111 | best_accuracy = accuracy 112 | filename = "best_" + args.model + ".pth" 113 | checkpoint_path = os.path.join(args.output_path, "models/", filename) 114 | torch.save( 115 | { 116 | "model": my_model.state_dict(), 117 | "optimizer": optimizer.state_dict(), 118 | "epoch": epoch, 119 | "args": args 120 | }, 121 | checkpoint_path 122 | ) 123 | if not val_loader: 124 | filename = "best_" + args.model + "_climate.pth" 125 | checkpoint_path = os.path.join(args.output_path, "models/", filename) 126 | torch.save( 127 | { 128 | "model": my_model.state_dict(), 129 | "optimizer": optimizer.state_dict(), 130 | "epoch": epoch, 131 | "args": args 132 | }, 133 | checkpoint_path 134 | ) 135 | 136 | # draw the loss 137 | x = np.arange(len(all_loss)) 138 | plt.plot(x, all_loss, 'r') 139 | plt.show() 140 | 141 | 142 | # Press the green button in the gutter to run the script. 
143 | if __name__ == '__main__': 144 | parser = argparse.ArgumentParser("lab4") 145 | parser.add_argument("--device", default="cuda:0") 146 | parser.add_argument("--model", default="GRU") # RNN GRU LSTM Bi-LSTM 147 | parser.add_argument("--epochs", default=2) 148 | parser.add_argument("--batch-size", default=30) # 本次实验没有用到过 149 | parser.add_argument("--seed", default=42) 150 | parser.add_argument("--dataset", default="shopping") # shopping or climate 151 | parser.add_argument("--output-path", default="./result/") 152 | parser.add_argument("--hidden-size", default=128) 153 | parser.add_argument("--input-size", default=128, type=int) # if climate, only be 5 154 | parser.add_argument("--optimizer", default="adam") 155 | parser.add_argument("--test", action="store_true", help="Only run test") 156 | 157 | args = parser.parse_args() 158 | print(args) 159 | 160 | main(args) 161 | 162 | # run climate 163 | # python main.py --input-size 5 -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/codes/models.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | 5 | 6 | class RNN(nn.Module): 7 | def __init__(self, args, output_size): 8 | super(RNN, self).__init__() 9 | self.device = args.device 10 | self.hidden_size = args.hidden_size 11 | self.input_size = args.input_size 12 | self.i2h = nn.Linear(self.input_size + self.hidden_size, self.hidden_size) 13 | self.h2o = nn.Linear(self.hidden_size, output_size) 14 | self.tanh = nn.Tanh() 15 | 16 | def forward(self, x, hidden=None): # x是一个句子,tensor 17 | global output 18 | if not hidden: 19 | hidden = torch.zeros(1, self.hidden_size).to(self.device) 20 | x = x[0] 21 | for i in range(x.shape[0]): 22 | token = x[i: i + 1] 23 | combined = torch.cat((token, hidden), 1) 24 | hidden = self.tanh(self.i2h(combined)) 25 | output = self.h2o(hidden) 26 | return output 27 | 28 | 29 | class LSTM(nn.Module): 30 | def __init__(self, args, output_size, bidirectional=False): 31 | super(LSTM, self).__init__() 32 | self.device = args.device 33 | self.hidden_size = args.hidden_size 34 | self.input_size = args.input_size 35 | self.bidirectional = bidirectional 36 | self.forget_gate = nn.Linear(self.input_size + self.hidden_size, self.hidden_size) 37 | self.input_gate = nn.Linear(self.input_size + self.hidden_size, self.hidden_size) 38 | self.c_gate = nn.Linear(self.input_size + self.hidden_size, self.hidden_size) 39 | self.output_gate = nn.Linear(self.input_size + self.hidden_size, self.hidden_size) 40 | if not bidirectional: 41 | self.h2o = nn.Linear(self.hidden_size, output_size) 42 | else: 43 | self.h2o = nn.Linear(self.hidden_size * 2, output_size) 44 | self.tanh = nn.Tanh() 45 | self.sigmoid = nn.Sigmoid() 46 | 47 | def forward(self, x): 48 | global ct, result 49 | x = x[0] 50 | if not self.bidirectional: 51 | hidden = torch.zeros(1, self.hidden_size).to(self.device) 52 | ct = torch.zeros(1, self.hidden_size).to(self.device) 53 | for i in range(x.shape[0]): 54 | token = x[i: i + 1] 55 | combined = torch.cat((token, hidden), 1) 56 | forget = self.sigmoid(self.forget_gate(combined)) 57 | input = self.sigmoid(self.input_gate(combined)) 58 | c_ = self.tanh(self.c_gate(combined)) 59 | output = self.sigmoid(self.output_gate(combined)) 60 | ct = ct * forget + input * c_ 61 | hidden = self.tanh(ct) * output 62 | result = self.h2o(hidden) 63 | return result 64 | else: 65 | num = x.shape[0] 66 | hidden1 = torch.zeros(1, 
self.hidden_size).to(self.device) 67 | hidden2 = torch.zeros(1, self.hidden_size).to(self.device) 68 | hidden1s = [] 69 | hidden2s = [] 70 | ct1 = torch.zeros(1, self.hidden_size).to(self.device) 71 | ct2 = torch.zeros(1, self.hidden_size).to(self.device) 72 | for i in range(num): 73 | token1 = x[i: i+1] 74 | token2 = x[num-i-1: num-i] 75 | combined1 = torch.cat((token1, hidden1), 1) 76 | combined2 = torch.cat((token2, hidden2), 1) 77 | forget1 = self.sigmoid(self.forget_gate(combined1)) 78 | forget2 = self.sigmoid(self.forget_gate(combined2)) 79 | input1 = self.sigmoid(self.input_gate(combined1)) 80 | input2 = self.sigmoid(self.input_gate(combined2)) 81 | c_1 = self.tanh(self.c_gate(combined1)) 82 | c_2 = self.tanh(self.c_gate(combined2)) 83 | output1 = self.sigmoid(self.output_gate(combined1)) 84 | output2 = self.sigmoid(self.output_gate(combined2)) 85 | ct1 = ct1 * forget1 + input1 * c_1 86 | ct2 = ct2 * forget2 + input2 * c_2 87 | hidden1 = self.tanh(ct1) * output1 88 | hidden2 = self.tanh(ct2) * output2 89 | hidden1s.append(hidden1) 90 | hidden2s.insert(0, hidden2) 91 | hidden1 = torch.stack(hidden1s).mean(0) 92 | hidden2 = torch.stack(hidden2s).mean(0) 93 | result = self.h2o(torch.cat((hidden1, hidden2), 1)) 94 | return result 95 | 96 | 97 | class GRU(nn.Module): 98 | def __init__(self, args, output_size): 99 | super(GRU, self).__init__() 100 | self.device = args.device 101 | self.input_size = args.input_size 102 | self.hidden_size = args.hidden_size 103 | self.reset_gate = nn.Linear(self.input_size + self.hidden_size, self.hidden_size) 104 | self.update_gate = nn.Linear(self.input_size + self.hidden_size, self.hidden_size) 105 | self.h_gate = nn.Linear(self.input_size + self.hidden_size, self.hidden_size) 106 | self.h2o = nn.Linear(self.hidden_size, output_size) 107 | self.tanh = nn.Tanh() 108 | self.sigmoid = nn.Sigmoid() 109 | 110 | def forward(self, x): 111 | global output 112 | hidden = torch.zeros(1, self.hidden_size).to(self.device) 113 | ones = torch.ones(1, self.hidden_size).to(self.device) 114 | x = x[0] 115 | for i in range(x.shape[0]): 116 | token = x[i: i + 1] 117 | combined = torch.cat((token, hidden), 1) # 1 x (128+_) 118 | reset = self.sigmoid(self.reset_gate(combined)) 119 | zt = self.sigmoid(self.update_gate(combined)) 120 | combined2 = torch.cat((token, reset * hidden), 1) 121 | h_ = self.tanh(self.h_gate(combined2)) 122 | hidden = zt * hidden + (ones - zt) * h_ 123 | output = self.h2o(hidden) 124 | return output 125 | 126 | 127 | -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/codes/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/codes/requirements.txt -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/models/lab4曾经保存的模型.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/models/lab4曾经保存的模型.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/文本分类/Bi-lstm pricious recal f1.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/文本分类/Bi-lstm pricious recal f1.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/文本分类/Bi-lstm-loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/文本分类/Bi-lstm-loss.png -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/文本分类/GRU-loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/文本分类/GRU-loss.png -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/文本分类/GRU-precious&recall&f1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/文本分类/GRU-precious&recall&f1.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/文本分类/RNN-accuracy&recall&f1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/文本分类/RNN-accuracy&recall&f1.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/文本分类/RNN-loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/文本分类/RNN-loss.png -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/文本分类/lstm-accuracy&recall&f1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/文本分类/lstm-accuracy&recall&f1.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/文本分类/lstm-loss and accuracy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/文本分类/lstm-loss and accuracy.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/文本分类/lstm-loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/文本分类/lstm-loss.png -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/GRU-climate-loss.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/GRU-climate-loss.png -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-0.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-1.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-2.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-3.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-4.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-5.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-6.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-7.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-8.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-8.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/pred-true-9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/pred-true-9.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/test-climate-result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/test-climate-result.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/results/温度预测/test-climate-running.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/results/温度预测/test-climate-running.jpg -------------------------------------------------------------------------------- /实验4-1190201215-冯开来/实验4-1190201215-冯开来.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验4-1190201215-冯开来/实验4-1190201215-冯开来.pdf -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/SeFa生成的动图/pggan_celebahq1024_N5_K5_seed0.mov: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/SeFa生成的动图/pggan_celebahq1024_N5_K5_seed0.mov -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-120.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-120.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-150.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-150.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-180.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-180.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-210.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-210.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-240.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-240.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-270.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-270.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-30.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-30.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-300.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-300.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-330.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-330.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-360.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-360.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-390.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-390.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-420.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-420.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-450.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-450.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-480.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-480.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-510.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-540.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-540.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-570.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-570.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-60.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-60.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-600.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-600.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-630.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-630.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-660.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-660.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-690.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-690.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-720.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-720.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-750.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-750.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-780.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-780.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-810.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-810.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-840.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-840.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-870.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-870.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-90.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-90.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-900.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-900.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-930.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-930.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-960.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-960.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-adam/epoch-990.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-adam/epoch-990.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-120.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-120.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-150.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-150.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-180.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-180.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-210.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-210.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-240.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-240.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-270.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-270.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-30.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-30.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-300.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-300.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-330.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-330.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-360.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-360.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-390.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-390.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-420.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-420.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-450.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-450.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-480.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-480.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-510.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-540.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-540.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-570.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-570.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-60.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-60.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-600.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-600.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-630.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-630.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-660.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-660.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-690.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-690.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-720.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-720.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-750.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-750.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-780.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-780.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-810.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-810.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-840.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-840.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-870.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-870.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-90.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-90.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-900.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-900.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-930.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-930.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-960.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-960.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN-sgd/epoch-990.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN-sgd/epoch-990.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-120.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-120.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-150.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-150.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-180.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-180.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-210.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-210.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-240.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-240.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-270.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-270.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-30.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-30.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-300.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-300.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-330.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-330.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-360.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-360.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-390.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-390.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-420.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-420.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-450.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-450.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-480.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-480.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-510.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-540.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-540.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-570.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-570.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-60.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-60.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-600.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-600.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-630.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-630.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-660.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-660.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-690.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-690.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-720.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-720.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-750.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-750.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-780.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-780.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-810.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-810.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-840.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-840.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-870.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-870.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-90.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-90.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-900.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-900.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-930.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-930.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-960.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-960.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/GAN/epoch-990.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/GAN/epoch-990.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-120.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-120.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-150.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-150.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-180.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-180.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-210.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-210.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-240.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-240.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-270.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-270.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-30.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-30.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-300.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-300.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-330.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-330.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-360.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-360.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-390.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-390.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-420.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-420.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-450.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-450.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-480.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-480.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-510.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-540.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-540.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-570.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-570.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-60.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-60.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-600.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-600.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-630.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-630.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-660.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-660.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-690.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-690.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-720.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-720.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-750.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-750.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-780.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-780.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-810.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-810.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-840.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-840.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-870.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-870.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-90.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-90.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-900.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-900.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-930.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-930.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-960.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-960.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN-GP/epoch-990.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN-GP/epoch-990.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-120.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-120.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-150.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-150.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-180.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-180.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-210.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-210.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-240.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-240.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-270.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-270.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-30.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-30.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-300.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-300.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-330.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-330.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-360.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-360.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-390.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-390.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-420.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-420.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-450.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-450.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-480.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-480.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-510.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-540.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-540.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-570.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-570.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-60.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-60.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-600.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-600.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-630.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-630.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-660.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-660.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-690.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-690.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-720.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-720.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-750.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-750.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-780.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-780.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-810.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-810.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-840.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-840.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-870.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-870.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-90.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-90.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-900.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-900.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-930.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-930.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-960.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-960.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/WGAN/epoch-990.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/WGAN/epoch-990.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/loss/GAN.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/loss/GAN.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/loss/WGAN-GP.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/loss/WGAN-GP.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/results/loss/WGAN.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/results/loss/WGAN.jpg -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/sefa.py: -------------------------------------------------------------------------------- 1 | # python3.7 2 | """A simple tool to synthesize images with pre-trained models.""" 3 | 4 | import os 5 | import argparse 6 | import subprocess 7 | from tqdm import tqdm 8 | import numpy as np 9 | 10 | import torch 11 | 12 | from models import MODEL_ZOO 13 | from models import build_generator 14 | from utils.misc import bool_parser 15 | 16 | 17 | def postprocess(images): 18 | """Post-processes images from `torch.Tensor` to `numpy.ndarray`.""" 19 | images = images.detach().cpu().numpy() 20 | images = (images + 1) * 255 / 2 21 | images = np.clip(images + 0.5, 0, 255).astype(np.uint8) 22 | images = images.transpose(0, 2, 3, 1) 23 | return images 24 | 25 | def save_video(visuals, path): 26 | gifs = [] 27 | import torch 28 | import imageio 29 | from torchvision.utils import make_grid 30 | for img in visuals: 31 | x = (img + 1) / 2 32 | x = x.clamp_(0, 1) 33 | grid = make_grid(x.data.cpu(), nrow=1, padding=0, pad_value=0, 34 | normalize=False, range=None, scale_each=None) 35 | ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).to('cpu', torch.uint8).numpy() 36 | ndarr = ndarr.transpose((1, 2, 0)) 37 | gifs.append(ndarr) 38 | imageio.mimwrite(path, gifs) 39 | 40 | def save_frame(visuals, path): 41 | from torchvision.utils import save_image as th_save 42 | x = (visuals + 1) / 2 43 | x = x.clamp_(0, 1) 44 | th_save(x.data.cpu(), path, nrow=1, padding=0) 45 | 46 | def resize_image(img, size): 47 | return torch.nn.functional.interpolate(img, (size, size), mode='bilinear') 48 | 49 | def parse_args(): 50 | """Parses arguments.""" 51 | parser = argparse.ArgumentParser( 52 | description='Synthesize images with pre-trained models.') 53 | parser.add_argument('--model_name', type=str, default="pggan_celebahq1024", 54 | help='Name to the pre-trained model.') 55 | parser.add_argument('--save_dir', type=str, default=None, 56 | help='Directory to save the results. If not specified, ' 57 | 'the results will be saved to ' 58 | '`work_dirs/synthesis/` by default. ' 59 | '(default: %(default)s)') 60 | parser.add_argument('-N', '--num_samples', type=int, default=5, 61 | help='Number of samples used for visualization. ' 62 | '(default: %(default)s)') 63 | parser.add_argument('-K', '--num_semantics', type=int, default=5, 64 | help='Number of semantic boundaries corresponding to ' 65 | 'the top-k eigen values. 
(default: %(default)s)') 66 | parser.add_argument('--generate_html', type=bool_parser, default=False, 67 | help='Whether to use HTML page to visualize the ' 68 | 'synthesized results. (default: %(default)s)') 69 | parser.add_argument('--save_raw_synthesis', type=bool_parser, default=True, 70 | help='Whether to save raw synthesis. ' 71 | '(default: %(default)s)') 72 | parser.add_argument('--seed', type=int, default=0, 73 | help='Seed for sampling. (default: %(default)s)') 74 | parser.add_argument('--start_distance', type=float, default=-2.0, 75 | help='Start point for manipulation on each semantic. ' 76 | '(default: %(default)s)') 77 | parser.add_argument('--end_distance', type=float, default=2.0, 78 | help='Ending point for manipulation on each semantic. ' 79 | '(default: %(default)s)') 80 | parser.add_argument('--step', type=int, default=21, 81 | help='Manipulation step on each semantic. ' 82 | '(default: %(default)s)') 83 | parser.add_argument('--trunc_psi', type=float, default=0.7, 84 | help='Psi factor used for truncation. This is ' 85 | 'particularly applicable to StyleGAN (v1/v2). ' 86 | '(default: %(default)s)') 87 | parser.add_argument('--trunc_layers', type=int, default=8, 88 | help='Number of layers to perform truncation. This is ' 89 | 'particularly applicable to StyleGAN (v1/v2). ' 90 | '(default: %(default)s)') 91 | parser.add_argument('--viz_size', type=int, default=256, 92 | help='Size of images to visualize on the HTML page. ' 93 | '(default: %(default)s)') 94 | parser.add_argument('--randomize_noise', type=bool_parser, default=False, 95 | help='Whether to randomize the layer-wise noise. This ' 96 | 'is particularly applicable to StyleGAN (v1/v2). ' 97 | '(default: %(default)s)') 98 | parser.add_argument('--cuda', type=bool_parser, default=False, 99 | help='Whether to use cuda.') 100 | 101 | return parser.parse_args() 102 | 103 | 104 | def main(): 105 | """Main function.""" 106 | args = parse_args() 107 | if not args.save_raw_synthesis and not args.generate_html: 108 | return 109 | 110 | num_sam = args.num_samples 111 | num_sem = args.num_semantics 112 | 113 | # Parse model configuration. 114 | if args.model_name not in MODEL_ZOO: 115 | raise SystemExit(f'Model `{args.model_name}` is not registered in ' 116 | f'`models/model_zoo.py`!') 117 | 118 | model_config = MODEL_ZOO[args.model_name].copy() 119 | url = model_config.pop('url') # URL to download model if needed. 120 | 121 | # Get work directory and job name. 122 | if args.save_dir: 123 | work_dir = args.save_dir 124 | else: 125 | work_dir = os.path.join('work_dirs', 'synthesis') 126 | os.makedirs(work_dir, exist_ok=True) 127 | 128 | prefix = (f'{args.model_name}_' 129 | f'N{num_sam}_K{num_sem}_seed{args.seed}') 130 | job_dir = os.path.join(work_dir, prefix) 131 | os.makedirs(job_dir, exist_ok=True) 132 | frame_dir = os.path.join(job_dir, 'frames') 133 | if args.save_raw_synthesis: 134 | os.makedirs(frame_dir, exist_ok=True) 135 | 136 | 137 | # Build generation and get synthesis kwargs. 138 | print(f'Building generator for model `{args.model_name}` ...') 139 | generator = build_generator(**model_config) 140 | synthesis_kwargs = dict(trunc_psi=args.trunc_psi, 141 | trunc_layers=args.trunc_layers, 142 | randomize_noise=args.randomize_noise) 143 | print(f'Finish building generator.') 144 | 145 | # Load pre-trained weights. 
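# The checkpoint is cached as checkpoints/<model_name>.pth; if it is missing it is
# downloaded from the MODEL_ZOO url via wget, the EMA weights ('generator_smooth')
# are preferred over the raw 'generator' weights when present, and the generator is
# moved to CUDA (if requested) and switched to eval mode before sampling.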
146 | os.makedirs('checkpoints', exist_ok=True) 147 | checkpoint_path = os.path.join('checkpoints', args.model_name + '.pth') 148 | print(f'Loading checkpoint from `{checkpoint_path}` ...') 149 | if not os.path.exists(checkpoint_path): 150 | print(f' Downloading checkpoint from `{url}` ...') 151 | subprocess.call(['wget', '--quiet', '-O', checkpoint_path, url]) 152 | print(f' Finish downloading checkpoint.') 153 | checkpoint = torch.load(checkpoint_path, map_location='cpu') 154 | if 'generator_smooth' in checkpoint: 155 | generator.load_state_dict(checkpoint['generator_smooth']) 156 | else: 157 | generator.load_state_dict(checkpoint['generator']) 158 | if args.cuda: 159 | generator = generator.cuda() 160 | generator.eval() 161 | print(f'Finish loading checkpoint.') 162 | 163 | directions = [torch.randn(num_sam, generator.z_space_dim) for _ in range(num_sem)] 164 | weight = generator.__getattr__('layer0').weight 165 | weight = weight.flip(2, 3).permute(1, 0, 2, 3).flatten(1) 166 | 167 | ################ 168 | # TODO factorize the weight of layer0 to get the directions 169 | # run: python sefa.py pggan_celebahq1024 --cuda false/true 170 | _, directions = torch.linalg.eig(torch.mm(weight, weight.T)) 171 | 172 | directions = directions.transpose(1, 0).real 173 | ################ 174 | 175 | if args.cuda: 176 | directions = [d.cuda() for d in directions] 177 | 178 | # Set random seed. 179 | np.random.seed(args.seed) 180 | torch.manual_seed(args.seed) 181 | 182 | # Sample and synthesize. 183 | code = torch.randn(num_sam, generator.z_space_dim) 184 | if args.cuda: 185 | code = code.cuda() 186 | distances = np.linspace(args.start_distance, args.end_distance, args.step) 187 | visual_list = [[] for _ in range(num_sem)] 188 | with torch.no_grad(): 189 | video_list = [] 190 | for s in tqdm(range(args.step)): 191 | row_list = [] 192 | images = generator(code, **synthesis_kwargs)['image'] 193 | images = resize_image(images, args.viz_size) 194 | row_list.append(images.cpu()) 195 | for i, dr in enumerate(directions[:num_sem]): 196 | temp_code = code.clone() 197 | temp_code += dr * distances[s] 198 | images = generator(temp_code, **synthesis_kwargs)['image'] 199 | images = resize_image(images, args.viz_size) 200 | row_list.append(images.cpu()) 201 | visual_list[i].append(images.cpu()) 202 | video_list.append(torch.cat(row_list, dim=3)) 203 | save_video(video_list, os.path.join(job_dir, prefix + '.mov')) 204 | for i, v in enumerate(visual_list): 205 | save_frame(torch.cat(v, dim=3), os.path.join(frame_dir, f'frame{i}.png')) 206 | print(f'Finish synthesizing.') 207 | 208 | 209 | if __name__ == '__main__': 210 | main() 211 | -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/实验5-1190201215-冯开来.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/实验5-1190201215-冯开来.pdf -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/对抗生成式网络代码/datasets.py: -------------------------------------------------------------------------------- 1 | import mat4py 2 | import torch 3 | from torch.utils.data import Dataset 4 | import numpy as np 5 | 6 | 7 | class Points(Dataset): 8 | def __init__(self): 9 | self.data = mat4py.loadmat("./dataset/points.mat")['xx'] 10 | 11 | def __getitem__(self, idx): 12 | xy = torch.tensor(np.array(self.data[idx])).to(torch.float32) 
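# each item is one 2-D point (x, y) read from points.mat['xx'], cast to float32
# so it matches the default dtype of the Linear layers in models.py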
13 | return xy 14 | 15 | def __len__(self): 16 | return len(self.data) 17 | -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/对抗生成式网络代码/draw.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | import mat4py 3 | import numpy as np 4 | import torch 5 | 6 | data = mat4py.loadmat("./dataset/points.mat")['xx'] 7 | data = np.array(data) 8 | 9 | 10 | def draw_background(D, x_min, x_max, y_min, y_max): 11 | i = x_min 12 | background = [] 13 | color = [] 14 | while i <= x_max - 0.01: 15 | j = y_min 16 | while j <= y_max - 0.01: 17 | background.append([i, j]) 18 | j += 0.01 19 | background.append([i, y_max]) 20 | i += 0.01 21 | j = y_min 22 | while j <= y_max - 0.01: 23 | background.append([i, j]) 24 | j += 0.01 25 | background.append([i, y_max]) 26 | background.append([x_max, y_max]) 27 | result = D(torch.Tensor(background).to("cuda:0")) 28 | for i in range(len(result)): 29 | if result[i] < 0.5: 30 | color.append('w') 31 | else: 32 | color.append('k') 33 | # print(result) 34 | background = np.array(background) 35 | plt.scatter(background[:, 0], background[:, 1], c=color) 36 | 37 | 38 | def draw_scatter(D, xy, epoch, model): 39 | x = xy[:, 0] 40 | y = xy[:, 1] 41 | draw_background(D, -0.5, 2.2, -0.2, 1) 42 | plt.xlim(-0.5, 2.2) 43 | plt.ylim(-0.2, 1) 44 | plt.scatter(data[:, 0], data[:, 1], c='b', s=10) 45 | plt.scatter(x, y, c='r', s=10) 46 | plt.savefig("./result/" + model + '/epoch-' + str(epoch) + '.jpg') -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/对抗生成式网络代码/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import random 3 | from torch.utils.data import DataLoader 4 | from torch import autograd 5 | from datasets import * 6 | from models import * 7 | from draw import * 8 | 9 | 10 | def gradient_penalty(D, xr, xf, batchsz, args): 11 | 12 | # [b, 1] 13 | t = torch.rand(batchsz, 1).to(args.device) 14 | # [b, 1] => [b, 10] 15 | t = t.expand_as(xr) 16 | # interpolation 17 | mid = t * xr + (1 - t) * xf 18 | # set it requires gradient 19 | mid.requires_grad_() 20 | 21 | pred = D(mid) 22 | grads = autograd.grad(outputs=pred, inputs=mid, 23 | grad_outputs=torch.ones_like(pred), 24 | create_graph=True, retain_graph=True, 25 | only_inputs=True)[0] 26 | 27 | gp = torch.pow(grads.norm(2, dim=1) - 1, 2).mean() 28 | 29 | return gp 30 | 31 | 32 | def main(args): 33 | 34 | global loss_D, loss_G 35 | device = args.device 36 | 37 | # fix the seed for reproducibility 38 | seed = args.seed 39 | torch.manual_seed(seed) 40 | np.random.seed(seed) 41 | random.seed(seed) 42 | 43 | # build dataset and dataloader 44 | print("\nProcessing " + args.dataset + " dataset...") 45 | if args.dataset == "points": 46 | dataset = Points() 47 | train_loader = DataLoader(dataset, shuffle=True, batch_size=args.batch_size) 48 | else: 49 | raise ValueError(f"dataset {args.dataset} not supported") 50 | print("Data processing finished!") 51 | 52 | # build model 53 | print("\nBuilding model " + args.model + "...") 54 | if args.model in ["GAN", "WGAN", "WGAN-GP"]: 55 | G = Generator() 56 | D = Discriminator() 57 | else: 58 | raise ValueError(f"dataset {args.model} not supported") 59 | G.to(device) 60 | D.to(device) 61 | print("Model building finished!") 62 | 63 | # set up optimizers 64 | if args.optimizer == 'adam': 65 | optim_G = torch.optim.Adam(G.parameters(), lr=5e-4, betas=(0.5, 0.9)) 66 | 
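# betas=(0.5, 0.9) follows common GAN practice: a smaller beta1 reduces the momentum
# that tends to destabilise the alternating discriminator/generator updates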
optim_D = torch.optim.Adam(D.parameters(), lr=5e-4, betas=(0.5, 0.9)) 67 | elif args.optimizer == 'sgd': 68 | optim_G = torch.optim.SGD(G.parameters(), lr=3e-4) 69 | optim_D = torch.optim.SGD(D.parameters(), lr=3e-4) 70 | elif args.optimizer == 'rmsprop': 71 | optim_G = torch.optim.RMSprop(G.parameters(), lr=1e-4) 72 | optim_D = torch.optim.RMSprop(D.parameters(), lr=1e-4) 73 | else: 74 | raise ValueError(f"optimizer {args.optimizer} not supported") 75 | 76 | # start train and validation 77 | print("\nStart training...") 78 | all_loss = [] 79 | for epoch in range(1, args.epochs + 1): 80 | # train loss 81 | for data in train_loader: 82 | # 1. optimize Discriminator 83 | # 1.1 train real data 84 | xr = data.to(device) 85 | batchsz = xr.shape[0] 86 | predr = D(xr) 87 | # 1.2 train fake data 88 | z = torch.randn(batchsz, 10).to(device) 89 | xf = G(z).detach() 90 | predf = D(xf) 91 | 92 | # 1.3 loss and update Discriminator 93 | loss_D = - (torch.log(predr) + torch.log(1. - predf)).mean() 94 | 95 | if args.model == 'WGAN': 96 | for p in D.parameters(): 97 | # print(p.data) 98 | p.data.clamp_(-args.CLAMP, args.CLAMP) 99 | 100 | if args.model == 'WGAN-GP': 101 | loss_D += 0.2 * gradient_penalty(D, xr, xf.detach(), batchsz, args) 102 | 103 | optim_D.zero_grad() 104 | loss_D.backward() 105 | optim_D.step() 106 | 107 | # 2. optimize Generator 108 | z = torch.randn(args.batch_size, 10).to(device) 109 | xf = G(z) 110 | predf = D(xf) 111 | loss_G = torch.log(1. - predf).mean() 112 | optim_G.zero_grad() 113 | loss_G.backward() 114 | optim_G.step() 115 | 116 | if epoch % 5 == 0: 117 | print('[epoch %d/%d] Discriminator loss: %.3f, Generator loss: %.3f' 118 | % (epoch, args.epochs, loss_D.item(), loss_G.item())) 119 | all_loss.append([loss_D.item(), loss_G.item()]) 120 | if epoch % 30 == 0 and args.draw: 121 | input = torch.randn(1000, 10).to(device) 122 | output = G(input) 123 | output = output.to('cpu').detach() 124 | xy = np.array(output) 125 | draw_scatter(D, xy, epoch, args.model) 126 | 127 | # draw the loss 128 | all_loss = np.array(all_loss) 129 | x = np.arange(len(all_loss)) 130 | y1 = all_loss[:, 0] 131 | y2 = all_loss[:, 1] 132 | fig = plt.figure(2, figsize=(16, 16), dpi=150) 133 | ax1 = fig.add_subplot(2, 1, 1) 134 | ax2 = fig.add_subplot(2, 1, 2) 135 | ax1.plot(x, y1, 'r', label='loss_D') 136 | ax2.plot(x, y2, 'g', label='loss_G') 137 | ax1.legend(loc='upper right') 138 | ax2.legend(loc='upper right') 139 | plt.savefig(args.output_path + args.model + "/loss.jpg") 140 | 141 | # save the model 142 | state = {"model_D": D.state_dict(), "model_G": G.state_dict()} 143 | torch.save(state, args.output_path + 'models/' + args.model + '.pth') 144 | 145 | 146 | if __name__ == '__main__': 147 | parser = argparse.ArgumentParser("lab5") 148 | parser.add_argument("--device", default="cuda:0") 149 | parser.add_argument("--model", default="GAN") # GAN WGAN WGAN-GP 150 | parser.add_argument("--epochs", default=1000) 151 | parser.add_argument("--batch-size", default=2000) 152 | parser.add_argument("--seed", default=42) 153 | parser.add_argument("--dataset", default="points") # points 154 | parser.add_argument("--output-path", default="./result/") 155 | parser.add_argument("--hidden-size", default=128) 156 | parser.add_argument("--input-size", default=128) 157 | parser.add_argument("--CLAMP", default=0.1) 158 | parser.add_argument("--optimizer", default="rmsprop") # adam sgd rmsprop 159 | parser.add_argument("--draw", default=False, help="draw the loss and process") 160 | 161 | args = parser.parse_args() 162 |
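# illustrative invocations (not taken from the original repo), using the flags defined above:
#   python main.py --model GAN --optimizer adam --draw True
#   python main.py --model WGAN-GP --optimizer rmsprop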
print(args) 163 | 164 | main(args) 165 | -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/对抗生成式网络代码/models.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | class Generator(nn.Module): 5 | def __init__(self): 6 | super(Generator, self).__init__() 7 | self.net = nn.Sequential( 8 | # z: [b, 2] => [b, 2] 9 | nn.Linear(10, 128), 10 | nn.ReLU(True), 11 | nn.Linear(128, 256), 12 | nn.ReLU(True), 13 | nn.Linear(256, 512), 14 | nn.ReLU(True), 15 | nn.Linear(512, 2), 16 | ) 17 | 18 | def forward(self, z): 19 | output = self.net(z) 20 | return output 21 | 22 | 23 | class Discriminator(nn.Module): 24 | def __init__(self): 25 | super(Discriminator, self).__init__() 26 | self.net = nn.Sequential( 27 | # [b, 2] => [b, 1] 28 | nn.Linear(2, 128), 29 | nn.LeakyReLU(), 30 | nn.Linear(128, 256), 31 | nn.LeakyReLU(), 32 | nn.Linear(256, 128), 33 | nn.LeakyReLU(), 34 | nn.Linear(128, 1), 35 | nn.Sigmoid() 36 | ) 37 | def forward(self, x): 38 | output = self.net(x).view(-1) 39 | return output 40 | 41 | 42 | class WGAN_D(nn.Module): 43 | def __init__(self): 44 | super(WGAN_D, self).__init__() 45 | -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/对抗生成式网络代码/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/对抗生成式网络代码/requirements.txt -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/拟合分布的动图/GAN-adam.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/拟合分布的动图/GAN-adam.gif -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/拟合分布的动图/GAN-rmsprop.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/拟合分布的动图/GAN-rmsprop.gif -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/拟合分布的动图/GAN-sgd.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/拟合分布的动图/GAN-sgd.gif -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/拟合分布的动图/WGAN-GP.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/拟合分布的动图/WGAN-GP.gif -------------------------------------------------------------------------------- /实验5-1190201215-冯开来/拟合分布的动图/WGAN.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验5-1190201215-冯开来/拟合分布的动图/WGAN.gif -------------------------------------------------------------------------------- 
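Note on the losses in 对抗生成式网络代码/main.py above: the same log-loss with a Sigmoid discriminator is optimised for all three model choices (GAN, WGAN, WGAN-GP), with weight clipping or a gradient-penalty term added on top. For reference only, a minimal sketch of the Wasserstein objectives themselves is given below; it assumes a critic with no final Sigmoid, which is not how Discriminator in models.py is defined, so it is an illustration of the technique rather than the code that produced the results in results/.

import torch
from torch import nn

def wgan_critic_loss(critic, real, fake):
    # The critic maximises E[f(real)] - E[f(fake)]; we minimise the negation.
    return -(critic(real).mean() - critic(fake).mean())

def wgan_generator_loss(critic, fake):
    # The generator pushes the critic score of its samples up.
    return -critic(fake).mean()

def wgan_gp(critic, real, fake, lam=10.0):
    # Gradient penalty on points interpolated between real and fake batches;
    # the critic's gradient norm is pulled towards 1 (lam=10 in the WGAN-GP paper).
    eps = torch.rand(real.size(0), 1, device=real.device).expand_as(real)
    mid = (eps * real + (1.0 - eps) * fake).requires_grad_(True)
    score = critic(mid)
    grads = torch.autograd.grad(score, mid,
                                grad_outputs=torch.ones_like(score),
                                create_graph=True)[0]
    return lam * ((grads.norm(2, dim=1) - 1.0) ** 2).mean()

if __name__ == "__main__":
    # Smoke test with a throwaway 2-D critic (a hypothetical Sigmoid-free Discriminator).
    critic = nn.Sequential(nn.Linear(2, 64), nn.LeakyReLU(0.2), nn.Linear(64, 1))
    real, fake = torch.randn(8, 2), torch.randn(8, 2)
    print(wgan_critic_loss(critic, real, fake).item(),
          wgan_generator_loss(critic, fake).item(),
          wgan_gp(critic, real, fake).item())

Under this formulation the WGAN branch would keep the weight clamp but drop the logs, and the WGAN-GP branch would replace clipping with the penalty term; the tensor shapes mirror those used above (batches of 2-D points).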
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/__pycache__/darknet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/__pycache__/darknet.cpython-38.pyc -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/cfg/yolov3.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | batch=1 3 | subdivisions=1 4 | width= 416 5 | 6 | height = 416 7 | channels=3 8 | momentum=0.9 9 | decay=0.0005 10 | angle=0 11 | saturation = 1.5 12 | exposure = 1.5 13 | hue=.1 14 | 15 | learning_rate=0.001 16 | burn_in=1000 17 | max_batches = 500200 18 | policy=steps 19 | steps=400000,450000 20 | scales=.1,.1 21 | 22 | [convolutional] 23 | batch_normalize=1 24 | filters=32 25 | size=3 26 | stride=1 27 | pad=1 28 | activation=leaky 29 | 30 | # Downsample 31 | 32 | [convolutional] 33 | batch_normalize=1 34 | filters=64 35 | size=3 36 | stride=2 37 | pad=1 38 | activation=leaky 39 | 40 | [convolutional] 41 | batch_normalize=1 42 | filters=32 43 | size=1 44 | stride=1 45 | pad=1 46 | activation=leaky 47 | 48 | [convolutional] 49 | batch_normalize=1 50 | filters=64 51 | size=3 52 | stride=1 53 | pad=1 54 | activation=leaky 55 | 56 | [shortcut] 57 | from=-3 58 | activation=linear 59 | 60 | # Downsample 61 | 62 | [convolutional] 63 | batch_normalize=1 64 | filters=128 65 | size=3 66 | stride=2 67 | pad=1 68 | activation=leaky 69 | 70 | [convolutional] 71 | batch_normalize=1 72 | filters=64 73 | size=1 74 | stride=1 75 | pad=1 76 | activation=leaky 77 | 78 | [convolutional] 79 | batch_normalize=1 80 | filters=128 81 | size=3 82 | stride=1 83 | pad=1 84 | activation=leaky 85 | 86 | [shortcut] 87 | from=-3 88 | activation=linear 89 | 90 | [convolutional] 91 | batch_normalize=1 92 | filters=64 93 | size=1 94 | stride=1 95 | pad=1 96 | activation=leaky 97 | 98 | [convolutional] 99 | batch_normalize=1 100 | filters=128 101 | size=3 102 | stride=1 103 | pad=1 104 | activation=leaky 105 | 106 | [shortcut] 107 | from=-3 108 | activation=linear 109 | 110 | # Downsample 111 | 112 | [convolutional] 113 | batch_normalize=1 114 | filters=256 115 | size=3 116 | stride=2 117 | pad=1 118 | activation=leaky 119 | 120 | [convolutional] 121 | batch_normalize=1 122 | filters=128 123 | size=1 124 | stride=1 125 | pad=1 126 | activation=leaky 127 | 128 | [convolutional] 129 | batch_normalize=1 130 | filters=256 131 | size=3 132 | stride=1 133 | pad=1 134 | activation=leaky 135 | 136 | [shortcut] 137 | from=-3 138 | activation=linear 139 | 140 | [convolutional] 141 | batch_normalize=1 142 | filters=128 143 | size=1 144 | stride=1 145 | pad=1 146 | activation=leaky 147 | 148 | [convolutional] 149 | batch_normalize=1 150 | filters=256 151 | size=3 152 | stride=1 153 | pad=1 154 | activation=leaky 155 | 156 | [shortcut] 157 
| from=-3 158 | activation=linear 159 | 160 | [convolutional] 161 | batch_normalize=1 162 | filters=128 163 | size=1 164 | stride=1 165 | pad=1 166 | activation=leaky 167 | 168 | [convolutional] 169 | batch_normalize=1 170 | filters=256 171 | size=3 172 | stride=1 173 | pad=1 174 | activation=leaky 175 | 176 | [shortcut] 177 | from=-3 178 | activation=linear 179 | 180 | [convolutional] 181 | batch_normalize=1 182 | filters=128 183 | size=1 184 | stride=1 185 | pad=1 186 | activation=leaky 187 | 188 | [convolutional] 189 | batch_normalize=1 190 | filters=256 191 | size=3 192 | stride=1 193 | pad=1 194 | activation=leaky 195 | 196 | [shortcut] 197 | from=-3 198 | activation=linear 199 | 200 | 201 | [convolutional] 202 | batch_normalize=1 203 | filters=128 204 | size=1 205 | stride=1 206 | pad=1 207 | activation=leaky 208 | 209 | [convolutional] 210 | batch_normalize=1 211 | filters=256 212 | size=3 213 | stride=1 214 | pad=1 215 | activation=leaky 216 | 217 | [shortcut] 218 | from=-3 219 | activation=linear 220 | 221 | [convolutional] 222 | batch_normalize=1 223 | filters=128 224 | size=1 225 | stride=1 226 | pad=1 227 | activation=leaky 228 | 229 | [convolutional] 230 | batch_normalize=1 231 | filters=256 232 | size=3 233 | stride=1 234 | pad=1 235 | activation=leaky 236 | 237 | [shortcut] 238 | from=-3 239 | activation=linear 240 | 241 | [convolutional] 242 | batch_normalize=1 243 | filters=128 244 | size=1 245 | stride=1 246 | pad=1 247 | activation=leaky 248 | 249 | [convolutional] 250 | batch_normalize=1 251 | filters=256 252 | size=3 253 | stride=1 254 | pad=1 255 | activation=leaky 256 | 257 | [shortcut] 258 | from=-3 259 | activation=linear 260 | 261 | [convolutional] 262 | batch_normalize=1 263 | filters=128 264 | size=1 265 | stride=1 266 | pad=1 267 | activation=leaky 268 | 269 | [convolutional] 270 | batch_normalize=1 271 | filters=256 272 | size=3 273 | stride=1 274 | pad=1 275 | activation=leaky 276 | 277 | [shortcut] 278 | from=-3 279 | activation=linear 280 | 281 | # Downsample 282 | 283 | [convolutional] 284 | batch_normalize=1 285 | filters=512 286 | size=3 287 | stride=2 288 | pad=1 289 | activation=leaky 290 | 291 | [convolutional] 292 | batch_normalize=1 293 | filters=256 294 | size=1 295 | stride=1 296 | pad=1 297 | activation=leaky 298 | 299 | [convolutional] 300 | batch_normalize=1 301 | filters=512 302 | size=3 303 | stride=1 304 | pad=1 305 | activation=leaky 306 | 307 | [shortcut] 308 | from=-3 309 | activation=linear 310 | 311 | 312 | [convolutional] 313 | batch_normalize=1 314 | filters=256 315 | size=1 316 | stride=1 317 | pad=1 318 | activation=leaky 319 | 320 | [convolutional] 321 | batch_normalize=1 322 | filters=512 323 | size=3 324 | stride=1 325 | pad=1 326 | activation=leaky 327 | 328 | [shortcut] 329 | from=-3 330 | activation=linear 331 | 332 | 333 | [convolutional] 334 | batch_normalize=1 335 | filters=256 336 | size=1 337 | stride=1 338 | pad=1 339 | activation=leaky 340 | 341 | [convolutional] 342 | batch_normalize=1 343 | filters=512 344 | size=3 345 | stride=1 346 | pad=1 347 | activation=leaky 348 | 349 | [shortcut] 350 | from=-3 351 | activation=linear 352 | 353 | 354 | [convolutional] 355 | batch_normalize=1 356 | filters=256 357 | size=1 358 | stride=1 359 | pad=1 360 | activation=leaky 361 | 362 | [convolutional] 363 | batch_normalize=1 364 | filters=512 365 | size=3 366 | stride=1 367 | pad=1 368 | activation=leaky 369 | 370 | [shortcut] 371 | from=-3 372 | activation=linear 373 | 374 | [convolutional] 375 | batch_normalize=1 376 | 
filters=256 377 | size=1 378 | stride=1 379 | pad=1 380 | activation=leaky 381 | 382 | [convolutional] 383 | batch_normalize=1 384 | filters=512 385 | size=3 386 | stride=1 387 | pad=1 388 | activation=leaky 389 | 390 | [shortcut] 391 | from=-3 392 | activation=linear 393 | 394 | 395 | [convolutional] 396 | batch_normalize=1 397 | filters=256 398 | size=1 399 | stride=1 400 | pad=1 401 | activation=leaky 402 | 403 | [convolutional] 404 | batch_normalize=1 405 | filters=512 406 | size=3 407 | stride=1 408 | pad=1 409 | activation=leaky 410 | 411 | [shortcut] 412 | from=-3 413 | activation=linear 414 | 415 | 416 | [convolutional] 417 | batch_normalize=1 418 | filters=256 419 | size=1 420 | stride=1 421 | pad=1 422 | activation=leaky 423 | 424 | [convolutional] 425 | batch_normalize=1 426 | filters=512 427 | size=3 428 | stride=1 429 | pad=1 430 | activation=leaky 431 | 432 | [shortcut] 433 | from=-3 434 | activation=linear 435 | 436 | [convolutional] 437 | batch_normalize=1 438 | filters=256 439 | size=1 440 | stride=1 441 | pad=1 442 | activation=leaky 443 | 444 | [convolutional] 445 | batch_normalize=1 446 | filters=512 447 | size=3 448 | stride=1 449 | pad=1 450 | activation=leaky 451 | 452 | [shortcut] 453 | from=-3 454 | activation=linear 455 | 456 | # Downsample 457 | 458 | [convolutional] 459 | batch_normalize=1 460 | filters=1024 461 | size=3 462 | stride=2 463 | pad=1 464 | activation=leaky 465 | 466 | [convolutional] 467 | batch_normalize=1 468 | filters=512 469 | size=1 470 | stride=1 471 | pad=1 472 | activation=leaky 473 | 474 | [convolutional] 475 | batch_normalize=1 476 | filters=1024 477 | size=3 478 | stride=1 479 | pad=1 480 | activation=leaky 481 | 482 | [shortcut] 483 | from=-3 484 | activation=linear 485 | 486 | [convolutional] 487 | batch_normalize=1 488 | filters=512 489 | size=1 490 | stride=1 491 | pad=1 492 | activation=leaky 493 | 494 | [convolutional] 495 | batch_normalize=1 496 | filters=1024 497 | size=3 498 | stride=1 499 | pad=1 500 | activation=leaky 501 | 502 | [shortcut] 503 | from=-3 504 | activation=linear 505 | 506 | [convolutional] 507 | batch_normalize=1 508 | filters=512 509 | size=1 510 | stride=1 511 | pad=1 512 | activation=leaky 513 | 514 | [convolutional] 515 | batch_normalize=1 516 | filters=1024 517 | size=3 518 | stride=1 519 | pad=1 520 | activation=leaky 521 | 522 | [shortcut] 523 | from=-3 524 | activation=linear 525 | 526 | [convolutional] 527 | batch_normalize=1 528 | filters=512 529 | size=1 530 | stride=1 531 | pad=1 532 | activation=leaky 533 | 534 | [convolutional] 535 | batch_normalize=1 536 | filters=1024 537 | size=3 538 | stride=1 539 | pad=1 540 | activation=leaky 541 | 542 | [shortcut] 543 | from=-3 544 | activation=linear 545 | 546 | ###################### 547 | 548 | [convolutional] 549 | batch_normalize=1 550 | filters=512 551 | size=1 552 | stride=1 553 | pad=1 554 | activation=leaky 555 | 556 | [convolutional] 557 | batch_normalize=1 558 | size=3 559 | stride=1 560 | pad=1 561 | filters=1024 562 | activation=leaky 563 | 564 | [convolutional] 565 | batch_normalize=1 566 | filters=512 567 | size=1 568 | stride=1 569 | pad=1 570 | activation=leaky 571 | 572 | [convolutional] 573 | batch_normalize=1 574 | size=3 575 | stride=1 576 | pad=1 577 | filters=1024 578 | activation=leaky 579 | 580 | [convolutional] 581 | batch_normalize=1 582 | filters=512 583 | size=1 584 | stride=1 585 | pad=1 586 | activation=leaky 587 | 588 | [convolutional] 589 | batch_normalize=1 590 | size=3 591 | stride=1 592 | pad=1 593 | filters=1024 594 | 
activation=leaky 595 | 596 | [convolutional] 597 | size=1 598 | stride=1 599 | pad=1 600 | filters=255 601 | activation=linear 602 | 603 | 604 | [yolo] 605 | mask = 6,7,8 606 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 607 | classes=80 608 | num=9 609 | jitter=.3 610 | ignore_thresh = .5 611 | truth_thresh = 1 612 | random=1 613 | 614 | 615 | [route] 616 | layers = -4 617 | 618 | [convolutional] 619 | batch_normalize=1 620 | filters=256 621 | size=1 622 | stride=1 623 | pad=1 624 | activation=leaky 625 | 626 | [upsample] 627 | stride=2 628 | 629 | [route] 630 | layers = -1, 61 631 | 632 | 633 | 634 | [convolutional] 635 | batch_normalize=1 636 | filters=256 637 | size=1 638 | stride=1 639 | pad=1 640 | activation=leaky 641 | 642 | [convolutional] 643 | batch_normalize=1 644 | size=3 645 | stride=1 646 | pad=1 647 | filters=512 648 | activation=leaky 649 | 650 | [convolutional] 651 | batch_normalize=1 652 | filters=256 653 | size=1 654 | stride=1 655 | pad=1 656 | activation=leaky 657 | 658 | [convolutional] 659 | batch_normalize=1 660 | size=3 661 | stride=1 662 | pad=1 663 | filters=512 664 | activation=leaky 665 | 666 | [convolutional] 667 | batch_normalize=1 668 | filters=256 669 | size=1 670 | stride=1 671 | pad=1 672 | activation=leaky 673 | 674 | [convolutional] 675 | batch_normalize=1 676 | size=3 677 | stride=1 678 | pad=1 679 | filters=512 680 | activation=leaky 681 | 682 | [convolutional] 683 | size=1 684 | stride=1 685 | pad=1 686 | filters=255 687 | activation=linear 688 | 689 | 690 | [yolo] 691 | mask = 3,4,5 692 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 693 | classes=80 694 | num=9 695 | jitter=.3 696 | ignore_thresh = .5 697 | truth_thresh = 1 698 | random=1 699 | 700 | 701 | 702 | [route] 703 | layers = -4 704 | 705 | [convolutional] 706 | batch_normalize=1 707 | filters=128 708 | size=1 709 | stride=1 710 | pad=1 711 | activation=leaky 712 | 713 | [upsample] 714 | stride=2 715 | 716 | [route] 717 | layers = -1, 36 718 | 719 | 720 | 721 | [convolutional] 722 | batch_normalize=1 723 | filters=128 724 | size=1 725 | stride=1 726 | pad=1 727 | activation=leaky 728 | 729 | [convolutional] 730 | batch_normalize=1 731 | size=3 732 | stride=1 733 | pad=1 734 | filters=256 735 | activation=leaky 736 | 737 | [convolutional] 738 | batch_normalize=1 739 | filters=128 740 | size=1 741 | stride=1 742 | pad=1 743 | activation=leaky 744 | 745 | [convolutional] 746 | batch_normalize=1 747 | size=3 748 | stride=1 749 | pad=1 750 | filters=256 751 | activation=leaky 752 | 753 | [convolutional] 754 | batch_normalize=1 755 | filters=128 756 | size=1 757 | stride=1 758 | pad=1 759 | activation=leaky 760 | 761 | [convolutional] 762 | batch_normalize=1 763 | size=3 764 | stride=1 765 | pad=1 766 | filters=256 767 | activation=leaky 768 | 769 | [convolutional] 770 | size=1 771 | stride=1 772 | pad=1 773 | filters=255 774 | activation=linear 775 | 776 | 777 | [yolo] 778 | mask = 0,1,2 779 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 780 | classes=80 781 | num=9 782 | jitter=.3 783 | ignore_thresh = .5 784 | truth_thresh = 1 785 | random=1 786 | 787 | -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/darknet.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from torch import nn 3 | from utils import * 4 | 5 | 6 | class 
EmptyLayer(nn.Module): 7 | """ 8 | 仅仅是一个空模块,在shortcut层和route层使用可以大大简化代码编写 9 | """ 10 | 11 | def __init__(self): 12 | """ 13 | 初始化函数,进行初始化处理 14 | """ 15 | super(EmptyLayer, self).__init__() 16 | 17 | 18 | class DetectionLayer(nn.Module): 19 | """ 20 | 用于检测层,是一个单独的类 21 | """ 22 | 23 | def __init__(self, anchors): 24 | """ 25 | 对层进行初始化处理 26 | Args: 27 | anchors: 对anchors进行定义初始化 28 | """ 29 | super(DetectionLayer, self).__init__() 30 | self.anchors = anchors 31 | 32 | 33 | def parse_cfg(cfg_file): 34 | """ 35 | 读取配置文件,解析配置文件,并将每个模块存储为字典的list 36 | Args: 37 | cfg_file: 配置文件路径 38 | 39 | Returns: 40 | 表示模块参数的字典构成的 list 41 | """ 42 | 43 | file = open(cfg_file, 'r') 44 | # 将配置文件的行表示成一个list,后续处理将在该list上进行 45 | lines = file.read().split('\n') 46 | # 去除空行,注释,两侧的blank 47 | lines = [x for x in lines if len(x) > 0] 48 | lines = [x for x in lines if x[0] != '#'] 49 | lines = [x.rstrip().lstrip() for x in lines] 50 | 51 | block = {} 52 | blocks = [] 53 | 54 | for line in lines: 55 | # 表示一个新模块的开始 56 | if line[0] == '[': 57 | # 如果block不是空,则表示其存储了签一个模块的值 58 | if len(block) != 0: 59 | # 将前一个模块加进blocks 60 | blocks.append(block) 61 | # 重新初始化block 62 | block = {} 63 | block['type'] = line[1:-1].rstrip() 64 | else: 65 | key, value = line.split('=') 66 | block[key.rstrip()] = value.lstrip() 67 | blocks.append(block) 68 | return blocks 69 | 70 | 71 | def create_modules(blocks): 72 | """ 73 | 根据blocks指示的模块参数进行模块的构建 74 | Args: 75 | blocks: 需要建立模型的所有block参数字典列表 76 | 77 | Returns: 78 | 通过这个函数构建好神经网络的模型 79 | """ 80 | net_info = blocks[0] 81 | # 获取关于输入和预处理的信息,在迭代blocks之前,用该变量存储关于网络的信息 82 | # 该方法返回这个list,这个类类似一个包含nn.Module元素的正常的list 83 | # 但是将nn.ModuleList看做nn.Module的成员添加时,nn.Modulelist中成员的参数会被当做nn.Module的参数 84 | module_list = nn.ModuleList() 85 | # 定义一个卷积层的时候,必须定义卷积核的维度。尽管卷积核的宽度和高度在cfg文件中设置,卷积核的深度就是前一层 86 | # 的filters的个数,因此我们需要记录卷积层所在的层的filters的数量。使用prev_filters变量来完成该功能。 87 | # 将prev_filters初始化为3,表示RGB三个通道 88 | prev_filters = 3 89 | # Route之后如果有卷积层,则卷积层需要使用Route带来的特征,因此需要保存所有层的filters的数量 90 | output_filters = [] 91 | 92 | # 迭代blocks中的所有模块,并且为每个模块创建nn.Module;在迭代过程中,要根据block的类型创建响应的模块,并添加到module_list中 93 | for index, x in enumerate(blocks[1:]): 94 | module = nn.Sequential() 95 | # 如果block是一个卷积层 96 | if x['type'] == 'convolutional': 97 | # 获得关于这层的信息 98 | activation = x['activation'] 99 | try: 100 | batch_normalize = int(x['batch_normalize']) 101 | bias = False 102 | except: 103 | batch_normalize = 0 104 | bias = True 105 | 106 | filters = int(x['filters']) 107 | padding = int(x['pad']) 108 | kernel_size = int(x['size']) 109 | stride = int(x['stride']) 110 | 111 | # 是否有padding填充处理 112 | if padding: 113 | pad = (kernel_size - 1) // 2 114 | else: 115 | pad = 0 116 | 117 | # 添加卷积层 118 | conv = nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=bias) 119 | module.add_module('conv_{0}'.format(index), conv) 120 | 121 | # 添加BN层 122 | if batch_normalize: 123 | bn = nn.BatchNorm2d(filters) 124 | module.add_module('batch_norm_{0}'.format(index), bn) 125 | 126 | # 检查激活函数 127 | # YOLO中可能是线性函数或者是Leaky ReLU 128 | if activation == 'leaky': 129 | activn = nn.LeakyReLU(0.1, inplace=True) 130 | module.add_module('leaky_{0}'.format(index), activn) 131 | # 如果是Route层 132 | elif x['type'] == 'route': 133 | x['layers'] = x['layers'].split(',') 134 | # 路由层的起点 135 | start = int(x['layers'][0]) 136 | # 如果已经存在就停止 137 | try: 138 | end = int(x['layers'][1]) 139 | except: 140 | end = 0 141 | if start > 0: 142 | start = start - index 143 | if end > 0: 144 | end = end - index 145 | # 构建route层 146 | route = EmptyLayer() 147 | 
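# the route layer itself is only an EmptyLayer placeholder: the actual concatenation of the
# referenced feature maps happens later in Darknet.forward(), and here we just register the
# placeholder and record how many output channels (filters) the routed layers contribute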
module.add_module('route_{0}'.format(index), route) 148 | 149 | if end < 0: 150 | filters = output_filters[index + start] + output_filters[index + end] 151 | else: 152 | filters = output_filters[index + start] 153 | # 这一部分对应跳过连接,就是一个empty层 154 | elif x['type'] == 'shortcut': 155 | shortcut = EmptyLayer() 156 | module.add_module('shortcut_{}'.format(index), shortcut) 157 | 158 | # Yolo层,也就是用于探测的层 159 | elif x['type'] == 'yolo': 160 | mask = x['mask'].split(',') 161 | mask = [int(x) for x in mask] 162 | anchors = x['anchors'].split(',') 163 | anchors = [int(a) for a in anchors] 164 | anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)] 165 | anchors = [anchors[i] for i in mask] 166 | # 定义探测检测层 167 | detection = DetectionLayer(anchors) 168 | module.add_module('Detection_{}'.format(index), detection) 169 | # 如果是上采样层 170 | # 使用Bilinear2dUpsampling 171 | elif x['type'] == 'upsample': 172 | upsample = nn.Upsample(scale_factor=2, mode='nearest') 173 | module.add_module('upsample_{}'.format(index), upsample) 174 | 175 | module_list.append(module) # 最后将模块添加到module_list中 176 | prev_filters = filters 177 | output_filters.append(filters) 178 | 179 | return net_info, module_list 180 | 181 | 182 | class Darknet(nn.Module): 183 | """ 184 | 为探测器构造的类 185 | """ 186 | 187 | def __init__(self, cfg_file): 188 | """ 189 | 初始化函数,对网络进行初始化 190 | Args: 191 | cfg_file: 读取配置文件进行初始化 192 | """ 193 | super(Darknet, self).__init__() 194 | # 读取网络中的每一个块 195 | self.blocks = parse_cfg(cfg_file) 196 | self.net_info, self.module_list = create_modules(self.blocks) 197 | 198 | def forward(self, x, CUDA): 199 | """ 200 | 该函数实现两个功能:一是计算输出;二是将探测器输出的特征图转换成能够更加容易被处理的形式 201 | (例如对特征图做转换令不同尺度的特征图能够彼此拼接,否则如果他们维度不同,是无法进行拼接的) 202 | Args: 203 | x: 输入 204 | CUDA: 如果该参数是True,则使用GPU;如果是False,则使用CPU 205 | 206 | Returns: 207 | 返回前向检测的运算结果 208 | """ 209 | # 从blocks[1]开始迭代,因为blocks[0]是一个网络,不是前向传播的模块 210 | modules = self.blocks[1:] 211 | # 对路由层的输出进行缓存,供后续层使用(键值是层的索引,值是特征图) 212 | outputs = {} 213 | 214 | # 表示我们是否遇到了第一个detection;值为0表示collector还没有初始化;是1表示已经初始化了,则可以 215 | # 直接将特征图和它相连 216 | write = 0 217 | for i, module in enumerate(modules): 218 | module_type = (module['type']) 219 | # 卷积层或者上采样层 220 | if module_type == 'convolutional' or module_type == 'upsample': 221 | x = self.module_list[i](x) 222 | # route层 223 | elif module_type == 'route': 224 | # 处理路由层连接一个层的特征图或者两个层的特征图的情况 225 | layers = module['layers'] 226 | layers = [int(a) for a in layers] 227 | 228 | if (layers[0]) > 0: 229 | layers[0] = layers[0] - i 230 | 231 | if len(layers) == 1: 232 | x = outputs[i + (layers[0])] 233 | else: 234 | if (layers[1]) > 0: 235 | layers[1] = layers[1] - i 236 | map1 = outputs[i + layers[0]] 237 | map2 = outputs[i + layers[1]] 238 | x = torch.cat((map1, map2), 1) 239 | # shortcut层,即一个empty层 240 | elif module_type == 'shortcut': 241 | from_ = int(module['from']) 242 | x = outputs[i - 1] + outputs[i + from_] 243 | # YOLO的输出时一个卷积特征映射,包含沿特征映射深度的边界框属性,神经元预测的边界框的属性被一个一个地堆叠在一起 244 | # 所以,如果想要获得(5,6)位置处的神经元预测的第二个边界,需要使用map[5, 6, (5+c) : 2*(5+c)]来索引 245 | # 这种形式对于输出处理非常不方便,如通过对象置信阈值,向中心添加网格偏移量,应用锚定等 246 | elif module_type == 'yolo': 247 | anchors = self.module_list[i][0].anchors 248 | # 提取输入的维度 249 | inp_dim = int(self.net_info['height']) 250 | 251 | # 提取识别和检测的类别数量 252 | num_classes = int(module['classes']) 253 | 254 | # 进行张量运算 255 | x = x.data 256 | x = predict_transform(x, inp_dim, anchors, num_classes, CUDA) 257 | # 若还没初始化,则需要进行初始化 258 | if not write: 259 | detections = x 260 | write = 1 261 | else: 262 | detections = torch.cat((detections, 
x), 1) 263 | 264 | outputs[i] = x 265 | return detections 266 | 267 | def load_weights(self, weight_file): 268 | """ 269 | 加载模型的权重 270 | Args: 271 | weight_file: 权重的配置文件 272 | 273 | Returns: 274 | 配置权重之后就是权重配置的网络 275 | """ 276 | fp = open(weight_file, 'rb') 277 | 278 | # 首先提取5个头部文件的信息 279 | # 1. 获取主版本号 280 | # 2. 获取次版本号 281 | # 3. 获得子版本号的数量 282 | # 4,5. 训练过程中被模型识别的图像 283 | header = np.fromfile(fp, dtype=np.int32, count=5) 284 | self.header = torch.from_numpy(header) 285 | self.seen = self.header[3] 286 | 287 | # 提取权重信息 288 | weights = np.fromfile(fp, dtype=np.float32) 289 | 290 | ptr = 0 291 | for i in range(len(self.module_list)): 292 | module_type = self.blocks[i + 1]['type'] 293 | 294 | # 卷积层则提取这些参数信息,否则进行忽略 295 | if module_type == 'convolutional': 296 | model = self.module_list[i] 297 | try: 298 | batch_normalize = int(self.blocks[i + 1]['batch_normalize']) 299 | except: 300 | batch_normalize = 0 301 | conv = model[0] 302 | if batch_normalize: 303 | bn = model[1] 304 | # 获得BN层的参数信息 305 | num_bn_biases = bn.bias.numel() 306 | 307 | # 加载参数,并对参数进行处理 308 | bn_biases = torch.from_numpy(weights[ptr:ptr + num_bn_biases]) 309 | ptr += num_bn_biases 310 | bn_weights = torch.from_numpy(weights[ptr: ptr + num_bn_biases]) 311 | ptr += num_bn_biases 312 | bn_running_mean = torch.from_numpy(weights[ptr: ptr + num_bn_biases]) 313 | ptr += num_bn_biases 314 | bn_running_var = torch.from_numpy(weights[ptr: ptr + num_bn_biases]) 315 | ptr += num_bn_biases 316 | 317 | # 将加载的weight作为模型的weight 318 | bn_biases = bn_biases.view_as(bn.bias.data) 319 | bn_weights = bn_weights.view_as(bn.weight.data) 320 | bn_running_mean = bn_running_mean.view_as(bn.running_mean) 321 | bn_running_var = bn_running_var.view_as(bn.running_var) 322 | 323 | # 将数据载入模型中 324 | bn.bias.data.copy_(bn_biases) 325 | bn.weight.data.copy_(bn_weights) 326 | bn.running_mean.copy_(bn_running_mean) 327 | bn.running_var.copy_(bn_running_var) 328 | 329 | else: 330 | num_biases = conv.bias.numel() 331 | 332 | # 将weights信息加载进来 333 | conv_biases = torch.from_numpy(weights[ptr: ptr + num_biases]) 334 | ptr = ptr + num_biases 335 | 336 | # 根据模型的设置,对参数进行reshape 337 | conv_biases = conv_biases.view_as(conv.bias.data) 338 | 339 | # 最终将数据进行复制 340 | conv.bias.data.copy_(conv_biases) 341 | 342 | # 为卷积层加载模型参数和信息 343 | num_weights = conv.weight.numel() 344 | conv_weights = torch.from_numpy(weights[ptr:ptr + num_weights]) 345 | ptr = ptr + num_weights 346 | conv_weights = conv_weights.view_as(conv.weight.data) 347 | conv.weight.data.copy_(conv_weights) 348 | -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/data/coco.names: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic light 11 | fire hydrant 12 | stop sign 13 | parking meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard 39 | tennis racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | sofa 59 | pottedplant 60 | bed 61 | diningtable 62 | toilet 63 | 
tvmonitor 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear 79 | hair drier 80 | toothbrush 81 | -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000000139.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000000139.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000000632.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000000632.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000000785.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000000785.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000000885.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000000885.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000001532.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000001532.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000001761.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000001761.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000002149.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000002149.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000002157.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000002157.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000007108.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000007108.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000007816.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/det/000000007816.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000000139.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000000139.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000000632.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000000632.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000000785.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000000785.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000000885.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000000885.jpg -------------------------------------------------------------------------------- /实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000001532.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000001532.jpg -------------------------------------------------------------------------------- 
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000001761.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000001761.jpg
--------------------------------------------------------------------------------
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000002149.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000002149.jpg
--------------------------------------------------------------------------------
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000002157.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000002157.jpg
--------------------------------------------------------------------------------
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000007108.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000007108.jpg
--------------------------------------------------------------------------------
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000007816.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/imgs/000000007816.jpg
--------------------------------------------------------------------------------
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/main.py:
--------------------------------------------------------------------------------
from __future__ import division
from torch.autograd import Variable
from utils import *
import os
import os.path as osp
from darknet import Darknet
import pickle as pkl
import pandas as pd
import random

images = 'imgs'
batch_size = 1
confidence = 0.5
# IoU threshold for NMS
nms_thesh = 0.4
# Use CUDA for detection if it is available
device = torch.cuda.is_available()
# The COCO dataset has 80 detectable classes
num_classes = 80
classes = load_classes('data/coco.names')
# Build the network and load its parameters
model = Darknet('cfg/yolov3.cfg')
model.load_weights('cfg/yolov3.weights')
model.net_info['height'] = '416'
inp_dim = int(model.net_info['height'])


def write_a(x, filename, results):
    """
    Draw one detection on its image and write the result back to the det folder.
    Args:
        x: one detection (one row of the output tensor)
        filename: the list of output file names
        results: the list of loaded images

    Returns:
        The image with the detection drawn on it.
    """
    c1 = tuple(x[1:3].int())
    c2 = tuple(x[3:5].int())
    img = results[int(x[0])]
    cls = int(x[-1])
    color = random.choice(colors)
    label = '{0}'.format(classes[cls])
    # Draw the bounding box
    cv2.rectangle(img, c1, c2, color, 2)
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    cv2.rectangle(img, c1, c2, color, 2)
    # Write the class label
    cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 2)
    # Write the annotated image back to disk
    cv2.imwrite(filename[int(x[0])], img)
    return img


# Move the model to the GPU if CUDA is available
if device:
    model.cuda()

# Switch to evaluation mode; the parameters are no longer updated
model.eval()

# Detection phase on the test images
im_list = [osp.join(osp.realpath('.'), images, img) for img in os.listdir(images)]
# Create the folder for the detection results
if not os.path.exists('det'):
    os.makedirs('det')
# Load the test images
loaded_ims = [cv2.imread(x) for x in im_list]
im_batches = list(map(prep_image, loaded_ims, [inp_dim for x in range(len(im_list))]))
im_dim_list = [(x.shape[1], x.shape[0]) for x in loaded_ims]
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
leftover = 0
if len(im_dim_list) % batch_size:
    leftover = 1

if batch_size != 1:
    num_batches = len(im_list) // batch_size + leftover
    im_batches = [torch.cat((im_batches[i * batch_size: min((i + 1) * batch_size,
                                                            len(im_batches))])) for i in range(num_batches)]

write = 0
# Move the image dimensions to the GPU as well
if device:
    im_dim_list = im_dim_list.cuda()

for i, batch in enumerate(im_batches):
    # Move the batch to the GPU
    if device:
        batch = batch.cuda()
    with torch.no_grad():
        # Run the model to obtain the raw predictions
        prediction = model(Variable(batch), device)
    prediction = write_results(prediction, confidence, num_classes, nms_conf=nms_thesh)
    if type(prediction) == int:
        # No detections in this batch; move on to the next one
        for im_num, image in enumerate(im_list[i * batch_size: min((i + 1) * batch_size, len(im_list))]):
            im_id = i * batch_size + im_num
        continue
    prediction[:, 0] += i * batch_size
    # Initialize the output collector
    if not write:
        output = prediction
        write = 1
    else:
        output = torch.cat((output, prediction))

    for im_num, image in enumerate(im_list[i * batch_size: min((i + 1) * batch_size, len(im_list))]):
        im_id = i * batch_size + im_num
        objs = [classes[int(x[-1])] for x in output if int(x[0]) == im_id]
    # Wait for the GPU work of this batch to finish
    if device:
        torch.cuda.synchronize()

try:
    # Make sure at least one detection was produced
    output
except NameError:
    print('No detections were made')
    exit()

im_dim_list = torch.index_select(im_dim_list, 0, output[:, 0].long())
scaling_factor = torch.min(416 / im_dim_list, 1)[0].view(-1, 1)
output[:, [1, 3]] -= (inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
output[:, [2, 4]] -= (inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
output[:, 1:5] /= scaling_factor

for i in range(output.shape[0]):
    output[i, [1, 3]] = torch.clamp(output[i, [1, 3]], 0.0, im_dim_list[i, 0])
    output[i, [2, 4]] = torch.clamp(output[i, [2, 4]], 0.0, im_dim_list[i, 1])

# Load the colour palette from the pickled 'pallete' file
colors = pkl.load(open('pallete', 'rb'))

# Build the output file names
det_names = pd.Series(im_list).apply(lambda x: './{}/det_{}'.format('det', x.split('/')[-1]))
# Simplify the names and store the results in the target folder
for i in range(len(det_names)):
    # Replace every backslash, then remove every colon
    det_names[i] = det_names[i].replace('\\', '_')
    det_names[i] = det_names[i].replace(':', '')
    # Use split to obtain a short file name
    simple_name = det_names[i].split('_')
    det_names[i] = './det/' + simple_name[len(simple_name) - 1]
# Draw the detections on the images
list(map(lambda x: write_a(x, det_names, loaded_ims), output))
list(map(cv2.imwrite, det_names, loaded_ims))

# Empty the cache; detection is finished
torch.cuda.empty_cache()
--------------------------------------------------------------------------------
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/pallete:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/pallete
--------------------------------------------------------------------------------
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/Lab6_code/utils.py:
--------------------------------------------------------------------------------
from __future__ import division

import torch
import numpy as np
import cv2


# Helper functions used for loading the model and running detection on images

def letterbox_image(img, inp_dim):
    """
    Resize an image while keeping its aspect ratio unchanged, padding the borders.
    Args:
        img: the input image
        inp_dim: the target (width, height)

    Returns:
        The resized and padded image.
    """
    img_w, img_h = img.shape[1], img.shape[0]
    w, h = inp_dim
    new_w = int(img_w * min(w / img_w, h / img_h))
    new_h = int(img_h * min(w / img_w, h / img_h))
    resized_image = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
    canvas = np.full((inp_dim[1], inp_dim[0], 3), 128)
    canvas[(h - new_h) // 2:(h - new_h) // 2 + new_h, (w - new_w) // 2:(w - new_w) // 2 + new_w, :] = resized_image
    return canvas


def unique(tensor):
    """
    Deduplication.
    Args:
        tensor: the tensor to process, holding the predictions of one image

    Returns: the deduplicated tensor, i.e. one entry per class value

    """
    tensor_np = tensor.cpu().numpy()
    unique_np = np.unique(tensor_np)  # deduplicate the np.array()
    unique_tensor = torch.from_numpy(unique_np)  # convert the np.array() back to a tensor

    tensor_res = tensor.new(unique_tensor.shape)  # create a new tensor
    tensor_res.copy_(unique_tensor)  # copy the values into it
    return tensor_res


def load_classes(name_file):
    """
    Args:
        name_file: file listing the names of the detectable classes

    Returns:
        A list mapping each class index to its class-name string.
    """
    fp = open(name_file, 'r')
    names = fp.read().split('\n')[:-1]
    return names


def bbox_iou(box1, box2):
    """
    Return the overlap ratio (IoU) of two sets of bounding boxes.
    Args:
        box1: bounding boxes 1
        box2: bounding boxes 2

    Returns:
        The intersection area divided by the union of the two box areas.
    """
    # Box coordinates
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # Coordinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)

    # Intersection area
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(inter_rect_y2 - inter_rect_y1 + 1,
                                                                                     min=0)

    # Union area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)
    return iou


def predict_transform(prediction, inp_dim, anchors, num_classes, CUDA=True):
    """
    Transform the feature map `prediction` into a 2-D tensor in which every row describes one bounding box.
    Args:
        prediction: the feature map to transform
        inp_dim: the input image dimension
        anchors: the anchor boxes
        num_classes: total number of predictable classes
        CUDA: use the GPU when True, the CPU when False

    Returns: the 2-D tensor corresponding to prediction

    """
    batch_size = prediction.size(0)
    stride = inp_dim // prediction.size(2)
    grid_size = inp_dim // stride
    bbox_attrs = 5 + num_classes
    num_anchors = len(anchors)

    prediction = prediction.view(batch_size, bbox_attrs * num_anchors, grid_size * grid_size)
    prediction = prediction.transpose(1, 2).contiguous()
    prediction = prediction.view(batch_size, grid_size * grid_size * num_anchors, bbox_attrs)
    anchors = [(a[0] / stride, a[1] / stride) for a in anchors]

    # Sigmoid the centre coordinates and the objectness score
    prediction[:, :, 0] = torch.sigmoid(prediction[:, :, 0])
    prediction[:, :, 1] = torch.sigmoid(prediction[:, :, 1])
    prediction[:, :, 4] = torch.sigmoid(prediction[:, :, 4])

    # Add the grid offsets to the centre coordinates
    grid = np.arange(grid_size)
    a, b = np.meshgrid(grid, grid)
    x_offset = torch.FloatTensor(a).view(-1, 1)
    y_offset = torch.FloatTensor(b).view(-1, 1)
    # Move the offsets to the GPU
    if CUDA:
        x_offset = x_offset.cuda()
        y_offset = y_offset.cuda()
    x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1, num_anchors).view(-1, 2).unsqueeze(0)
    prediction[:, :, :2] += x_y_offset
    # Apply the anchors to the log-space width and height predictions
    anchors = torch.FloatTensor(anchors)
    # Move the anchors to the GPU
    if CUDA:
        anchors = anchors.cuda()
    anchors = anchors.repeat(grid_size * grid_size, 1).unsqueeze(0)
    prediction[:, :, 2:4] = torch.exp(prediction[:, :, 2:4]) * anchors
    prediction[:, :, 5: 5 + num_classes] = torch.sigmoid((prediction[:, :, 5: 5 + num_classes]))
    prediction[:, :, :4] *= stride  # rescale the box coordinates to the size of the input image
    return prediction


def write_results(prediction, confidence, num_classes, nms_conf=0.4):
    """
    Apply objectness-score thresholding and non-maximum suppression to the feature map `prediction` to obtain the
    final detections.
    Args:
        prediction: the feature map
        confidence: the objectness-score threshold
        num_classes: total number of classes
        nms_conf: the IoU threshold used for NMS

    Returns: the final detections

    """
    # Zero out the boxes whose objectness score is below the threshold
    conf_mask = (prediction[:, :, 4] > confidence).float().unsqueeze(2)
    prediction = prediction * conf_mask
    # Describe each box by two corner points, which makes the IoU computation easier
    box_corner = prediction.new(prediction.shape)
    box_corner[:, :, 0] = (prediction[:, :, 0] - prediction[:, :, 2] / 2)
    box_corner[:, :, 1] = (prediction[:, :, 1] - prediction[:, :, 3] / 2)
    box_corner[:, :, 2] = (prediction[:, :, 0] + prediction[:, :, 2] / 2)
    box_corner[:, :, 3] = (prediction[:, :, 1] + prediction[:, :, 3] / 2)
    prediction[:, :, :4] = box_corner[:, :, :4]
    batch_size = prediction.size(0)
    write = False  # whether output has been initialized
    # Every box has 85 attributes, 80 of which are class scores; the loop below drops those scores but keeps the
    # index of the maximum score together with the maximum score itself
    for ind in range(batch_size):
        image_pred = prediction[ind]
        # Keep only the highest class score and its index
        max_conf, max_conf_score = torch.max(image_pred[:, 5:5 + num_classes], 1)
        max_conf = max_conf.float().unsqueeze(1)
        max_conf_score = max_conf_score.float().unsqueeze(1)
        seq = (image_pred[:, :5], max_conf, max_conf_score)
        image_pred = torch.cat(seq, 1)
        # Remove the boxes whose objectness score is below the threshold
        non_zero_ind = (torch.nonzero(image_pred[:, 4]))
        try:
            image_pred_ = image_pred[non_zero_ind.squeeze(), :].view(-1, 7)
        except:
            continue

        if image_pred_.shape[0] == 0:
            continue

        # Classes detected in this image
        img_classes = unique(image_pred_[:, -1])
        # Non-maximum suppression
        for cls in img_classes:

            # Detections of one particular class
            cls_mask = image_pred_ * (image_pred_[:, -1] == cls).float().unsqueeze(1)
            class_mask_ind = torch.nonzero(cls_mask[:, -2]).squeeze()
            image_pred_class = image_pred_[class_mask_ind].view(-1, 7)

            # Sort the detections by objectness score, highest first
            conf_sort_index = torch.sort(image_pred_class[:, 4], descending=True)[1]
            image_pred_class = image_pred_class[conf_sort_index]
            # Number of detections
            idx = image_pred_class.size(0)
            # Loop over the detections of this class
            for i in range(idx):
                try:
                    ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i + 1:])
                except ValueError:
                    break

                except IndexError:
                    break

                # Zero out every box whose IoU with the current box exceeds the threshold
                iou_mask = (ious < nms_conf).float().unsqueeze(1)
                image_pred_class[i + 1:] *= iou_mask

                # Remove the suppressed entries
                non_zero_ind = torch.nonzero(image_pred_class[:, 4]).squeeze()
                image_pred_class = image_pred_class[non_zero_ind].view(-1, 7)

            batch_ind = image_pred_class.new(image_pred_class.size(0), 1).fill_(ind)
            seq = batch_ind, image_pred_class
            # Initialize the output if necessary
            if not write:
                output = torch.cat(seq, 1)
                write = True
            else:
                out = torch.cat(seq, 1)
                output = torch.cat((output, out))

    try:
        return output
    except:
        return 0


def prep_image(img, inp_dim):
    """
    Preprocess an image for the network and convert it to a tensor.
    Args:
        img: the input image
        inp_dim: the input dimension of the network

    Returns: the preprocessed image

    """
    img = (letterbox_image(img, (inp_dim, inp_dim)))
    img = img[:, :, ::-1].transpose((2, 0, 1)).copy()
    img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)
    return img
--------------------------------------------------------------------------------
/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/实验报告6.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Carlofkl/HIT2022PatternRecognition-DeepLearning/d5eeafc531c022f3cd892f9c28edc5bf4ddafb64/实验6-1190201215-冯开来-1190201225-韩庸平-1190501001-利恩宇/实验报告6.docx
--------------------------------------------------------------------------------
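
The Lab6_code files above form a small YOLOv3 inference pipeline: main.py builds a Darknet from cfg/yolov3.cfg, loads the pretrained weights, preprocesses every image in imgs/ with prep_image, turns the raw network output into detections with write_results, and draws the boxes with write_a. The sketch below strings those pieces together for a single image. It is a minimal sketch, not code from the repository: it assumes the official yolov3.weights file has been downloaded into cfg/ (the repository ships only yolov3.cfg), and it reports detections in the 416x416 letterboxed coordinate frame; main.py shows how to rescale them back to the original image size.

# Minimal single-image smoke test (sketch; yolov3.weights must be fetched separately into cfg/)
import cv2
import torch
from darknet import Darknet
from utils import load_classes, prep_image, write_results

cuda = torch.cuda.is_available()
classes = load_classes('data/coco.names')

model = Darknet('cfg/yolov3.cfg')
model.load_weights('cfg/yolov3.weights')   # assumed to sit next to yolov3.cfg
model.net_info['height'] = '416'
inp_dim = int(model.net_info['height'])
if cuda:
    model = model.cuda()
model.eval()

img = cv2.imread('imgs/000000000139.jpg')  # any of the sample images in imgs/
batch = prep_image(img, inp_dim)           # letterbox, BGR->RGB, CHW tensor in [0, 1]
if cuda:
    batch = batch.cuda()

with torch.no_grad():
    prediction = model(batch, cuda)        # shape (1, num_boxes, 5 + 80) after predict_transform
detections = write_results(prediction, confidence=0.5, num_classes=80, nms_conf=0.4)

if isinstance(detections, int):
    print('No detections were made')
else:
    # Each row: [batch index, x1, y1, x2, y2, objectness, class score, class index]
    for det in detections:
        print(classes[int(det[-1])], 'objectness', float(det[5]))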