├── releases
│   ├── experiment1.zip
│   ├── experiment2.zip
│   ├── experiment3.zip
│   └── experiment4.zip
├── helpers
│   ├── __pycache__
│   │   ├── Metrics.cpython-37.pyc
│   │   ├── MyDataset.cpython-37.pyc
│   │   └── MyDataset.cpython-38.pyc
│   ├── Metrics.py
│   └── myDataset.py
├── models
│   ├── __pycache__
│   │   ├── BaseModels.cpython-37.pyc
│   │   ├── BaseModels.cpython-38.pyc
│   │   ├── ConvNetMNIST.cpython-38.pyc
│   │   └── ConvNetSVHN2.cpython-37.pyc
│   ├── BaseModels.py
│   ├── ConvNetMNIST_BN.py
│   ├── ConvNetMNIST.py
│   └── ConvNetSVHN2.py
├── README.md
├── perception.py
├── eca.py
├── fnn.py
├── reference
│   ├── Perceptron.py
│   └── load_data.py
├── lab2.py
├── lab3_eval.py
├── datasets
│   ├── iris_multi
│   │   ├── iris.names
│   │   └── iris.data
│   ├── wdbc_binary
│   │   └── wdbc.names
│   ├── soybean_multi
│   │   ├── soybean-large.names
│   │   └── soybean-large.data
│   ├── sonar_binary
│   │   └── sonar.names
│   └── robot_multi
│       └── Wall-following.names
├── lab4_wgan.py
├── lab4_gan.py
├── lab3.py
└── lab4_cgan.py

/releases/experiment1.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/releases/experiment1.zip
--------------------------------------------------------------------------------
/releases/experiment2.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/releases/experiment2.zip
--------------------------------------------------------------------------------
/releases/experiment3.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/releases/experiment3.zip
--------------------------------------------------------------------------------
/releases/experiment4.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/releases/experiment4.zip
--------------------------------------------------------------------------------
/helpers/__pycache__/Metrics.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/helpers/__pycache__/Metrics.cpython-37.pyc
--------------------------------------------------------------------------------
/helpers/__pycache__/MyDataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/helpers/__pycache__/MyDataset.cpython-37.pyc
--------------------------------------------------------------------------------
/helpers/__pycache__/MyDataset.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/helpers/__pycache__/MyDataset.cpython-38.pyc
--------------------------------------------------------------------------------
/models/__pycache__/BaseModels.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/models/__pycache__/BaseModels.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/BaseModels.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/models/__pycache__/BaseModels.cpython-38.pyc
--------------------------------------------------------------------------------
/models/__pycache__/ConvNetMNIST.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/models/__pycache__/ConvNetMNIST.cpython-38.pyc
--------------------------------------------------------------------------------
/models/__pycache__/ConvNetSVHN2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lblaoke/TJU-DeepLearning2020/HEAD/models/__pycache__/ConvNetSVHN2.cpython-37.pyc
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Recommended specification
python - 3.7.4
numpy - 1.19.4
cudatoolkit - 10.2.89
cudnn - 7.6.5
pytorch - 1.7.0

This code is designed to run in a multi-GPU environment. If that is not available to you, please adjust the variable "device_id" accordingly.

Please run each .py file separately from the project root directory.

Larger datasets such as MNIST are not included in the "datasets" folder; please update the dataset paths inside the scripts.

For experiment 3, please run "lab3.py" to collect results and then run "lab3_eval.py" to visualize them. All results can be found in the "results/" folder.

For experiment 4, result generation and curve plotting are available only for the GAN and WGAN models.
--------------------------------------------------------------------------------
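A minimal sketch of the "device_id" fallback mentioned in the README (an illustration added here, not a file from the repository), assuming only that PyTorch is installed: on a machine without CUDA it falls back to the CPU, and the resulting value can be passed to .to() in the same way as the integer GPU index used in the scripts.

import torch

# Use GPU 0 when CUDA is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device_id = 0                      # integer GPU index, as in the scripts
else:
    device_id = torch.device('cpu')    # drop-in replacement for CPU-only machines

x = torch.randn(4, 60).to(device_id)   # tensors and modules move the same way
print(x.device)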
/models/BaseModels.py:
--------------------------------------------------------------------------------
from torch import nn
import torch.nn.functional as F

#convolutional module with identical input and output spatial size ("same" padding)
class Conv2dSame(nn.Sequential):
    def __init__(self,in_channels,out_channels,kernel_size):
        assert type(kernel_size)==int,'Unsupported type '+str(type(kernel_size))+' for kernel_size'

        bound   = kernel_size-1
        bound_l = bound//2
        bound_r = bound-bound_l

        super(Conv2dSame,self).__init__(
            nn.ReplicationPad2d((bound_l,bound_r,bound_l,bound_r)),
            nn.Conv2d(in_channels,out_channels,kernel_size)
        )

#Conv2dSame + BatchNorm2d + ReLU
class Conv2dSame_BN_ReLU(nn.Sequential):
    def __init__(self,in_channels,out_channels,kernel_size):
        assert type(kernel_size)==int,'Unsupported type '+str(type(kernel_size))+' for kernel_size'

        super(Conv2dSame_BN_ReLU,self).__init__(
            Conv2dSame(in_channels,out_channels,kernel_size),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )
--------------------------------------------------------------------------------
/models/ConvNetMNIST_BN.py:
--------------------------------------------------------------------------------
from torch import nn
from models.BaseModels import *

#small convolutional network for the MNIST dataset (with batch normalization)
class ConvNetMNIST_BN(nn.Module):
    def __init__(self,input_shape,num_feature,num_class,transfered=None):
        super(ConvNetMNIST_BN,self).__init__()

        self.conv = nn.Sequential( #input_shape[0]*input_shape[1]*input_shape[2]
            Conv2dSame_BN_ReLU(in_channels=input_shape[0],out_channels=32,kernel_size=3),
            Conv2dSame_BN_ReLU(in_channels=32,out_channels=64,kernel_size=3),
            nn.MaxPool2d(2),
            nn.Dropout(0.25)
        ) #64*(input_shape[1]/2)*(input_shape[2]/2)

        self.fc1 = nn.Sequential( #64*(input_shape[1]/2)*(input_shape[2]/2)
            nn.Linear(64*(input_shape[1]//2)*(input_shape[2]//2),num_feature),
            nn.ReLU(),
            nn.Dropout(0.5)
        ) #num_feature

        self.fc2 = nn.Linear(num_feature,num_class)

    def forward(self,x,mode='all'):
        x = self.conv(x)
        x = x.view(x.size(0),-1)
        x = self.fc1(x)
        y = self.fc2(x)
        return y
--------------------------------------------------------------------------------
/models/ConvNetMNIST.py:
--------------------------------------------------------------------------------
from torch import nn
from models.BaseModels import *

#small convolutional network for the MNIST dataset
class ConvNetMNIST(nn.Module):
    def __init__(self,input_shape,num_feature,num_class,transfered=None):
        super(ConvNetMNIST,self).__init__()

        self.conv = nn.Sequential( #input_shape[0]*input_shape[1]*input_shape[2]
            Conv2dSame(in_channels=input_shape[0],out_channels=32,kernel_size=3),
            nn.ReLU(),
            Conv2dSame(in_channels=32,out_channels=64,kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(0.25)
        ) #64*(input_shape[1]/2)*(input_shape[2]/2)

        self.fc1 = nn.Sequential( #64*(input_shape[1]/2)*(input_shape[2]/2)
            nn.Linear(64*(input_shape[1]//2)*(input_shape[2]//2),num_feature),
            nn.ReLU(),
            nn.Dropout(0.5)
        ) #num_feature

        self.fc2 = nn.Linear(num_feature,num_class)

    def forward(self,x,mode='all'):
        x = self.conv(x)
        x = x.view(x.size(0),-1)
        x = self.fc1(x)
        y = self.fc2(x)
        return y
--------------------------------------------------------------------------------
/helpers/Metrics.py:
--------------------------------------------------------------------------------
from sklearn.metrics import *
import numpy as np

#standard accuracy: fraction of exactly matching predictions
def accuracy(y_pred,y_true,task):
    assert len(y_pred)==len(y_true),'Inconsistent length, %d != %d' %(len(y_pred),len(y_true))

    if task=='binary-classification':
        y_pred = np.where(y_pred<0.5,0,1)
    elif task=='multi-classification':
        y_pred = np.argmax(y_pred,axis=1)
    else:
        assert False,'Unsupported task '+task

    return accuracy_score(y_pred=y_pred,y_true=y_true)

#accuracy averaged over classes (wraps sklearn's balanced_accuracy_score)
def balanced_accuracy(y_pred,y_true,task):
    assert len(y_pred)==len(y_true),'Inconsistent length, %d != %d' %(len(y_pred),len(y_true))

    if task=='binary-classification':
        y_pred = np.where(y_pred<0.5,0,1)
    elif task=='multi-classification':
        y_pred = np.argmax(y_pred,axis=1)
    else:
        assert False,'Unsupported task '+task

    return balanced_accuracy_score(y_pred=y_pred,y_true=y_true)

#area under the PR curve (placeholder, not implemented; always returns 0)
def AUPR(y_pred,y_true,task):
    assert len(y_pred)==len(y_true),'Inconsistent length, %d != %d' %(len(y_pred),len(y_true))
    return 0.

#area under the ROC curve (binary classification only; returns 0 otherwise)
def AUROC(y_pred,y_true,task):
    assert len(y_pred)==len(y_true),'Inconsistent length, %d != %d' %(len(y_pred),len(y_true))

    if task=='binary-classification':
        return roc_auc_score(y_score=y_pred,y_true=y_true)
    return 0.

#EIS metric (placeholder, not implemented; always returns 0)
def EIS(y_pred,y_true,task):
    assert len(y_pred)==len(y_true),'Inconsistent length, %d != %d' %(len(y_pred),len(y_true))
    return 0.
--------------------------------------------------------------------------------
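A short usage sketch for the metric helpers above (an illustration added here, not a file from the repository). It assumes the project root is on the Python path so that helpers.Metrics is importable, and that numpy and scikit-learn are installed.

import numpy as np
from helpers.Metrics import accuracy, AUROC

# Dummy binary-classification scores and ground-truth labels.
y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0.2, 0.8, 0.6, 0.4, 0.3])   # predicted probabilities for class 1

print(accuracy(y_pred, y_true, task='binary-classification'))   # thresholds at 0.5 -> 0.8
print(AUROC(y_pred, y_true, task='binary-classification'))      # wraps sklearn's roc_auc_score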
/perception.py:
--------------------------------------------------------------------------------
import torch
from torch import nn
from sys import argv
from helpers.myDataset import *
from helpers.myfunc import *
from torch.utils.data import DataLoader

#test device status
device_status=torch.cuda.is_available()
if device_status:
    device_id=0

#initialize model
net=nn.Sequential(
    nn.Linear(60,2)
)
if device_status:
    net=net.to(device_id)

#set hyperparameters
loss_func=nn.CrossEntropyLoss()
opt=torch.optim.Adam(net.parameters(),lr=0.0007)
mini_batch=1

#load dataset
trainData=TrainData('sonar','./datasets/sonar_binary/sonar.all-data')
testData=TestData('sonar','./datasets/sonar_binary/sonar.all-data')

trainLoader=DataLoader(
    dataset=trainData,
    batch_size=mini_batch,
    shuffle=True,
    num_workers=0
)
testLoader=DataLoader(
    dataset=testData,
    batch_size=testData.__len__(),
    shuffle=False,
    num_workers=0
)

if __name__=='__main__':

    #train & test
    for epoch in range(100):
        net=net.train()
        for _,(x,y) in enumerate(trainLoader):
            if device_status:
                x,y=x.to(device_id),y.to(device_id)

            #calculate estimated results
            opt.zero_grad()
            y_hat=net(x)

            #calculate loss and propagate back
            loss=loss_func(y_hat,y)
            loss.backward()
            opt.step()

        # if epoch%10!=0:
        #     continue

        net=net.eval()
        positive_n=0
        for _,(x,y) in enumerate(testLoader):
            if device_status:
                x=x.to(device_id)

            #predict
            with torch.no_grad():
                y_hat=net(x)

            #compare and count
            for i in range(len(y)):
                if torch.argmax(y_hat[i]).item()==y[i].item():
                    positive_n+=1

        print('epoch = %d accuracy = %f' %(epoch,positive_n/testData.__len__()))

    #save parameters
    torch.save(net.state_dict(),'./results/perception.pkl')
--------------------------------------------------------------------------------
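A hypothetical companion snippet (not a file from the repository) showing how the parameters saved at the end of perception.py could be restored for inference. It assumes the ./results/perception.pkl file produced by that script exists and that the same 60-input linear model is rebuilt before loading.

import torch
from torch import nn

# Rebuild the architecture that perception.py trained, then restore its weights.
net = nn.Sequential(nn.Linear(60, 2))
net.load_state_dict(torch.load('./results/perception.pkl', map_location='cpu'))
net.eval()

# Classify a single (dummy) 60-feature sample.
with torch.no_grad():
    sample = torch.randn(1, 60)
    print(torch.argmax(net(sample), dim=1).item())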
/eca.py:
--------------------------------------------------------------------------------
import torch
from torch import nn
from torch.utils.data import DataLoader
from helpers.MyDataset import *
from models.ConvNetSVHN2 import *
from helpers.Metrics import *

#test device status
if torch.cuda.is_available():
    device_id = 0
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
else:
    device_id = None

#initialize model
net = ConvNetSVHN2(128).to(device_id)

#set hyperparameters
loss_func = nn.CrossEntropyLoss().to(device_id)
opt = torch.optim.SGD(net.parameters(),lr=0.001,momentum=0.9,weight_decay=0.0005)
load_batch,train_batch = 1024,256

#load dataset
trainData = Svhn2Dataset('D://datasets/svhn-format2/','train')
testData = Svhn2Dataset('D://datasets/svhn-format2/','t10k')

trainLoader = DataLoader(
    dataset     = trainData,
    batch_size  = load_batch,
    shuffle     = True,
    num_workers = 0,
    drop_last   = True
)
testLoader = DataLoader(
    dataset     = testData,
    batch_size  = load_batch,
    shuffle     = False,
    num_workers = 0,
    drop_last   = False
)

if __name__=='__main__':
    for epoch in range(5):
        net = net.train()
        for _,(X,y) in enumerate(trainLoader):
            X,y = X.to(device_id),y.to(device_id)

            batch = 0
            while batch
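eca.py is cut off mid-loop at this point in the dump. The variable names (load_batch, train_batch, batch) suggest that each loaded batch of load_batch samples is split into train_batch-sized sub-batches for the optimizer step; the following self-contained sketch only illustrates that common pattern with a stand-in linear model and dummy data, and is an assumption rather than the repository's actual code.

import torch
from torch import nn

# Stand-in model, optimizer, and data so the snippet runs on its own.
net = nn.Linear(3*32*32, 10)
opt = torch.optim.SGD(net.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()

load_batch, train_batch = 1024, 256
X = torch.randn(load_batch, 3*32*32)           # one "loaded" batch
y = torch.randint(0, 10, (load_batch,))

batch = 0
while batch < load_batch:                      # walk through the loaded batch
    x_sub = X[batch:batch+train_batch]         # take a train_batch-sized slice
    y_sub = y[batch:batch+train_batch]

    opt.zero_grad()
    loss = loss_func(net(x_sub), y_sub)
    loss.backward()
    opt.step()

    batch += train_batch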