├── QF_Net ├── __init__.py ├── lib_util.py └── lib_qf_net.py ├── QF_FB_C ├── __init__.py ├── lib_mlp.py └── lib_qf_fb.py ├── Quantumflow_Data.xlsx ├── README.md └── Execute ├── README.md ├── exec_mnist.py └── exec_mnist.ipynb /QF_Net/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /QF_FB_C/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Quantumflow_Data.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weiwenjiang/QuantumFlow/HEAD/Quantumflow_Data.xlsx -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ![](https://raw.githubusercontent.com/weiwenjiang/QML_tutorial/main/Readme_Img/qflow.png) 3 | 4 | QuantumFlow is an open-source framework. Its [website](https://jqub.github.io/categories/QF/) provides an overview and easy access. This repo releases all of the source code. In addition, we provide a [tutorial repo](https://github.com/weiwenjiang/QuantumFlow_Tutorial) on GitHub. 5 | 6 | This repo gives the detailed implementation of training the model on the classical side, which generates the input of QuantumFlow. 7 | 8 | Feel free to contact me via wjiang2@nd.edu if you have any questions! 9 | 10 | #### Reference 11 | [1] Weiwen Jiang, Jinjun Xiong and Yiyu Shi, "Can Quantum Computers Learn Like Classical Computers? A Co-Design Framework for Machine Learning and Quantum Circuits", Nature Communications, 12, 579, 2021. 12 | -------------------------------------------------------------------------------- /QF_FB_C/lib_mlp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import math 4 | from QF_Net.lib_util import * 5 | from torch.nn.parameter import Parameter 6 | 7 | class MLP(nn.Linear): 8 | def forward(self, input): 9 | binarize = BinarizeF.apply 10 | binary_weight = binarize(self.weight) 11 | if self.bias is None: 12 | output = F.linear(input, binary_weight) 13 | output = torch.div(output, input.shape[-1]) 14 | return output 15 | else: 16 | print("Bias is not supported in the current version") 17 | sys.exit(0) 18 | def reset_parameters(self): 19 | out_features, in_features = self.weight.size() # nn.Linear stores weight as (out_features, in_features) 20 | stdv = math.sqrt(1.5 / (in_features + out_features)) 21 | self.weight.data.uniform_(-stdv, stdv) 22 | if self.bias is not None: 23 | self.bias.data.zero_() 24 | self.weight.lr_scale = 1. / stdv 25 | 26 | -------------------------------------------------------------------------------- /Execute/README.md: -------------------------------------------------------------------------------- 1 | # Experiments 2 | 3 | ## exec_mnist 4 | The first set of experiments in [QuantumFlow](https://128.84.21.199/pdf/2006.14815.pdf), as shown in Figure 2. 5 | 6 | In exec_mnist.ipynb, we demonstrate the execution of QF-Net w/ BN on the {3,6} subset of MNIST. 7 | 8 | For the other results, use the following commands. 9 | 10 | Taking the {3,6} subset of MNIST as an example, the commands below run binMLP(C) w/o BN, FFNN w/o BN, MLP(C) w/o BN, QF-Net w/o BN, binMLP(C) w/ BN, FFNN w/ BN, MLP(C) w/ BN, and QF-Net w/ BN, respectively.
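For reference, the flags used below are defined in `parse_args()` of `exec_mnist.py`: `-c` selects the classes of interest, `-s` the image size (4 means 4*4), `-e` the number of training epochs, `-m` the learning-rate milestones, `-bin` enables binary activation, `-nq` switches to the classic (non-quantum) computation, `-wn` enables batch normalization, `-chk` saves checkpoints, `-nn` sets the layer sizes, `-l` and `-ql` set the network and QC batch-norm learning rates, `-qt` trains the angles, and `-qa` gives the initial angle settings.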
11 | 12 | ```console 13 | test@linux:~$ CUDA_VISIBLE_DEVICES=0 python exec_mnist.py -bin -nq -c "3, 6" -s 4 -e 30 -m "10, 20" -chk > log/binMLP_36_wo.res 14 | test@linux:~$ CUDA_VISIBLE_DEVICES=0 python exec_mnist.py -bin -c "3, 6" -s 4 -e 30 -m "10, 20" -chk > log/FFNN_36_wo.res 15 | test@linux:~$ CUDA_VISIBLE_DEVICES=0 python exec_mnist.py -nq -c "3, 6" -s 4 -e 30 -m "10, 20" -chk > log/MLP_36_wo.res 16 | test@linux:~$ CUDA_VISIBLE_DEVICES=0 python exec_mnist.py -c "3, 6" -s 4 -e 30 -m "10, 20" -chk > log/QFNET_36_wo.res 17 | test@linux:~$ CUDA_VISIBLE_DEVICES=0 python exec_mnist.py -wn -bin -nq -c "3, 6" -s 4 -e 30 -m "10, 20" -chk > log/binMLP_36_w.res 18 | test@linux:~$ CUDA_VISIBLE_DEVICES=0 python exec_mnist.py -wn -bin -c "3, 6" -s 4 -e 30 -m "10, 20" -chk > log/FFNN_36_w.res 19 | test@linux:~$ CUDA_VISIBLE_DEVICES=0 python exec_mnist.py -wn -nq -c "3, 6" -s 4 -e 30 -m "10, 20" -chk > log/MLP_36_w.res 20 | test@linux:~$ CUDA_VISIBLE_DEVICES=0 python exec_mnist.py -wn -c "3, 6" -s 4 -e 30 -m "10, 20" -chk > log/QFNET_36_w.res 21 | ``` 22 | 23 | For datasets with more than two classes, please use the following script to run the experiments. 24 | 25 | ``` 26 | # $dataset holds the classes of interest (passed to -c) 27 | 28 | # 3 classes 29 | python -u exec_mnist.py -qa "-1 -1 1 1 1 -1 1 -1, -1 -1 -1" -nn "8, 3" -bin -qt -c $dataset -s 4 -l 0.1 -ql 0.0001 -e 5 -m "2, 4" 30 | 31 | # 4 classes 32 | python -u exec_mnist.py -qa "1 -1 1 -1 -1 1 -1 -1 1 1 -1 -1 -1 1 1 1, -1 -1 -1 -1" -nn "16, 4" -bin -qt -c $dataset -s 8 -l 0.1 -ql 0.0001 -e 5 -m "2, 4" 33 | 34 | # 5 classes 35 | python -u exec_mnist.py -qa "1 -1 1 -1 -1 1 -1 -1 1 1 -1 -1 -1 1 1 1, -1 -1 -1 -1 -1" -nn "16, 5" -bin -qt -c $dataset -s 8 -l 0.1 -ql 0.0001 -e 5 -m "2, 4" 36 | ``` 37 | -------------------------------------------------------------------------------- /QF_Net/lib_util.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.autograd import Function 5 | import numpy as np 6 | import math 7 | import shutil 8 | import os 9 | import sys 10 | 11 | def modify_target_ori(target,interest_num): 12 | for j in range(len(target)): 13 | for idx in range(len(interest_num)): 14 | if target[j] == interest_num[idx]: 15 | target[j] = idx 16 | break 17 | new_target = torch.zeros(target.shape[0], len(interest_num)) 18 | for i in range(target.shape[0]): 19 | one_shot = torch.zeros(len(interest_num)) 20 | one_shot[target[i].item()] = 1 21 | new_target[i] = one_shot.clone() 22 | return target, new_target 23 | 24 | def modify_target(target,interest_num): 25 | new_target = torch.zeros(target.shape[0], len(interest_num)) 26 | for i in range(target.shape[0]): 27 | one_shot = torch.zeros(len(interest_num)) 28 | one_shot[target[i].item()] = 1 29 | new_target[i] = one_shot.clone() 30 | return target, new_target 31 | 32 | def select_num(dataset, interest_num): 33 | labels = dataset.targets # get labels 34 | labels = labels.numpy() 35 | idx = {} 36 | for num in interest_num: 37 | idx[num] = np.where(labels == num) 38 | fin_idx = idx[interest_num[0]] 39 | for i in range(1, len(interest_num)): 40 | fin_idx = (np.concatenate((fin_idx[0], idx[interest_num[i]][0])),) 41 | fin_idx = fin_idx[0] 42 | dataset.targets = labels[fin_idx] 43 | dataset.data = dataset.data[fin_idx] 44 | dataset.targets, _ = modify_target_ori(dataset.targets, interest_num) 45 | return dataset 46 | 47 | def save_checkpoint(state, is_best, save_path, filename): 48 | filename = 
os.path.join(save_path, filename) 49 | torch.save(state, filename) 50 | if is_best: 51 | bestname = os.path.join(save_path, 'model_best.tar') 52 | shutil.copyfile(filename, bestname) 53 | 54 | 55 | class BinarizeF(Function): 56 | @staticmethod 57 | def forward(cxt, input): 58 | output = input.new(input.size()) 59 | output[input >= 0] = 1 60 | output[input < 0] = -1 61 | return output 62 | @staticmethod 63 | def backward(cxt, grad_output): 64 | grad_input = grad_output.clone() 65 | return grad_input 66 | 67 | class ClipF(Function): 68 | @staticmethod 69 | def forward(ctx, input): 70 | output = input.clone().detach() 71 | output[input >= 1] = 1 72 | output[input <= 0] = 0 73 | ctx.save_for_backward(input) 74 | return output 75 | 76 | @staticmethod 77 | def backward(ctx, grad_output): 78 | input, = ctx.saved_tensors 79 | grad_input = grad_output.clone() 80 | grad_input[input >= 1] = 0 81 | grad_input[input <= 0] = 0 82 | return grad_input 83 | 84 | -------------------------------------------------------------------------------- /QF_Net/lib_qf_net.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from .lib_util import * 3 | from QF_FB_C.lib_qf_fb import * 4 | from QF_FB_C.lib_mlp import * 5 | import torch 6 | 7 | ## Define the NN architecture 8 | class Net(nn.Module): 9 | def __init__(self,img_size,layers,with_norm,given_ang,train_ang,training,binary,classic): 10 | super(Net, self).__init__() 11 | 12 | self.in_size = img_size*img_size 13 | self.training = training 14 | self.with_norm = with_norm 15 | self.layer = len(layers) 16 | self.binary = binary 17 | self.classic = classic 18 | cur_input_size = self.in_size 19 | for idx in range(self.layer): 20 | fc_name = "fc"+str(idx) 21 | if classic: 22 | setattr(self, fc_name, MLP(cur_input_size, layers[idx], bias=False)) 23 | else: 24 | setattr(self, fc_name, QF_FB_NC(cur_input_size, layers[idx], bias=False)) 25 | cur_input_size = layers[idx] 26 | 27 | if self.with_norm: 28 | for idx in range(self.layer): 29 | IAdj_name = "IAdj"+str(idx) 30 | BAdj_name = "BAdj"+str(idx) 31 | setattr(self, IAdj_name, QF_FB_BN_IAdj(num_features=layers[idx], init_ang_inc=given_ang[idx], training=train_ang)) 32 | setattr(self, BAdj_name, QF_FB_BN_BAdj(num_features=layers[idx])) 33 | for idx in range(self.layer): 34 | bn_name = "bn"+str(idx) 35 | setattr(self, bn_name,nn.BatchNorm1d(num_features=layers[idx])) 36 | 37 | 38 | def forward(self, x, training=1): 39 | binarize = BinarizeF.apply 40 | clipfunc = ClipF.apply 41 | x = x.view(-1, self.in_size) 42 | if self.classic == 1 and self.with_norm==0: 43 | for layer_idx in range(self.layer): 44 | if self.binary and layer_idx==0: 45 | x = binarize(x-0.5) 46 | x = getattr(self, "fc" + str(layer_idx))(x) 47 | x = x.pow(2) 48 | elif self.classic == 1 and self.with_norm==1: 49 | for layer_idx in range(self.layer): 50 | if self.binary and layer_idx==0: 51 | x = (binarize(x - 0.5) + 1) / 2 52 | x = getattr(self, "fc" + str(layer_idx))(x) 53 | x = x.pow(2) 54 | x = getattr(self, "bn" + str(layer_idx))(x) 55 | x = clipfunc(x) 56 | 57 | elif self.classic == 0 and self.with_norm==0: 58 | for layer_idx in range(self.layer): 59 | if self.binary and layer_idx==0: 60 | x = (binarize(x - 0.5) + 1) / 2 61 | x = getattr(self, "fc" + str(layer_idx))(x) 62 | else: # Quantum Training 63 | if self.training == 1: 64 | for layer_idx in range(self.layer): 65 | if self.binary and layer_idx==0: 66 | x = (binarize(x-0.5)+1)/2 67 | x = getattr(self, "fc"+str(layer_idx))(x) 68 | x = 
getattr(self, "BAdj"+str(layer_idx))(x) 69 | x = getattr(self, "IAdj"+str(layer_idx))(x) 70 | else: 71 | for layer_idx in range(self.layer): 72 | if self.binary and layer_idx==0: 73 | x = (binarize(x-0.5)+1)/2 74 | x = getattr(self, "fc"+str(layer_idx))(x) 75 | x = getattr(self, "BAdj"+str(layer_idx))(x, training=False) 76 | x = getattr(self, "IAdj"+str(layer_idx))(x, training=False) 77 | return x 78 | 79 | 80 | -------------------------------------------------------------------------------- /QF_FB_C/lib_qf_fb.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import math 4 | from QF_Net.lib_util import * 5 | from torch.nn.parameter import Parameter 6 | 7 | 8 | class QF_FB_NC(nn.Linear): 9 | def sim_neural_comp(self, input_ori, w_ori): 10 | p = input_ori # inputs interpreted as firing probabilities in [0, 1] 11 | d = 4 * p * (1 - p) 12 | e = (2 * p - 1) # note d + e**2 = 1 elementwise 13 | w = w_ori 14 | sum_of_sq = (d + e.pow(2)).sum(-1) 15 | sum_of_sq = sum_of_sq.unsqueeze(-1) 16 | sum_of_sq = sum_of_sq.expand(p.shape[0], w.shape[0]) 17 | diag_p = torch.diag_embed(e) 18 | p_w = torch.matmul(w, diag_p) 19 | z_p_w = torch.zeros_like(p_w) 20 | shft_p_w = torch.cat((p_w, z_p_w), -1) 21 | sum_of_cross = torch.zeros_like(p_w) 22 | length = p.shape[1] 23 | for shft in range(1, length): # accumulates the cross terms sum over pairs i<j of (w_i e_i)(w_j e_j) 24 | sum_of_cross += shft_p_w[:, :, 0:length] * shft_p_w[:, :, shft:length + shft] 25 | sum_of_cross = sum_of_cross.sum(-1) 26 | return (sum_of_sq + 2 * sum_of_cross) / (length ** 2) 27 | 28 | def forward(self, input): 29 | binarize = BinarizeF.apply 30 | binary_weight = binarize(self.weight) 31 | if self.bias is None: 32 | return self.sim_neural_comp(input, binary_weight) 33 | else: 34 | print("Bias is not supported in the current version") 35 | sys.exit(0) 36 | 37 | def reset_parameters(self): 38 | out_features, in_features = self.weight.size() # nn.Linear stores weight as (out_features, in_features) 39 | stdv = math.sqrt(1.5 / (in_features + out_features)) 40 | self.weight.data.uniform_(-stdv, stdv) 41 | if self.bias is not None: 42 | self.bias.data.zero_() 43 | self.weight.lr_scale = 1. / stdv
44 | 45 | 46 | 47 | class QF_FB_BN_IAdj(nn.Module): 48 | def __init__(self, num_features, init_ang_inc=1, momentum=0.1, training=False): 49 | super(QF_FB_BN_IAdj, self).__init__() 50 | 51 | self.x_running_rot = Parameter(torch.zeros((num_features)), requires_grad=False) 52 | self.ang_inc = Parameter(torch.tensor(init_ang_inc,dtype=torch.float32),requires_grad=True) 53 | self.momentum = momentum 54 | 55 | self.printed = False 56 | self.x_mean_ancle = 0 57 | self.x_mean_rote = 0 58 | self.input = 0 59 | self.output = 0 60 | 61 | def forward(self, x, training=True): 62 | if not training: 63 | if not self.printed: 64 | self.printed = True 65 | x_1 = (self.x_running_rot * x) 66 | 67 | else: 68 | self.printed = False 69 | x = x.transpose(0, 1) 70 | x_sum = x.sum(-1).unsqueeze(-1).expand(x.shape) 71 | x_lack_sum = x_sum + x 72 | x_mean = x_lack_sum / x.shape[-1] 73 | ang_inc = (self.ang_inc > 0).float() * self.ang_inc + 1 74 | y = 0.5 / x_mean 75 | y = y.transpose(0, 1) 76 | y = y / ang_inc 77 | y = y.transpose(0, 1) 78 | x_moving_rot = (y.sum(-1) / x.shape[-1]) 79 | self.x_running_rot[:] = self.momentum * self.x_running_rot + \ 80 | (1 - self.momentum) * x_moving_rot 81 | x_1 = y * x 82 | x_1 = x_1.transpose(0, 1) 83 | return x_1 84 | 85 | def reset_parameters(self): 86 | self.x_running_rot.data.zero_() # reset the running rotation statistic 87 | self.ang_inc.data.zero_() 88 | 89 | 90 | class QF_FB_BN_BAdj(nn.Module): 91 | def __init__(self, num_features, momentum=0.1): 92 | super(QF_FB_BN_BAdj, self).__init__() 93 | self.x_running_rot = Parameter(torch.zeros(num_features), requires_grad=False) 94 | self.momentum = momentum 95 | self.x_l_0_5 = Parameter(torch.zeros(num_features), requires_grad=False) 96 | self.x_g_0_5 = Parameter(torch.zeros(num_features), requires_grad=False) 97 | 98 | def forward(self, x, training=True): 99 | if not training: 100 | x_1 = self.x_l_0_5 * (self.x_running_rot * (1 - x) + x) 101 | x_1 += self.x_g_0_5 * (self.x_running_rot * x) 102 | else: 103 | x = x.transpose(0, 1) 104 | x_sum = x.sum(-1) 105 | x_mean = x_sum / x.shape[-1] 106 | self.x_l_0_5[:] = ((x_mean <= 0.5).float()) 107 | self.x_g_0_5[:] = ((x_mean > 0.5).float()) 108 | y = self.x_l_0_5 * ((0.5 - x_mean) / (1 - x_mean)) 109 | y += self.x_g_0_5 * (0.5 / x_mean) 110 | 111 | self.x_running_rot[:] = self.momentum * self.x_running_rot + \ 112 | (1 - self.momentum) * y 113 | x = x.transpose(0, 1) 114 | x_1 = self.x_l_0_5 * (y * (1 - x) + x) 115 | x_1 += self.x_g_0_5 * (y * x) 116 | return x_1 117 | -------------------------------------------------------------------------------- /Execute/exec_mnist.py: -------------------------------------------------------------------------------- 1 | from QF_FB_C.lib_mlp import * 2 | from QF_FB_C.lib_qf_fb import * 3 | from QF_Net.lib_qf_net import * 4 | from QF_Net.lib_util import * 5 | 6 | import argparse 7 | import time 8 | import torch 9 | import torchvision.transforms as transforms 10 | from torchvision import datasets 11 | import torch.nn as nn 12 | import os 13 | import sys 14 | 15 | from collections import Counter 16 | from pathlib import Path 17 | 18 | import logging 19 | logging.basicConfig(stream=sys.stdout, 20 | level=logging.WARNING, 21 | format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s') 22 | logger = logging.getLogger(__name__) 23 | 24 | 25 | def train(epoch,interest_num,criterion,train_loader): 26 | model.train() 27 | correct = 0 28 | epoch_loss = [] 29 | for batch_idx, (data, target) in enumerate(train_loader): 30 | target, new_target = modify_target(target,interest_num) 31 | 32 | data, 
target = data.to(device), target.to(device) 33 | optimizer.zero_grad() 34 | output = model(data, True) 35 | 36 | pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability 37 | correct += pred.eq(target.data.view_as(pred)).cpu().sum() 38 | 39 | loss = criterion(output, target) 40 | epoch_loss.append(loss.item()) 41 | loss.backward() 42 | 43 | optimizer.step() 44 | 45 | if batch_idx % 500 == 0: 46 | logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {}/{} ({:.2f}%)'.format( 47 | epoch, batch_idx * len(data), len(train_loader.dataset), 48 | 100. * batch_idx / len(train_loader), loss, correct, (batch_idx + 1) * len(data), 49 | 100. * float(correct) / float(((batch_idx + 1) * len(data))))) 50 | print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {}/{} ({:.2f}%)'.format( 51 | epoch, batch_idx * len(data), len(train_loader.dataset), 52 | 100. * batch_idx / len(train_loader), loss, correct, (batch_idx + 1) * len(data), 53 | 100. * float(correct) / float(((batch_idx + 1) * len(data))))) 54 | print("-" * 20, "training done, loss", "-" * 20) 55 | logger.info("Training Set: Average loss: {}".format(round(sum(epoch_loss) / len(epoch_loss), 6))) 56 | 57 | 58 | accur = [] 59 | 60 | 61 | def test(interest_num,criterion,test_loader,debug=False): 62 | model.eval() 63 | test_loss = 0 64 | correct = 0 65 | for data, target in test_loader: 66 | target, new_target = modify_target(target,interest_num) 67 | 68 | data, target = data.to(device), target.to(device) 69 | if debug: 70 | start = time.time() 71 | output = model(data, False) 72 | if debug: 73 | end = time.time() 74 | print("Time",end - start) 75 | # sys.exit(0) 76 | test_loss += criterion(output, target) # sum up batch loss 77 | pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability 78 | correct += pred.eq(target.data.view_as(pred)).cpu().sum() 79 | 80 | a = 100. * correct / len(test_loader.dataset) 81 | accur.append(a) 82 | test_loss /= len(test_loader.dataset) 83 | print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format( 84 | test_loss, correct, len(test_loader.dataset), 85 | 100. 
* float(correct) / float(len(test_loader.dataset)))) 86 | 87 | return float(correct) / len(test_loader.dataset) 88 | 89 | 90 | 91 | 92 | def load_data(interest_num): 93 | # convert data to torch.FloatTensor 94 | transform = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor()]) 95 | # transform = transforms.Compose([transforms.Resize((img_size,img_size)),transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))]) 96 | # choose the training and test datasets 97 | train_data = datasets.MNIST(root='data', train=True, 98 | download=True, transform=transform) 99 | test_data = datasets.MNIST(root='data', train=False, 100 | download=True, transform=transform) 101 | 102 | train_data = select_num(train_data, interest_num) 103 | test_data = select_num(test_data, interest_num) 104 | 105 | # prepare data loaders 106 | train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, 107 | num_workers=num_workers, shuffle=True, drop_last=True) 108 | test_loader = torch.utils.data.DataLoader(test_data, batch_size=inference_batch_size, 109 | num_workers=num_workers, shuffle=False, drop_last=True) 110 | 111 | return train_loader, test_loader 112 | 113 | def parse_args(): 114 | parser = argparse.ArgumentParser(description='QuantumFlow Classification Training') 115 | 116 | # ML related 117 | parser.add_argument('--device', default='cpu', help='device') 118 | parser.add_argument('-c','--interest_class',default="3, 6",help="classes of interest",) 119 | parser.add_argument('-s','--img_size', default="4", help="image size, e.g., 4 for 4*4", ) 120 | parser.add_argument('-j','--num_workers', default="0", help="number of workers to load data", ) 121 | parser.add_argument('-tb','--batch_size', default="32", help="training batch size", ) 122 | parser.add_argument('-ib','--inference_batch_size', default="32", help="inference batch size", ) 123 | parser.add_argument('-nn','--neural_in_layers', default="4, 2", help="PNN structure", ) 124 | parser.add_argument('-l','--init_lr', default="0.01", help="PNN learning rate", ) 125 | parser.add_argument('-m','--milestones', default="3, 7, 9", help="training milestones", ) 126 | parser.add_argument('-e','--max_epoch', default="10", help="number of training epochs", ) 127 | parser.add_argument('-r','--resume_path', default='', help='resume from checkpoint') 128 | parser.add_argument('-t',"--test_only", help="Only Test without Training", action="store_true", ) 129 | parser.add_argument('-bin', "--binary", help="binary activation", action="store_true", ) 130 | 131 | 132 | # QC related 133 | parser.add_argument('-nq', "--classic", help="classic computing test", action="store_true", ) 134 | parser.add_argument('-wn', "--with_norm", help="use BatchNorm", action="store_true", ) 135 | 136 | parser.add_argument('-ql','--init_qc_lr', default="0.1", help="QC Batchnorm learning rate", ) 137 | parser.add_argument('-qa',"--given_ang", default="1 -1 1 -1, -1 -1", help="angle amplification; must have the same size as --neural_in_layers",) 138 | parser.add_argument('-qt',"--train_ang", help="train angle", action="store_true", ) 139 | parser.add_argument('-qs', "--sim_range", default="0, 1551", help="quantum simulation range",) 140 | 141 | # File 142 | parser.add_argument('-chk',"--save_chkp", help="Save checkpoints", action="store_true", ) 143 | # parser.add_argument("--save_path", help="save path", ) 144 | 145 | parser.add_argument('-deb', "--debug", help="Debug mode", action="store_true", ) 146 | 147 | args = parser.parse_args() 148 | return args 149 | 150 | 151 | if __name__ == "__main__": 
152 | print("=" * 100) 153 | print("Training procedure for Quantum Computer:") 154 | print("\tStart at:", time.strftime("%m/%d/%Y %H:%M:%S")) 155 | print("\tProblems and issues, please contact Dr. Weiwen Jiang (wjiang2@nd.edu)") 156 | print("\tEnjoy and Good Luck!") 157 | print("=" * 100) 158 | print() 159 | 160 | args = parse_args() 161 | 162 | device = args.device 163 | interest_class = [int(x.strip()) for x in args.interest_class.split(",")] 164 | img_size = int(args.img_size) 165 | num_workers = int(args.num_workers) 166 | batch_size = int(args.batch_size) 167 | inference_batch_size = int(args.inference_batch_size) 168 | layers = [int(x.strip()) for x in args.neural_in_layers.split(",")] 169 | init_lr = float(args.init_lr) 170 | milestones = [int(x.strip()) for x in args.milestones.split(",")] 171 | max_epoch = int(args.max_epoch) 172 | resume_path = args.resume_path 173 | training = not(args.test_only) 174 | binary = args.binary 175 | debug = args.debug 176 | classic = args.classic 177 | init_qc_lr = float(args.init_qc_lr) 178 | with_norm = args.with_norm 179 | sim_range = [int(x.strip()) for x in args.sim_range.split(",")] 180 | given_ang = [[int(y) for y in x.strip().split(" ")] for x in args.given_ang.split(",")] 181 | train_ang = args.train_ang 182 | save_chkp = args.save_chkp 183 | if save_chkp: 184 | save_path = "./model/" + os.path.basename(sys.argv[0]) + "_" + time.strftime("%Y_%m_%d-%H_%M_%S") 185 | Path(save_path).mkdir(parents=True, exist_ok=True) 186 | 187 | logger.info("Checkpoint path: {}".format(save_path)) 188 | 189 | if save_chkp: 190 | fh = open(save_path+"/config","w") 191 | print("=" * 21, "Your setting is listed as follows", "=" * 22, file=fh) 192 | print("\t{:<25} {:<15}".format('Attribute', 'Input'), file=fh) 193 | for k, v in vars(args).items(): 194 | print("\t{:<25} {:<15}".format(k, v), file=fh) 195 | print("=" * 22, "Exploration will start, have fun", "=" * 22, file=fh) 196 | print("=" * 78, file=fh) 197 | 198 | print("=" * 21, "Your setting is listed as follows", "=" * 22) 199 | print("\t{:<25} {:<15}".format('Attribute', 'Input')) 200 | for k,v in vars(args).items(): 201 | print("\t{:<25} {:<15}".format(k, v)) 202 | print("=" * 22, "Exploration will start, have fun", "=" * 22) 203 | print("=" * 78) 204 | 205 | 206 | # Schedule train and test 207 | 208 | train_loader, test_loader = load_data(interest_class) 209 | criterion = nn.CrossEntropyLoss() 210 | model = Net(img_size,layers,with_norm,given_ang,train_ang,training,binary,classic).to(device) 211 | 212 | print(model) 213 | 214 | 215 | if with_norm and train_ang: 216 | para_list = [] 217 | for idx in range(len(layers)): 218 | fc = getattr(model, "fc"+str(idx)) 219 | IAdj = getattr(model, "IAdj"+str(idx)) 220 | para_list.append({'params': fc.parameters(), 'lr': init_lr}) 221 | para_list.append({'params': IAdj.parameters(), 'lr': init_qc_lr}) 222 | optimizer = torch.optim.Adam(para_list) 223 | else: 224 | optimizer = torch.optim.Adam(model.parameters(), lr=init_lr) 225 | 226 | 227 | 228 | scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=0.1) 229 | 230 | if os.path.isfile(resume_path): 231 | print("=> loading checkpoint from '{}'<=".format(resume_path)) 232 | checkpoint = torch.load(resume_path, map_location=device) 233 | epoch_init, acc = checkpoint["epoch"], checkpoint["acc"] 234 | model.load_state_dict(checkpoint["state_dict"]) 235 | 236 | scheduler.load_state_dict(checkpoint["scheduler"]) 237 | scheduler.milestones = Counter(milestones) 238 | 
optimizer.load_state_dict(checkpoint["optimizer"]) 239 | else: 240 | epoch_init, acc = 0, 0 241 | 242 | 243 | 244 | if training: 245 | for epoch in range(epoch_init, max_epoch + 1): 246 | print("=" * 20, epoch, "epoch", "=" * 20) 247 | print("Epoch Start at:", time.strftime("%m/%d/%Y %H:%M:%S")) 248 | 249 | print("-" * 20, "learning rates", "-" * 20) 250 | for param_group in optimizer.param_groups: 251 | print(param_group['lr'], end=",") 252 | print() 253 | 254 | print("-" * 20, "training", "-" * 20) 255 | print("Training Start at:", time.strftime("%m/%d/%Y %H:%M:%S")) 256 | train(epoch,interest_class,criterion,train_loader) 257 | print("Training End at:", time.strftime("%m/%d/%Y %H:%M:%S")) 258 | print("-" * 60) 259 | 260 | 261 | print() 262 | 263 | print("-" * 20, "testing", "-" * 20) 264 | print("Testing Start at:", time.strftime("%m/%d/%Y %H:%M:%S")) 265 | cur_acc = test(interest_class,criterion,test_loader) 266 | print("Testing End at:", time.strftime("%m/%d/%Y %H:%M:%S")) 267 | print("-" * 60) 268 | print() 269 | 270 | 271 | 272 | 273 | scheduler.step() 274 | 275 | is_best = False 276 | if cur_acc > acc: 277 | is_best = True 278 | acc = cur_acc 279 | 280 | print("Best accuracy: {}; Current accuracy {}. Checkpointing".format(acc, cur_acc)) 281 | 282 | 283 | if save_chkp: 284 | save_checkpoint({ 285 | 'epoch': epoch + 1, 286 | 'acc': acc, 287 | 'state_dict': model.state_dict(), 288 | 'optimizer': optimizer.state_dict(), 289 | 'scheduler': scheduler.state_dict(), 290 | }, is_best, save_path, 'checkpoint_{}_{}.pth.tar'.format(epoch, round(cur_acc, 4))) 291 | print("Epoch End at:", time.strftime("%m/%d/%Y %H:%M:%S")) 292 | print("=" * 60) 293 | print() 294 | else: 295 | # print("=" * 20, max_epoch, "Testing", "=" * 20) 296 | # print("=" * 100) 297 | # for name, para in model.named_parameters(): 298 | # if "fc" in name: 299 | # print(name,binarize(para)) 300 | # else: 301 | # print(name, para) 302 | # print("="*100) 303 | # test(interest_class,criterion,test_loader,debug) 304 | # correct = 0 305 | # qc_correct = 0 306 | test_idx = 0 307 | for data, target in test_loader: 308 | # if test_idx < sim_range[0] or test_idx >= sim_range[1]: 309 | # test_idx += 1 310 | # continue 311 | target, new_target = modify_target(target, interest_class) 312 | print(test_idx, target.item()) 313 | test_idx += 1 314 | 315 | # start = time.time() 316 | # output = model(data, False) 317 | # end = time.time() 318 | # 319 | # q_start = time.time() 320 | # qc_output = run_simulator(model,data[0][0],layers) 321 | # q_end = time.time() 322 | # 323 | # print("Test iteration {}: COut {}, QOut {}, CTime {}, QTime {}".format(test_idx,output,qc_output,end-start,q_end-q_start)) 324 | # test_idx+=1 325 | # 326 | # pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability 327 | # qc_pred = qc_output.data.max(1, keepdim=True)[1] # get the index of the max log-probability 328 | # correct += pred.eq(target.data.view_as(pred)).cpu().sum() 329 | # qc_correct += pred.eq(target.data.view_as(pred)).cpu().sum() 330 | # 331 | # print('Test set: Accuracy Class: {}/{}, Accuracy QC: {}/{}'.format( 332 | # correct, sim_range[1]-sim_range[0], qc_correct, sim_range[1]-sim_range[0])) 333 | # 334 | 335 | 336 | 337 | 338 | -------------------------------------------------------------------------------- /Execute/exec_mnist.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | 
"collapsed": true, 8 | "pycharm": { 9 | "is_executing": false 10 | } 11 | }, 12 | "outputs": [], 13 | "source": [ 14 | "from QF_FB_C.lib_mlp import *\n", 15 | "from QF_FB_C.lib_qf_fb import *\n", 16 | "from QF_Net.lib_qf_net import *\n", 17 | "from QF_Net.lib_util import *\n", 18 | "\n", 19 | "import argparse\n", 20 | "import time\n", 21 | "import torch\n", 22 | "import torchvision.transforms as transforms\n", 23 | "from torchvision import datasets\n", 24 | "import torch.nn as nn\n", 25 | "import os\n", 26 | "import sys\n", 27 | "\n", 28 | "from collections import Counter\n", 29 | "from pathlib import Path" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 9, 35 | "outputs": [], 36 | "source": [ 37 | "\n", 38 | "def train(model, optimizer, device, epoch,interest_num,criterion,train_loader):\n", 39 | " model.train()\n", 40 | " correct = 0\n", 41 | " epoch_loss = []\n", 42 | " for batch_idx, (data, target) in enumerate(train_loader):\n", 43 | " target, new_target = modify_target(target,interest_num)\n", 44 | "\n", 45 | " data, target = data.to(device), target.to(device)\n", 46 | " optimizer.zero_grad()\n", 47 | " output = model(data, True)\n", 48 | "\n", 49 | " pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n", 50 | " correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n", 51 | "\n", 52 | " loss = criterion(output, target)\n", 53 | " epoch_loss.append(loss.item())\n", 54 | " loss.backward()\n", 55 | "\n", 56 | " optimizer.step()\n", 57 | "\n", 58 | " if batch_idx % 50 == 0: \n", 59 | " print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tAccuracy: {}/{} ({:.2f}%)'.format(\n", 60 | " epoch, batch_idx * len(data), len(train_loader.dataset),\n", 61 | " 100. * batch_idx / len(train_loader), loss, correct, (batch_idx + 1) * len(data),\n", 62 | " 100. * float(correct) / float(((batch_idx + 1) * len(data)))))\n", 63 | " print(\"-\" * 20, \"training done, loss\", \"-\" * 20) \n", 64 | "\n", 65 | "def test(model,device,interest_num,criterion,test_loader):\n", 66 | " accur = []\n", 67 | " model.eval()\n", 68 | " test_loss = 0\n", 69 | " correct = 0\n", 70 | " for data, target in test_loader:\n", 71 | " target, new_target = modify_target(target,interest_num)\n", 72 | " data, target = data.to(device), target.to(device) \n", 73 | " output = model(data, False) \n", 74 | " test_loss += criterion(output, target) # sum up batch loss\n", 75 | " pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n", 76 | " correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n", 77 | "\n", 78 | " a = 100. * correct / len(test_loader.dataset)\n", 79 | " accur.append(a)\n", 80 | " test_loss /= len(test_loader.dataset)\n", 81 | " print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(\n", 82 | " test_loss, correct, len(test_loader.dataset),\n", 83 | " 100. 
* float(correct) / float(len(test_loader.dataset))))\n", 84 | "\n", 85 | " return float(correct) / len(test_loader.dataset)\n", 86 | "\n", 87 | "\n", 88 | "def load_data(interest_num,img_size,batch_size,inference_batch_size,num_workers):\n", 89 | " # convert data to torch.FloatTensor\n", 90 | " transform = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor()]) \n", 91 | " train_data = datasets.MNIST(root='data', train=True,\n", 92 | " download=True, transform=transform)\n", 93 | " test_data = datasets.MNIST(root='data', train=False,\n", 94 | " download=True, transform=transform)\n", 95 | " train_data = select_num(train_data, interest_num)\n", 96 | " test_data = select_num(test_data, interest_num)\n", 97 | "\n", 98 | " # prepare data loaders\n", 99 | " train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n", 100 | " num_workers=num_workers, shuffle=True, drop_last=True)\n", 101 | " test_loader = torch.utils.data.DataLoader(test_data, batch_size=inference_batch_size,\n", 102 | " num_workers=num_workers, shuffle=False, drop_last=True)\n", 103 | "\n", 104 | " return train_loader,test_loader" 105 | ], 106 | "metadata": { 107 | "collapsed": false, 108 | "pycharm": { 109 | "name": "#%%\n", 110 | "is_executing": false 111 | } 112 | } 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": 10, 117 | "outputs": [ 118 | { 119 | "name": "stdout", 120 | "text": [ 121 | "====================================================================================================\n", 122 | "Training procedure for Quantum Computer:\n", 123 | "\tStart at: 07/02/2020 08:49:57\n", 124 | "\tProblems and issues, please contact Dr. Weiwen Jiang (wjiang2@nd.edu)\n", 125 | "\tEnjoy and Good Luck!\n", 126 | "====================================================================================================\n", 127 | "\n", 128 | "==================== 0 epoch ====================\n", 129 | "Epoch Start at: 07/02/2020 08:49:58\n", 130 | "-------------------- learning rates --------------------\n", 131 | "0.1,0.1,0.1,0.1,\n", 132 | "-------------------- training --------------------\n", 133 | "Trainign Start at: 07/02/2020 08:49:58\n", 134 | "Train Epoch: 0 [0/12049 (0%)]\tLoss: 0.692409\tAccuracy: 22/32 (68.75%)\n", 135 | "Train Epoch: 0 [1600/12049 (13%)]\tLoss: 0.671228\tAccuracy: 1530/1632 (93.75%)\n", 136 | "Train Epoch: 0 [3200/12049 (27%)]\tLoss: 0.671653\tAccuracy: 3077/3232 (95.20%)\n", 137 | "Train Epoch: 0 [4800/12049 (40%)]\tLoss: 0.673316\tAccuracy: 4616/4832 (95.53%)\n", 138 | "Train Epoch: 0 [6400/12049 (53%)]\tLoss: 0.669353\tAccuracy: 6141/6432 (95.48%)\n", 139 | "Train Epoch: 0 [8000/12049 (66%)]\tLoss: 0.669420\tAccuracy: 7670/8032 (95.49%)\n", 140 | "Train Epoch: 0 [9600/12049 (80%)]\tLoss: 0.677605\tAccuracy: 9199/9632 (95.50%)\n", 141 | "Train Epoch: 0 [11200/12049 (93%)]\tLoss: 0.666153\tAccuracy: 10733/11232 (95.56%)\n", 142 | "-------------------- training done, loss --------------------\n", 143 | "Trainign End at: 07/02/2020 08:50:08\n", 144 | "------------------------------------------------------------\n", 145 | "\n", 146 | "-------------------- testing --------------------\n", 147 | "Testing Start at: 07/02/2020 08:50:08\n", 148 | "Test set: Average loss: 0.0207, Accuracy: 1883/1968 (95.68%)\n", 149 | "Testing End at: 07/02/2020 08:50:09\n", 150 | "------------------------------------------------------------\n", 151 | "\n", 152 | "Best accuracy: 0.9568089430894309; Current accuracy 0.9568089430894309. 
Checkpointing\n", 153 | "Epoch End at: 07/02/2020 08:50:09\n", 154 | "============================================================\n", 155 | "\n", 156 | "==================== 1 epoch ====================\n", 157 | "Epoch Start at: 07/02/2020 08:50:09\n", 158 | "-------------------- learning rates --------------------\n", 159 | "0.010000000000000002,0.010000000000000002,0.010000000000000002,0.010000000000000002,\n", 160 | "-------------------- training --------------------\n", 161 | "Trainign Start at: 07/02/2020 08:50:09\n", 162 | "Train Epoch: 1 [0/12049 (0%)]\tLoss: 0.673631\tAccuracy: 30/32 (93.75%)\n", 163 | "Train Epoch: 1 [1600/12049 (13%)]\tLoss: 0.667995\tAccuracy: 1570/1632 (96.20%)\n", 164 | "Train Epoch: 1 [3200/12049 (27%)]\tLoss: 0.669594\tAccuracy: 3114/3232 (96.35%)\n", 165 | "Train Epoch: 1 [4800/12049 (40%)]\tLoss: 0.669415\tAccuracy: 4663/4832 (96.50%)\n", 166 | "Train Epoch: 1 [6400/12049 (53%)]\tLoss: 0.671196\tAccuracy: 6207/6432 (96.50%)\n", 167 | "Train Epoch: 1 [8000/12049 (66%)]\tLoss: 0.670984\tAccuracy: 7734/8032 (96.29%)\n", 168 | "Train Epoch: 1 [9600/12049 (80%)]\tLoss: 0.671099\tAccuracy: 9272/9632 (96.26%)\n", 169 | "Train Epoch: 1 [11200/12049 (93%)]\tLoss: 0.668966\tAccuracy: 10805/11232 (96.20%)\n", 170 | "-------------------- training done, loss --------------------\n", 171 | "Trainign End at: 07/02/2020 08:50:19\n", 172 | "------------------------------------------------------------\n", 173 | "\n", 174 | "-------------------- testing --------------------\n", 175 | "Testing Start at: 07/02/2020 08:50:19\n", 176 | "Test set: Average loss: 0.0207, Accuracy: 1882/1968 (95.63%)\n", 177 | "Testing End at: 07/02/2020 08:50:20\n", 178 | "------------------------------------------------------------\n", 179 | "\n", 180 | "Best accuracy: 0.9568089430894309; Current accuracy 0.9563008130081301. 
Checkpointing\n", 181 | "Epoch End at: 07/02/2020 08:50:20\n", 182 | "============================================================\n", 183 | "\n", 184 | "==================== 2 epoch ====================\n", 185 | "Epoch Start at: 07/02/2020 08:50:20\n", 186 | "-------------------- learning rates --------------------\n", 187 | "0.010000000000000002,0.010000000000000002,0.010000000000000002,0.010000000000000002,\n", 188 | "-------------------- training --------------------\n", 189 | "Trainign Start at: 07/02/2020 08:50:20\n", 190 | "Train Epoch: 2 [0/12049 (0%)]\tLoss: 0.671825\tAccuracy: 30/32 (93.75%)\n", 191 | "Train Epoch: 2 [1600/12049 (13%)]\tLoss: 0.670105\tAccuracy: 1549/1632 (94.91%)\n", 192 | "Train Epoch: 2 [3200/12049 (27%)]\tLoss: 0.666928\tAccuracy: 3091/3232 (95.64%)\n", 193 | "Train Epoch: 2 [4800/12049 (40%)]\tLoss: 0.672028\tAccuracy: 4631/4832 (95.84%)\n", 194 | "Train Epoch: 2 [6400/12049 (53%)]\tLoss: 0.673349\tAccuracy: 6181/6432 (96.10%)\n", 195 | "Train Epoch: 2 [8000/12049 (66%)]\tLoss: 0.666993\tAccuracy: 7719/8032 (96.10%)\n", 196 | "Train Epoch: 2 [9600/12049 (80%)]\tLoss: 0.669513\tAccuracy: 9252/9632 (96.05%)\n", 197 | "Train Epoch: 2 [11200/12049 (93%)]\tLoss: 0.670185\tAccuracy: 10794/11232 (96.10%)\n", 198 | "-------------------- training done, loss --------------------\n", 199 | "Trainign End at: 07/02/2020 08:50:31\n", 200 | "------------------------------------------------------------\n", 201 | "\n", 202 | "-------------------- testing --------------------\n", 203 | "Testing Start at: 07/02/2020 08:50:31\n", 204 | "Test set: Average loss: 0.0207, Accuracy: 1883/1968 (95.68%)\n", 205 | "Testing End at: 07/02/2020 08:50:32\n", 206 | "------------------------------------------------------------\n", 207 | "\n", 208 | "Best accuracy: 0.9568089430894309; Current accuracy 0.9568089430894309. Checkpointing\n", 209 | "Epoch End at: 07/02/2020 08:50:32\n", 210 | "============================================================\n", 211 | "\n" 212 | ], 213 | "output_type": "stream" 214 | } 215 | ], 216 | "source": [ 217 | "print(\"=\" * 100)\n", 218 | "print(\"Training procedure for Quantum Computer:\")\n", 219 | "print(\"\\tStart at:\", time.strftime(\"%m/%d/%Y %H:%M:%S\"))\n", 220 | "print(\"\\tProblems and issues, please contact Dr. 
Weiwen Jiang (wjiang2@nd.edu)\")\n", 221 | "print(\"\\tEnjoy and Good Luck!\")\n", 222 | "print(\"=\" * 100)\n", 223 | "print()\n", 224 | "\n", 225 | "\n", 226 | "device = \"cpu\"\n", 227 | "interest_class = [3,6]\n", 228 | "img_size = 4\n", 229 | "num_workers = 0\n", 230 | "batch_size = 32\n", 231 | "inference_batch_size = 32\n", 232 | "layers = [4,2]\n", 233 | "init_lr = 0.1\n", 234 | "milestones = [1]\n", 235 | "max_epoch = 2\n", 236 | "resume_path = \"\"\n", 237 | "training = True\n", 238 | "binary = False\n", 239 | "debug = False\n", 240 | "classic = False\n", 241 | "init_qc_lr = 0.1\n", 242 | "with_norm = True\n", 243 | "sim_range = []\n", 244 | "given_ang = [[1,-1,1,-1],[-1,-1]]\n", 245 | "train_ang = True\n", 246 | "save_chkp = False\n", 247 | "epoch_init, acc = 0, 0\n", 248 | "\n", 249 | "\n", 250 | "# Load data and create model\n", 251 | "train_loader, test_loader = load_data(interest_class,img_size,batch_size,inference_batch_size,num_workers)\n", 252 | "criterion = nn.CrossEntropyLoss()\n", 253 | "model = Net(img_size,layers,with_norm,given_ang,train_ang,training,binary,classic).to(device)\n", 254 | "\n", 255 | "# Initialize Normalization Parameters\n", 256 | "para_list = []\n", 257 | "for idx in range(len(layers)):\n", 258 | " fc = getattr(model, \"fc\"+str(idx))\n", 259 | " IAdj = getattr(model, \"IAdj\"+str(idx))\n", 260 | " para_list.append({'params': fc.parameters(), 'lr': init_lr})\n", 261 | " para_list.append({'params': IAdj.parameters(), 'lr': init_qc_lr})\n", 262 | "optimizer = torch.optim.Adam(para_list)\n", 263 | "scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=0.1)\n", 264 | "\n", 265 | "# Training\n", 266 | "\n", 267 | "for epoch in range(epoch_init, max_epoch + 1):\n", 268 | " print(\"=\" * 20, epoch, \"epoch\", \"=\" * 20)\n", 269 | " print(\"Epoch Start at:\", time.strftime(\"%m/%d/%Y %H:%M:%S\"))\n", 270 | " print(\"-\" * 20, \"learning rates\", \"-\" * 20)\n", 271 | " for param_group in optimizer.param_groups:\n", 272 | " print(param_group['lr'], end=\",\")\n", 273 | " print()\n", 274 | " print(\"-\" * 20, \"training\", \"-\" * 20)\n", 275 | " print(\"Training Start at:\", time.strftime(\"%m/%d/%Y %H:%M:%S\"))\n", 276 | " train(model, optimizer, device, epoch,interest_class,criterion,train_loader)\n", 277 | " print(\"Training End at:\", time.strftime(\"%m/%d/%Y %H:%M:%S\"))\n", 278 | " print(\"-\" * 60)\n", 279 | " print()\n", 280 | " print(\"-\" * 20, \"testing\", \"-\" * 20)\n", 281 | " print(\"Testing Start at:\", time.strftime(\"%m/%d/%Y %H:%M:%S\"))\n", 282 | " cur_acc = test(model,device,interest_class,criterion,test_loader)\n", 283 | " print(\"Testing End at:\", time.strftime(\"%m/%d/%Y %H:%M:%S\"))\n", 284 | " print(\"-\" * 60)\n", 285 | " print()\n", 286 | "\n", 287 | " scheduler.step()\n", 288 | "\n", 289 | " is_best = False\n", 290 | " if cur_acc > acc:\n", 291 | " is_best = True\n", 292 | " acc = cur_acc\n", 293 | " print(\"Best accuracy: {}; Current accuracy {}. 
Checkpointing\".format(acc, cur_acc))\n", 294 | " print(\"Epoch End at:\", time.strftime(\"%m/%d/%Y %H:%M:%S\"))\n", 295 | " print(\"=\" * 60)\n", 296 | " print()" 297 | ], 298 | "metadata": { 299 | "collapsed": false, 300 | "pycharm": { 301 | "name": "#%%\n", 302 | "is_executing": false 303 | } 304 | } 305 | } 306 | ], 307 | "metadata": { 308 | "kernelspec": { 309 | "name": "pycharm-8213722", 310 | "language": "python", 311 | "display_name": "PyCharm (qiskit_practice)" 312 | }, 313 | "language_info": { 314 | "codemirror_mode": { 315 | "name": "ipython", 316 | "version": 2 317 | }, 318 | "file_extension": ".py", 319 | "mimetype": "text/x-python", 320 | "name": "python", 321 | "nbconvert_exporter": "python", 322 | "pygments_lexer": "ipython2", 323 | "version": "2.7.6" 324 | }, 325 | "pycharm": { 326 | "stem_cell": { 327 | "cell_type": "raw", 328 | "source": [], 329 | "metadata": { 330 | "collapsed": false 331 | } 332 | } 333 | } 334 | }, 335 | "nbformat": 4, 336 | "nbformat_minor": 0 337 | } --------------------------------------------------------------------------------
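One property of the neural computation in /QF_FB_C/lib_qf_fb.py worth noting: since `d + e**2 = 4*p*(1-p) + (2*p-1)**2 = 1` holds elementwise, the value returned by `QF_FB_NC.sim_neural_comp` is algebraically equal to the closed form `((e @ w.T)**2 + d.sum()) / n**2`, where `e = 2p-1`, `d = 4p(1-p)`, and `n` is the input length. Below is a minimal sketch that checks this equivalence numerically; it is not part of the released scripts, it assumes the repo root is on `PYTHONPATH`, and the names `batch`, `n_in`, and `n_out` are illustrative.

```python
import torch
from QF_FB_C.lib_qf_fb import QF_FB_NC

torch.manual_seed(0)
batch, n_in, n_out = 3, 4, 2  # illustrative sizes

layer = QF_FB_NC(n_in, n_out, bias=False)
p = torch.rand(batch, n_in)        # inputs interpreted as probabilities in [0, 1]
w = torch.ones_like(layer.weight)  # binarize the weights with the same rule as BinarizeF
w[layer.weight < 0] = -1

# Pairwise-loop formulation as implemented in the repo
out_loop = layer.sim_neural_comp(p, w)

# Closed form: ((w . (2p-1))**2 + sum_i 4*p_i*(1-p_i)) / n**2
e = 2 * p - 1
d = 4 * p * (1 - p)                # note d + e**2 == 1 elementwise
out_closed = ((e @ w.t()).pow(2) + d.sum(-1, keepdim=True)) / n_in ** 2

print(torch.allclose(out_loop, out_closed, atol=1e-6))  # expected: True
```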