├── .gitignore ├── QAE_Presentation.pdf ├── README.md ├── final_results ├── 0.0003_lr_64_samples_100_epochs.png ├── 0.0003_lr_64_samples_1_epochs.png ├── 0.0003_trained_model.npy ├── 0.0003_trained_model_4_2 compression.npy ├── 1_compr_0.0003_lr_64_samples_50_epochs.png ├── 1_compr_fidelities_0.0003_lr_64_samples_50_epochs.png ├── 2_compr_0.0003_lr_64_samples_50_epochs.png ├── 2_compr_fidelities_0.0003_lr_64_samples_50_epochs.png ├── 6_qubits_3_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png ├── 6_qubits_3_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate__trained_model.npy ├── 6_qubits_4_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png ├── 6_qubits_4_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate__trained_model.npy ├── 6_qubits_5_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png ├── 6_qubits_5_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate__trained_model.npy ├── fidelities_0.0003_lr_64_samples_100_epochs.png ├── fidelities_0.0003_lr_64_samples_1_epochs.png ├── fidelities_6_qubits_3_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png ├── fidelities_6_qubits_4_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png ├── fidelities_6_qubits_5_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate.png └── trained_model.npy ├── main.py ├── qae_model.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /QAE_Presentation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/QAE_Presentation.pdf -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # QuantumAutoencoder 2 | 3 | This project implements the work presented at https://arxiv.org/abs/1612.02806. 4 | 5 | The goal is to bring the advantages of quantum computing to data compression, reducing the space needed to store the data. A key computational saving comes from the method presented in the paper for estimating the fidelity, and thus the loss of the model: a reference state is added and a SWAP test is used to check whether the trash state produced by the encoder is orthogonal or identical to that reference. 6 | 7 | To run an experiment, clone the project and run the main program from the repository root with the desired experiment parameters, for example: 8 | 9 | python main.py --qubits 6 --latent_qubits 3 --num_samples 64 --epochs 50 --batch_size 8 --learning_rate 0.0003 --shots 1000 10 | 11 | All parameters are optional; any that are omitted fall back to the defaults defined in main.py (4 qubits, 2 latent qubits, 64 samples, 50 epochs, batch size 8, learning rate 0.0003, 1000 shots). 12 | 13 | The model is built with PennyLane and can be found in qae_model.py; the loss function and data loading are defined in utils.py. 14 | 15 | One modification with respect to the paper is writing the loss as loss = 1 / fidelity, which improved the fidelity reached during training. 16 |
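As a minimal, self-contained illustration of this fidelity estimate (not the repository's full encoder circuit; the RX preparations and angles below are placeholder choices), a standalone SWAP test in PennyLane can be written as:

    import pennylane as qml

    dev = qml.device("default.qubit", wires=3)

    @qml.qnode(dev)
    def swap_test(theta_a, theta_b):
        # two single-qubit states to compare (placeholder preparations)
        qml.RX(theta_a, wires=1)
        qml.RX(theta_b, wires=2)
        # SWAP test: the ancilla on wire 0 ends in |0> with probability
        # 1/2 + 1/2 * |<a|b>|^2, i.e. 1 for identical states, 0.5 for orthogonal ones
        qml.Hadamard(wires=0)
        qml.CSWAP(wires=[0, 1, 2])
        qml.Hadamard(wires=0)
        return qml.probs(wires=0)

    p0 = swap_test(0.3, 0.3)[0]  # probability of measuring the ancilla in |0>
    loss = 1 / p0                # the inverse-fidelity loss used in utils.py

The training in main.py uses this same P(ancilla = |0>) value as the fidelity score reported in the results table below.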
17 | Results obtained: 18 | 19 | | No. input qubits | No. output qubits | Fidelity | Train and test time | 20 | | ---------------- | ----------------- | --------- | -------------------- | 21 | | 4 | 3 | 0.997 | 15 mins | 22 | | 4 | 2 | 0.989 | 15 mins | 23 | | 6 | 5 | 0.996 | 50 mins | 24 | | 6 | 4 | 0.985 | 50 mins | 25 | | 6 | 3 | 0.902 | 50 mins | 26 | | 8 | 6 | 0.939 | 6 hours | -------------------------------------------------------------------------------- /final_results/0.0003_lr_64_samples_100_epochs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/0.0003_lr_64_samples_100_epochs.png -------------------------------------------------------------------------------- /final_results/0.0003_lr_64_samples_1_epochs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/0.0003_lr_64_samples_1_epochs.png -------------------------------------------------------------------------------- /final_results/0.0003_trained_model.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/0.0003_trained_model.npy -------------------------------------------------------------------------------- /final_results/0.0003_trained_model_4_2 compression.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/0.0003_trained_model_4_2 compression.npy -------------------------------------------------------------------------------- /final_results/1_compr_0.0003_lr_64_samples_50_epochs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/1_compr_0.0003_lr_64_samples_50_epochs.png -------------------------------------------------------------------------------- /final_results/1_compr_fidelities_0.0003_lr_64_samples_50_epochs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/1_compr_fidelities_0.0003_lr_64_samples_50_epochs.png -------------------------------------------------------------------------------- /final_results/2_compr_0.0003_lr_64_samples_50_epochs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/2_compr_0.0003_lr_64_samples_50_epochs.png -------------------------------------------------------------------------------- /final_results/2_compr_fidelities_0.0003_lr_64_samples_50_epochs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/2_compr_fidelities_0.0003_lr_64_samples_50_epochs.png --------------------------------------------------------------------------------
/final_results/6_qubits_3_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/6_qubits_3_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png -------------------------------------------------------------------------------- /final_results/6_qubits_3_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate__trained_model.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/6_qubits_3_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate__trained_model.npy -------------------------------------------------------------------------------- /final_results/6_qubits_4_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/6_qubits_4_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png -------------------------------------------------------------------------------- /final_results/6_qubits_4_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate__trained_model.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/6_qubits_4_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate__trained_model.npy -------------------------------------------------------------------------------- /final_results/6_qubits_5_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/6_qubits_5_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png -------------------------------------------------------------------------------- /final_results/6_qubits_5_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate__trained_model.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/6_qubits_5_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate__trained_model.npy -------------------------------------------------------------------------------- /final_results/fidelities_0.0003_lr_64_samples_100_epochs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/fidelities_0.0003_lr_64_samples_100_epochs.png -------------------------------------------------------------------------------- /final_results/fidelities_0.0003_lr_64_samples_1_epochs.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/fidelities_0.0003_lr_64_samples_1_epochs.png -------------------------------------------------------------------------------- /final_results/fidelities_6_qubits_3_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/fidelities_6_qubits_3_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png -------------------------------------------------------------------------------- /final_results/fidelities_6_qubits_4_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/fidelities_6_qubits_4_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate_.png -------------------------------------------------------------------------------- /final_results/fidelities_6_qubits_5_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/fidelities_6_qubits_5_latent_qubits_64_num_samples_50_epochs_8_batch_size_0.0003_learning_rate.png -------------------------------------------------------------------------------- /final_results/trained_model.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/theodoradragan/QuantumAutoencoder/99e815023d69a22fcd58837514c258751b500cd4/final_results/trained_model.npy -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: Theodora Dragan 4 | """ 5 | 6 | import argparse 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | import pennylane as qml 10 | import timeit 11 | import torch 12 | 13 | import os 14 | os.environ['KMP_DUPLICATE_LIB_OK']='True' 15 | 16 | from qae_model import QAE_model 17 | from utils import get_dataset, loss_func 18 | 19 | ### Default parameters 20 | 21 | epochs = 50 22 | learning_rate = 0.0003 # 0.0003 is the best so far 23 | batch_size = 8 24 | num_samples = 64 25 | 26 | total_qubits = 4 27 | latent_qubits = 2 28 | 29 | shots = 1000 30 | 31 | ### Reading the parameters from the command line, if given 32 | 33 | description = "The main program to run the Quantum Autoencoder" 34 | parser = argparse.ArgumentParser(description=description) 35 | 36 | parser.add_argument("-q", "--qubits", help="Set number of qubits") 37 | parser.add_argument("-lq", "--latent_qubits", help="Set the number of final qubits") 38 | parser.add_argument("-ns", "--num_samples", help="Set number of training samples") 39 | parser.add_argument("-e", "--epochs", help="Set number of epochs") 40 | parser.add_argument("-b", "--batch_size", help="Set batch size") 41 | parser.add_argument("-lr", "--learning_rate", help="Set learning rate for optimizer") 42 | parser.add_argument("-s", "--shots", help="Set number of shots") 43 | 44 | 
args = parser.parse_args() 45 | 46 | print("Experiment Parameters:") 47 | 48 | if args.qubits: 49 | total_qubits = int(args.qubits) 50 | print("Set no. of qubits to %s" % total_qubits) 51 | 52 | if args.latent_qubits: 53 | latent_qubits = int(args.latent_qubits) 54 | print("Set no. of final qubits (after compression) to %s" % latent_qubits) 55 | 56 | if args.epochs: 57 | epochs = int(args.epochs) 58 | print("Set no. of epochs to %s" % epochs) 59 | 60 | if args.learning_rate: 61 | learning_rate = float(args.learning_rate) 62 | print("Set learning rate to %s" % learning_rate) 63 | 64 | if args.batch_size: 65 | batch_size = int(args.batch_size) 66 | print("Set batch size to %s" % batch_size) 67 | 68 | if args.num_samples: 69 | num_samples = int(args.num_samples) 70 | print("Set number of training samples to %s" % num_samples) 71 | if args.shots: shots = int(args.shots); print("Set number of shots to %s" % shots) 72 | num_batches = int(num_samples/batch_size) 73 | 74 | trash_qubits = total_qubits - latent_qubits 75 | wires = 1 + trash_qubits + total_qubits 76 | dev = qml.device("default.qubit", wires = wires, shots = shots) 77 | 78 | qae = QAE_model(dev, wires, 1, trash_qubits) 79 | 80 | # the 1 in the line above is for the ancilla qubit that starts in |0> 81 | # and is measured in the SWAP test 82 | # and we add trash_qubits as that is the number of qubits for the reference state 83 | 84 | input_data = get_dataset(img_width = 2, img_height = int(total_qubits/2), train = True) 85 | 86 | start = timeit.default_timer() 87 | total_loss_train = [] 88 | total_loss_test = [] 89 | 90 | fidelity_train = [] 91 | fidelity_val = [] 92 | 93 | opt = torch.optim.Adam(qae.parameters(), lr = learning_rate) 94 | for epoch in range(epochs): 95 | all_outs = 0 96 | running_loss_train = 0 97 | running_loss_val = 0 98 | 99 | running_fidelity_train = 0 100 | running_fidelity_val = 0 101 | 102 | for batch_id in range(num_batches): 103 | shuffled_indices = np.arange(batch_size) 104 | np.random.shuffle(shuffled_indices) 105 | outcomes = [] 106 | for i in range(batch_size): 107 | opt.zero_grad() 108 | idx = batch_id * batch_size + shuffled_indices[i] 109 | dataset_input = torch.reshape(input_data[idx][0], (1, total_qubits)) 110 | input_item = torch.cat((torch.zeros([1, 1 + trash_qubits]), 111 | dataset_input), 1) 112 | outcome = qae.forward(input_item, True) 113 | 114 | loss = loss_func(outcome) 115 | loss.backward() 116 | running_loss_train += loss.item() # .item() keeps a plain float for logging and plotting 117 | running_fidelity_train += torch.squeeze(outcome)[0].item() 118 | opt.step() 119 | 120 | 121 | # the 2000 offset just takes a different slice of the dataset for validation 122 | idx_val = 2000 + batch_id * batch_size + shuffled_indices[i] 123 | dataset_input_val = torch.reshape(input_data[idx_val][0], (1, total_qubits)) 124 | ## the cat prepends the ancilla and reference qubits, all initialised to zero 125 | input_item_val = torch.cat((torch.zeros([1, 1 + trash_qubits]), dataset_input_val), 1) 126 | outcome_val = qae.forward(input_item_val, True) 127 | loss_test = loss_func(outcome_val) 128 | running_loss_val += loss_test.item() 129 | running_fidelity_val += torch.squeeze(outcome_val)[0].item() 130 | 131 | total_loss_train.append(running_loss_train/num_samples) 132 | total_loss_test.append(running_loss_val/num_samples) 133 | 134 | fidelity_train.append(running_fidelity_train/num_samples) 135 | fidelity_val.append(running_fidelity_val/num_samples) 136 | 137 | if epoch % 1 == 0: 138 | print('Error for training for epoch no. ' + str(epoch + 1) + ': {:.4f}'.format(running_loss_train/num_samples)) 139 | print('Error for validation for epoch no. ' + str(epoch + 1) + ': {:.4f}'.format(running_loss_val/num_samples)) 140 |
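# Note on data slices: the training loop above draws samples from indices [0, num_samples),
# the validation passes use a shifted slice starting at index 2000, and the held-out test
# loop below uses indices [4000, 4064), so the three sets do not overlap for the default sizes.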
141 | ### Testing on never-seen-before data 142 | running_loss_test = 0 143 | running_fidelity_test = 0 144 | 145 | experiment_title = str(total_qubits) + '_qubits_' + str(latent_qubits) + '_latent_qubits_' 146 | experiment_title += str(num_samples) + '_num_samples_' + str(epochs) + '_epochs_' 147 | experiment_title += str(batch_size) + '_batch_size_' + str(learning_rate) + '_learning_rate_' 148 | 149 | 150 | for i in range(64): 151 | idx_test = 4000 + i 152 | dataset_input_test = torch.reshape(input_data[idx_test][0], (1, total_qubits)) 153 | ## the cat prepends the ancilla and reference qubits, all initialised to zero 154 | input_item_test = torch.cat((torch.zeros([1, 1 + trash_qubits]), dataset_input_test), 1) 155 | outcome_test = qae.forward(input_item_test, True) 156 | loss_test = loss_func(outcome_test) 157 | running_loss_test += loss_test.item() 158 | running_fidelity_test += torch.squeeze(outcome_test)[0].item() 159 | 160 | 161 | print("Final loss: ", running_loss_test / 64 ) 162 | print("Final fidelity: ", running_fidelity_test / 64) 163 | 164 | ### Save trained model 165 | model_path = os.path.join(os.getcwd(), experiment_title + '_trained_model.npy') 166 | print(model_path) 167 | torch.save(qae.state_dict(), model_path) 168 | 169 | ### Plotting the result losses 170 | 171 | figure_path = os.path.join(os.getcwd(), experiment_title + '.png') 172 | print(figure_path) 173 | epochs_array = np.arange(epochs) 174 | plt.plot(epochs_array, total_loss_train, color="blue", label="train") 175 | plt.plot(epochs_array, total_loss_test, color="red", label="validation") 176 | plt.xlabel("Epochs") 177 | plt.ylabel("Loss") 178 | plt.legend(loc="upper right") 179 | plt.draw() 180 | plt.savefig(figure_path) 181 | 182 | ### Plotting the result fidelities 183 | plt.clf() 184 | figure_path = os.path.join(os.getcwd(), 'fidelities_' + experiment_title + '.png') 185 | print(figure_path) 186 | epochs_array = np.arange(epochs) 187 | plt.plot(epochs_array, fidelity_train, color="blue", label="train") 188 | plt.plot(epochs_array, fidelity_val, color="red", label="validation") 189 | plt.xlabel("Epochs") 190 | plt.ylabel("Fidelity") 191 | plt.legend(loc="upper right") 192 | plt.draw() 193 | plt.savefig(figure_path) 194 | 195 | end = timeit.default_timer() 196 | print("The whole experiment took {:.2f} seconds".format(end - start)) -------------------------------------------------------------------------------- /qae_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: Theodora Dragan 4 | """ 5 | 6 | import pennylane as qml 7 | import torch.nn as nn 8 | 9 | class QAE_model(nn.Module): 10 | @qml.template 11 | def angle_embedding(self,inputs): 12 | qml.templates.embeddings.AngleEmbedding(inputs, wires = range(self.n_aux_qubits + self.n_trash_qubits, self.n_qubits), rotation = 'X') 13 | 14 | @qml.template 15 | def amplitude_embedding(self,inputs): 16 | qml.templates.embeddings.AmplitudeEmbedding(inputs, wires = range(self.n_aux_qubits + self.n_trash_qubits, self.n_qubits), normalize = True, pad=(0.j)) # unused alternative to angle_embedding 17 | 18 | @qml.template 19 | def SWAP(self): 20 | for i in range(self.n_aux_qubits): 21 | qml.Hadamard(wires = i) 22 | for i in range(self.n_trash_qubits): 23 | qml.CSWAP(wires = [i, i + self.n_aux_qubits , 2 * self.n_trash_qubits - i]) 24 | for i in range(self.n_aux_qubits): 25 | qml.Hadamard(wires = i) 26 | 27 | def forward(self, x, training_mode = True): 28 | self.training_mode = training_mode
29 | x = self.qlayer(x) 30 | #print(self.qlayer.qnode.draw()) 31 | return x 32 | 33 | 34 | def __init__(self, dev, n_qubits, n_aux_qubits, n_trash_qubits): 35 | super(QAE_model, self).__init__() 36 | 37 | self.n_qubits = n_qubits 38 | self.n_aux_qubits = n_aux_qubits 39 | self.n_trash_qubits = n_trash_qubits 40 | self.dev = dev 41 | 42 | @qml.qnode(dev) 43 | def q_circuit(params_rot_begin, params_crot, params_rot_end, inputs=False): 44 | # Embed the input 45 | # print(n_aux_qubits + n_trash_qubits) 46 | # print(len(inputs)) 47 | self.angle_embedding(inputs[n_aux_qubits + n_trash_qubits:]) 48 | 49 | # Add the first rotational gates: 50 | idx = 0 51 | for i in range(n_aux_qubits + n_trash_qubits, n_qubits): 52 | # qml.Rot(phi, theta, omega, wire) 53 | qml.Rot(params_rot_begin[idx], params_rot_begin[idx+1], params_rot_begin[idx+2], wires = i) 54 | idx += 3 55 | 56 | # Add the controlled rotational gates 57 | idx = 0 58 | for i in range(n_aux_qubits + n_trash_qubits, n_qubits): 59 | for j in range(n_aux_qubits + n_trash_qubits, n_qubits): 60 | if i != j: 61 | qml.CRot(params_crot[idx], params_crot[idx+1], params_crot[idx+2], wires = [i, j]) 62 | idx += 3 63 | 64 | # Add the final rotational gates: 65 | idx = 0 66 | for i in range(n_aux_qubits + n_trash_qubits, n_qubits): 67 | # qml.Rot(phi, theta, omega, wire) 68 | qml.Rot(params_rot_end[idx], params_rot_end[idx+1], params_rot_end[idx+2], wires = i) 69 | idx += 3 70 | 71 | # In the end, apply the SWAP test 72 | self.SWAP() 73 | 74 | return [qml.probs(i) for i in range(self.n_aux_qubits)] 75 | 76 | 77 | training_qubits_size = n_qubits - n_aux_qubits - n_trash_qubits 78 | 79 | weight_shapes = {"params_rot_begin": (training_qubits_size * 3), 80 | "params_crot": (training_qubits_size * (training_qubits_size - 1) * 3), 81 | "params_rot_end": (training_qubits_size * 3)} 82 | 83 | self.qlayer = qml.qnn.TorchLayer(q_circuit, weight_shapes) -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: Theodora Dragan 4 | """ 5 | import torch 6 | from torchvision import datasets,transforms 7 | 8 | def loss_func(output): 9 | # Implemented as the inverse-fidelity loss: loss = 1 / fidelity 10 | # output[0] because we take the probability that the state after the 11 | # SWAP test is ket(0), like the reference state 12 | fidelity_loss = 1 / torch.squeeze(output)[0] 13 | return fidelity_loss 14 | 15 | def get_dataset(img_width, img_height, train): 16 | trainset = datasets.MNIST(root='./dataset', train=train, download=True, 17 | transform=transforms.Compose([transforms.Resize((img_width, img_height)),transforms.ToTensor()]) 18 | ) 19 | return trainset --------------------------------------------------------------------------------
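For reference, here is a minimal sketch of how one of the saved checkpoints in final_results/ could be reloaded and evaluated on unseen MNIST digits. This is an illustration only: it assumes the checkpoint was trained with the default 4-qubit / 2-latent-qubit configuration from main.py, so the hyperparameters and the chosen file name should be adjusted to match the checkpoint actually loaded.

    import pennylane as qml
    import torch

    from qae_model import QAE_model
    from utils import get_dataset, loss_func

    # These must match the configuration the checkpoint was trained with
    # (assumed here: the defaults from main.py).
    total_qubits, latent_qubits, shots = 4, 2, 1000
    trash_qubits = total_qubits - latent_qubits
    wires = 1 + trash_qubits + total_qubits  # ancilla + reference qubits + data qubits

    dev = qml.device("default.qubit", wires=wires, shots=shots)
    qae = QAE_model(dev, wires, 1, trash_qubits)
    qae.load_state_dict(torch.load("final_results/trained_model.npy"))

    # Evaluate on a single test image, prepared exactly as in main.py.
    data = get_dataset(img_width=2, img_height=total_qubits // 2, train=False)
    sample = torch.reshape(data[0][0], (1, total_qubits))
    model_input = torch.cat((torch.zeros([1, 1 + trash_qubits]), sample), 1)

    outcome = qae.forward(model_input, True)
    fidelity = torch.squeeze(outcome)[0]
    print("fidelity:", fidelity.item(), "loss:", loss_func(outcome).item())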