├── .gitignore ├── .gitmodules ├── README.md ├── quantum_optimal_control ├── __init__.py ├── core │ ├── __init__.py │ ├── analysis.py │ ├── convergence.py │ ├── regularization_functions.py │ ├── run_session.py │ ├── system_parameters.py │ └── tensorflow_state.py ├── helper_functions │ ├── __init__.py │ ├── data_management.py │ ├── grape_functions.py │ └── qutip_verification.py └── main_grape │ ├── __init__.py │ └── grape.py ├── setup.py └── version.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *.cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # Jupyter Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # SageMath parsed files 79 | *.sage.py 80 | 81 | # Environments 82 | .env 83 | .venv 84 | env/ 85 | venv/ 86 | ENV/ 87 | 88 | # Spyder project settings 89 | .spyderproject 90 | .spyproject 91 | 92 | # Rope project settings 93 | .ropeproject 94 | 95 | # mkdocs documentation 96 | /site 97 | 98 | # mypy 99 | .mypy_cache/ 100 | 101 | *.swp 102 | sbatch_file/* 103 | *.pyc 104 | .ipynb_checkpoints/ 105 | Examples/* 106 | *.so 107 | *.o 108 | custom_kernels/build/temp/* 109 | 110 | #ipynb 111 | ipynb 112 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "examples"] 2 | path = examples 3 | url = https://github.com/SchusterLab/GRAPE-Tensorflow-Examples 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This work has been followed up [here](https://github.com/SchusterLab/rbqoc). 2 | 3 | 4 | # GRAPE-Tensorflow 5 | 6 | This is the code repository of our recent publication "Speedup for quantum optimal control from automatic differentiation based on graphics processing units" https://journals.aps.org/pra/abstract/10.1103/PhysRevA.95.042318 7 | 8 | This is a software package that performs quantum optimal control using the automatic differentiation capabilities of [Tensorflow](https://www.tensorflow.org/) and has full GPU support. 
Its main goal is to produce a set of optimal control pulses that, applied over a given period of time, drive a quantum system to achieve a certain unitary gate or to reach a certain final quantum state with a fidelity as close as possible to unity. In addition, the user can add any penalties (cost functions) on either the control pulses or the intermediate quantum states, and the code will automatically include these constraints in the optimization process without the user having to write down an analytical form for the gradient of the new cost function.
 9 | 
10 | As an example of what the package produces, here is its output in the example of a qubit pi pulse:
11 | 
12 | 
13 | ![Qubit Pi Pulse Example](http://i.imgur.com/OfqFqZ6.png)
14 | 
15 | # Setup
16 | You will just need to set up TensorFlow. Please follow the instructions [here](https://www.tensorflow.org/versions/r0.10/get_started/os_setup.html).
17 | 
18 | Currently, only Linux systems and Python 2.7 are supported.
19 | 
20 | # Currently Implemented Cost Functions
21 | Refer to the [Regularization functions file](https://github.com/SchusterLab/GRAPE-Tensorflow/blob/master/core/RegularizationFunctions.py) for details or to add a new cost function.
22 | **1) The fidelity cost function:** The overlap between the target unitary/final state and the achieved unitary/final state. In the code, it is referred to as tfs.loss.
23 | **2) The gaussian envelope cost function:** A penalty if the control pulses do not have a gaussian envelope. The user supplies a coefficient called **'envelope'** in the reg_coeffs input. A value of 0.01 is empirically found to be a good starting value.
24 | **3) The first derivative cost function:** To make the control pulses smooth. The user supplies a coefficient called **'dwdt'** in the reg_coeffs input. A value of 0.001 is empirically found to be a good starting value.
25 | **4) The second derivative cost function:** To make the control pulses smooth. The user supplies a coefficient called **'d2wdt2'** in the reg_coeffs input. A value of 0.000001 is empirically found to be a good starting value.
26 | **5) The bandpass cost function:** To suppress control-pulse frequency components outside the defined band **'band'**. The user supplies a coefficient called **'bandpass'** (start around 0.1). This cost function requires a GPU, since the TensorFlow FFT is only implemented on GPU.
27 | **6) The forbidden state cost function:** A cost function to forbid the occupation of certain levels throughout the duration of the control. The user supplies a coefficient called **'forbidden'** (start around 100 empirically) and a list called **'states_forbidden_list'** specifying the indices of the levels to forbid. **forbid_dressed**: a boolean (default is True) to forbid dressed states (the Hamiltonian's eigenvectors) rather than bare states in coupled systems.
28 | **7) The time optimal cost function:** To speed up the gate, the user supplies a coefficient called **'speed_up'** (start around 100) that rewards occupation of the target state at all intermediate time steps, hence making the gate as fast as possible.
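
As a concrete illustration, here is a minimal sketch of a `reg_coeffs` dictionary combining several of the penalties above. The coefficient values are just the empirical starting points suggested above, and the forbidden-level indices are hypothetical placeholders; check the regularization functions file for the exact keys your version expects:

```python
# A sketch only: values are the suggested empirical starting points, and the
# forbidden-level indices [2, 3] are hypothetical placeholders for your system.
reg_coeffs = {'envelope': 0.01,                 # enforce a gaussian pulse envelope
              'dwdt': 0.001,                    # penalize the first time derivative
              'd2wdt2': 0.000001,               # penalize the second time derivative
              'forbidden': 100,                 # penalize occupying forbidden levels
              'states_forbidden_list': [2, 3],  # hypothetical level indices to forbid
              'forbid_dressed': True,           # forbid dressed rather than bare states
              'speed_up': 100}                  # reward reaching the target state early
```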
29 | 
30 | 
31 | **To add a new cost function:**
32 | Just follow the same logic we used and add new code [here](https://github.com/SchusterLab/GRAPE-Tensorflow/blob/master/core/RegularizationFunctions.py) penalizing properties of:
33 | 1) The control fields: held in **tfs.ops_weight**
34 | and/or
35 | 2) The intermediate states: held in **tfs.inter_vecs**
36 | 
37 | 
38 | # Use
39 | You should call this function:
40 | ```python
41 | uks, U_final = Grape(H0, Hops, Hnames, U, total_time, steps, states_concerned_list, convergence, U0,
42 |                      reg_coeffs, dressed_info, maxA, use_gpu, draw, initial_guess, show_plots, H_time_scales,
43 |                      unitary_error, method, state_transfer, no_scaling, freq_unit, file_name, save, data_path)
44 | ```
45 | 
46 | You can follow the [examples](https://github.com/SchusterLab/GRAPE-Tensorflow-Examples/tree/master) we provide for details on defining the quantum system and then calling the function. We suggest starting with a simple example (e.g. spin pi).
47 | 
48 | # Returns:
49 | **uks:** The optimized control pulses (a list of lists of floats, each of length equal to the number of control time steps for its operator), in the same order as the input
50 | **U_final:** The final unitary (n by n)
51 | 
52 | # Mandatory Arguments:
53 | **H0:** Drift Hamiltonian (n by n)
54 | **Hops:** A list of control Hamiltonians (k Hamiltonians, each n by n)
55 | **Hnames:** A list of control Hamiltonian names (k string elements)
56 | **U:** Target unitary (n by n) if state_transfer = False; a vector (n by 1) if state_transfer = True
57 | **total_time:** Total time (float)
58 | **steps:** Number of time steps (int)
59 | **states_concerned_list:** Initial states (a list of integers specifying the indices of those states)
60 | 
61 | # Optional Arguments:
62 | **U0:** Initial unitary (n by n); default is the identity
63 | **convergence:** A dictionary (can be empty) that might include the following parameters with default values as shown:
64 |                convergence = {'rate':0.01, 'update_step':100, 'max_iterations':5000,
65 |                'conv_target':1e-8,'learning_rate_decay':2500, 'min_grad': 1e-25}
66 | **initial_guess:** A list of k elements, each a steps-sized array, defining the initial pulses for all operators. If not provided, a gaussian random distribution is used by default.
67 | **reg_coeffs:** A dictionary of regularization coefficients
68 | **dressed_info:** A dictionary including the eigenvalues and eigenstates of the dressed states
69 | **maxA:** A list of the maximum amplitudes of the control pulses (the default value is 4)
70 | **use_gpu:** a boolean switching between GPU and CPU; default is True
71 | **sparse_H, sparse_U, sparse_K:** booleans specifying whether the (Hamiltonian, unitary operator, unitary evolution) matrices are sparse. A speedup is expected if the corresponding sparsity holds. (only available on CPU)
72 | **use_inter_vecs:** a boolean to enable/disable including the state evolution in graph building
73 | **draw:** a list including the indices and names of the states to include when drawing state occupation. Ex: states_draw_list = [0,1,2,3,4],
74 | states_draw_names = ['g00','g01','g10','g11','e00'] and draw = [states_draw_list,states_draw_names].
75 | The default is to draw the states with indices 0-3.
76 | **show_plots:** a boolean (default is True) toggling between a progress bar and graphs
77 | **state_transfer:** a boolean (default is False). If True, state transfer is targeted and U is expected to be a vector, not a matrix; if False, unitary evolution is targeted.
78 | **method:** 'ADAM', 'BFGS', 'L-BFGS-B' or 'EVOLVE', defining the optimizer; default is 'ADAM'. 'EVOLVE' only simulates the propagation without optimizing.
79 | **unitary_error:** a float indicating the desired maximum error of the Taylor expansion of the matrix exponential, used to choose a proper number of expansion terms; default is 1e-4
80 | **no_scaling**: a boolean (default is False) to disable scaling and squaring
81 | **Taylor_terms**: a list [expansion terms, scaling and squaring terms] to manually choose the Taylor terms for the matrix exponentials
82 | **freq_unit**: a string with default 'GHz'. Can be 'MHz', 'kHz' or 'Hz'
83 | **file_name**: file name for saving the simulation
84 | **save**: a boolean (default is True) to save the control ops, intermediate vectors and final unitary at every update step
85 | **data_path**: path for saving the simulation
86 | 
87 | # More examples:
88 | We applied the optimizer to generate photonic Schrödinger cat states for a circuit quantum electrodynamics system:
89 | ![photonic Schrodinger cat states](http://i.imgur.com/ponY2R9.png)
90 | 
91 | 
92 | # Questions
93 | If you have any questions, please reach out to any of the developers of the package: Nelson Leung (nelsonleung@uchicago.edu), Mohamed Abdelhafez (abdelhafez@uchicago.edu) or David Schuster (david.schuster@uchicago.edu)
94 | 
--------------------------------------------------------------------------------
/quantum_optimal_control/__init__.py:
--------------------------------------------------------------------------------
1 | #IMPORTS
2 | from core import *
3 | from helper_functions import *
4 | from main_grape import *
--------------------------------------------------------------------------------
/quantum_optimal_control/core/__init__.py:
--------------------------------------------------------------------------------
1 | #IMPORTS
2 | from analysis import *
3 | from convergence import *
4 | from regularization_functions import *
5 | from system_parameters import *
6 | from tensorflow_state import *
7 | from run_session import *
--------------------------------------------------------------------------------
/quantum_optimal_control/core/analysis.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from quantum_optimal_control.helper_functions.grape_functions import sort_ev,get_state_index
3 | import os
4 | import tensorflow as tf
5 | 
6 | from quantum_optimal_control.helper_functions.data_management import H5File
7 | 
8 | class Analysis:
9 | 
10 |     def __init__(self, sys_para, tf_final_state, tf_ops_weight, tf_unitary_scale, tf_inter_vecs):
11 |         self.sys_para = sys_para
12 |         self.tf_final_state = tf_final_state
13 |         self.tf_ops_weight = tf_ops_weight
14 |         self.tf_unitary_scale = tf_unitary_scale
15 |         self.tf_inter_vecs = tf_inter_vecs
16 |         self.this_dir = os.path.dirname(__file__)
17 | 
18 |     def RtoCMat(self,M):
19 |         # real-to-complex matrix isomorphism: [[A, -B], [B, A]] represents A + 1j*B
20 |         state_num = self.sys_para.state_num
21 |         M_real = M[:state_num,:state_num]
22 |         M_imag = M[state_num:2*state_num,:state_num]
23 | 
24 |         return (M_real+1j*M_imag)
25 | 
26 |     def get_final_state(self,save=True):
27 |         # get final evolved unitary state
28 |         M = self.tf_final_state.eval()
29 |         CMat = self.RtoCMat(M)
30 | 
31 |         if self.sys_para.save and save:
32 |             with H5File(self.sys_para.file_path) as hf:
33 |                 hf.append('final_state',np.array(M))
34 | 
35 |         return CMat
36 | 
37 |     def get_ops_weight(self):
38 |         # get control field
39 |         ops_weight = self.tf_ops_weight.eval()
40 | 
41 |         return ops_weight
42 | 
43 | 
44 
| def get_inter_vecs(self): 45 | # get propagated states at each time step 46 | if not self.sys_para.use_inter_vecs: 47 | return None 48 | 49 | state_num = self.sys_para.state_num 50 | inter_vecs_mag_squared = [] 51 | 52 | inter_vecs_real = [] 53 | inter_vecs_imag = [] 54 | 55 | if self.sys_para.is_dressed: 56 | v_sorted=sort_ev(self.sys_para.v_c,self.sys_para.dressed_id) 57 | 58 | ii=0 59 | 60 | inter_vecs = tf.stack(self.tf_inter_vecs).eval() 61 | 62 | if self.sys_para.save: 63 | with H5File(self.sys_para.file_path) as hf: 64 | hf.append('inter_vecs_raw_real',np.array(inter_vecs[:,0:state_num,:])) 65 | hf.append('inter_vecs_raw_imag',np.array(inter_vecs[:,state_num:2*state_num,:])) 66 | 67 | for inter_vec in inter_vecs: 68 | inter_vec_real = (inter_vec[0:state_num,:]) 69 | inter_vec_imag = (inter_vec[state_num:2*state_num,:]) 70 | inter_vec_c = inter_vec_real+1j*inter_vec_imag 71 | 72 | if self.sys_para.is_dressed: 73 | 74 | dressed_vec_c= np.dot(np.transpose(v_sorted),inter_vec_c) 75 | 76 | inter_vec_mag_squared = np.square(np.abs(dressed_vec_c)) 77 | 78 | inter_vec_real = np.real(dressed_vec_c) 79 | inter_vec_imag = np.imag(dressed_vec_c) 80 | 81 | else: 82 | inter_vec_mag_squared = np.square(np.abs(inter_vec_c)) 83 | 84 | inter_vec_real = np.real(inter_vec_c) 85 | inter_vec_imag = np.imag(inter_vec_c) 86 | 87 | 88 | inter_vecs_mag_squared.append(inter_vec_mag_squared) 89 | 90 | inter_vecs_real.append(inter_vec_real) 91 | inter_vecs_imag.append(inter_vec_imag) 92 | 93 | ii+=1 94 | 95 | if self.sys_para.save: 96 | with H5File(self.sys_para.file_path) as hf: 97 | hf.append('inter_vecs_mag_squared',np.array(inter_vecs_mag_squared)) 98 | hf.append('inter_vecs_real',np.array(inter_vecs_real)) 99 | hf.append('inter_vecs_imag',np.array(inter_vecs_imag)) 100 | 101 | return inter_vecs_mag_squared 102 | -------------------------------------------------------------------------------- /quantum_optimal_control/core/convergence.py: -------------------------------------------------------------------------------- 1 | import time 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | import matplotlib.gridspec as gridspec 5 | from IPython import display 6 | from quantum_optimal_control.helper_functions.grape_functions import sort_ev 7 | 8 | 9 | class Convergence: 10 | 11 | def __init__(self,sys_para,time_unit,convergence): 12 | # paramters 13 | self.sys_para = sys_para 14 | self.time_unit = time_unit 15 | 16 | if 'rate' in convergence: 17 | self.rate = convergence['rate'] 18 | else: 19 | self.rate = 0.01 20 | 21 | if 'update_step' in convergence: 22 | self.update_step = convergence['update_step'] 23 | else: 24 | self.update_step = 100 25 | 26 | if 'evol_save_step' in convergence: 27 | self.evol_save_step = convergence['evol_save_step'] 28 | else: 29 | self.evol_save_step = 100 30 | 31 | if 'conv_target' in convergence: 32 | self.conv_target = convergence['conv_target'] 33 | else: 34 | self.conv_target = 1e-8 35 | 36 | if 'max_iterations' in convergence: 37 | self.max_iterations = convergence['max_iterations'] 38 | else: 39 | self.max_iterations = 5000 40 | 41 | if 'learning_rate_decay' in convergence: 42 | self.learning_rate_decay = convergence['learning_rate_decay'] 43 | else: 44 | self.learning_rate_decay = 2500 45 | 46 | if 'min_grad' in convergence: 47 | self.min_grad = convergence['min_grad'] 48 | else: 49 | self.min_grad = 1e-25 50 | 51 | 52 | self.reset_convergence() 53 | if self.sys_para.show_plots: 54 | plt.figure() 55 | 56 | def reset_convergence(self): 57 | self.costs=[] 58 | 
self.reg_costs = [] 59 | self.iterations=[] 60 | self.learning_rate=[] 61 | self.last_iter = 0 62 | self.accumulate_rate = 1.00 63 | 64 | def save_evol(self,anly): 65 | if self.sys_para.state_transfer == False: 66 | self.final_state = anly.get_final_state() 67 | 68 | self.inter_vecs = anly.get_inter_vecs() 69 | 70 | def update_plot_summary(self,last_cost, last_reg_cost, anly): 71 | self.concerned = self.sys_para.states_concerned_list 72 | self.last_cost = last_cost 73 | self.last_reg_cost = last_reg_cost 74 | 75 | self.anly = anly 76 | self.save_evol(anly) 77 | self.plot_summary() 78 | 79 | def get_convergence(self): 80 | self.costs.append(self.last_cost) 81 | self.reg_costs.append(self.last_reg_cost) 82 | self.iterations.append(self.last_iter) 83 | self.last_iter+=self.update_step 84 | 85 | 86 | def plot_inter_vecs_general(self,pop_inter_vecs,start): 87 | # plot state evolution 88 | if self.sys_para.draw_list !=[]: 89 | for kk in range(len(self.sys_para.draw_list)): 90 | plt.plot(np.array([self.sys_para.dt* ii for ii in range(self.sys_para.steps+1)]),np.array(pop_inter_vecs[self.sys_para.draw_list[kk],:]),label=self.sys_para.draw_names[kk]) 91 | 92 | 93 | else: 94 | 95 | if start > 4: 96 | plt.plot(np.array([self.sys_para.dt* ii for ii in range(self.sys_para.steps+1)]),np.array(pop_inter_vecs[start,:]),label='Starting level '+str(start)) 97 | 98 | for jj in range(4): 99 | 100 | plt.plot(np.array([self.sys_para.dt* ii for ii in range(self.sys_para.steps+1)]),np.array(pop_inter_vecs[jj,:]),label='level '+str(jj)) 101 | 102 | 103 | forbidden =np.zeros(self.sys_para.steps+1) 104 | if 'states_forbidden_list' in self.sys_para.reg_coeffs: 105 | # summing all population of forbidden states 106 | for forbid in self.sys_para.reg_coeffs['states_forbidden_list']: 107 | if self.sys_para.dressed_info is None or ('forbid_dressed' in self.sys_para.reg_coeffs and self.sys_para.reg_coeffs['forbid_dressed']) : 108 | forbidden = forbidden +np.array(pop_inter_vecs[forbid,:]) 109 | else: 110 | v_sorted=sort_ev(self.sys_para.v_c,self.sys_para.dressed_id) 111 | dressed_vec= np.dot(v_sorted,np.sqrt(pop_inter_vecs)) 112 | forbidden = forbidden +np.array(np.square(np.abs(dressed_vec[forbid,:]))) 113 | 114 | plt.plot(np.array([self.sys_para.dt* ii for ii in range(self.sys_para.steps+1)]), forbidden,label='forbidden',linestyle='--',linewidth=4) 115 | 116 | plt.ylabel('Population') 117 | plt.ylim(-0.1,1.1) 118 | plt.xlabel('Time ('+ self.time_unit+')') 119 | plt.legend(ncol=7) 120 | 121 | def plot_summary(self): 122 | # plotting data 123 | 124 | if not self.last_iter == 0: 125 | self.runtime = time.time() - self.start_time 126 | self.estimated_runtime = float(self.runtime * (self.max_iterations-self.last_iter) / self.last_iter)/(60*60) 127 | else: 128 | self.start_time = time.time() 129 | self.runtime = 0 130 | self.estimated_runtime = 0 131 | 132 | 133 | self.get_convergence() 134 | i1=0 135 | i2=0 136 | 137 | if self.sys_para.state_transfer: 138 | i2 = i2-1 139 | 140 | gs = gridspec.GridSpec(3+i1+i2+len(self.concerned), 2) 141 | 142 | index = 0 143 | ## cost 144 | if self.sys_para.show_plots == True: 145 | 146 | 147 | 148 | plt.subplot(gs[index, :],title='Error = %1.2e; Other errors = %1.2e; Unitary Metric: %.5f; Runtime: %.1fs; Estimated Remaining Runtime: %.1fh' % (self.last_cost, self.last_reg_cost-self.last_cost, 149 | self.anly.tf_unitary_scale.eval(), 150 | 151 | self.runtime, 152 | self.estimated_runtime)) 153 | 154 | index +=1 155 | 
plt.plot(np.array(self.iterations),np.array(self.costs),'bx-',label='Fidelity Error') 156 | plt.plot(np.array(self.iterations),np.array(self.reg_costs),'go-',label='All Penalties') 157 | plt.ylabel('Error') 158 | plt.xlabel('Iteration') 159 | try: 160 | plt.yscale('log') 161 | except: 162 | plt.yscale('linear') 163 | 164 | plt.legend() 165 | else: 166 | print "Error = %.9f"%self.last_cost 167 | ## unitary evolution 168 | if not self.sys_para.state_transfer: 169 | M = self.final_state 170 | plt.subplot(gs[index, 0],title="operator: real") 171 | plt.imshow(M.real,interpolation='none') 172 | plt.clim(-1,1) 173 | plt.colorbar() 174 | plt.subplot(gs[index, 1],title="operator: imaginary") 175 | plt.imshow(M.imag,interpolation='none') 176 | plt.clim(-1,1) 177 | plt.colorbar() 178 | index +=1 179 | 180 | ## operators 181 | plt.subplot(gs[index, :],title="Simulation Weights") 182 | ops_weight = self.anly.get_ops_weight() 183 | 184 | for jj in range (self.sys_para.ops_len): 185 | 186 | plt.plot(np.array([self.sys_para.dt* ii for ii in range(self.sys_para.steps)]),np.array(self.sys_para.ops_max_amp[jj]*ops_weight[jj,:]),label='u'+self.sys_para.Hnames[jj]) 187 | 188 | ## Control Fields 189 | plt.title('Optimized pulse') 190 | 191 | plt.ylabel('Amplitude') 192 | plt.xlabel('Time ('+ self.time_unit+')') 193 | plt.legend() 194 | 195 | index+=1 196 | 197 | ## state evolution 198 | 199 | if self.sys_para.use_inter_vecs: 200 | inter_vecs = self.inter_vecs 201 | 202 | inter_vecs_array = np.array(inter_vecs) 203 | 204 | for ii in range(len(self.concerned)): 205 | plt.subplot(gs[index+ii, :],title="Evolution") 206 | 207 | pop_inter_vecs = inter_vecs[ii] 208 | self.plot_inter_vecs_general(pop_inter_vecs,self.concerned[ii]) 209 | 210 | 211 | 212 | fig = plt.gcf() 213 | if self.sys_para.state_transfer: 214 | plots = 2 215 | else: 216 | plots = 3 217 | 218 | 219 | fig.set_size_inches(15, int (plots+len(self.concerned)*18)) 220 | 221 | display.display(plt.gcf()) 222 | display.clear_output(wait=True) 223 | -------------------------------------------------------------------------------- /quantum_optimal_control/core/regularization_functions.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import math 4 | 5 | from quantum_optimal_control.helper_functions.grape_functions import c_to_r_mat, sort_ev 6 | 7 | def get_reg_loss(tfs): 8 | 9 | # Regulizer 10 | with tf.name_scope('reg_errors'): 11 | 12 | reg_loss = tfs.loss 13 | 14 | # amplitude 15 | if 'amplitude' in tfs.sys_para.reg_coeffs: 16 | amp_reg_alpha_coeff = tfs.sys_para.reg_coeffs['amplitude'] 17 | amp_reg_alpha = amp_reg_alpha_coeff / float(tfs.sys_para.steps) 18 | reg_loss = reg_loss + amp_reg_alpha * tf.nn.l2_loss(tfs.ops_weight) 19 | 20 | # gaussian envelope 21 | if 'envelope' in tfs.sys_para.reg_coeffs: 22 | reg_alpha_coeff = tfs.sys_para.reg_coeffs['envelope'] 23 | reg_alpha = reg_alpha_coeff / float(tfs.sys_para.steps) 24 | reg_loss = reg_loss + reg_alpha * tf.nn.l2_loss( 25 | tf.multiply(tfs.tf_one_minus_gaussian_envelope, tfs.ops_weight)) 26 | 27 | # Limiting the dwdt of control pulse 28 | if 'dwdt' in tfs.sys_para.reg_coeffs: 29 | zeros_for_training = tf.zeros([tfs.sys_para.ops_len, 2]) 30 | new_weights = tf.concat([tfs.ops_weight, zeros_for_training],1) 31 | new_weights = tf.concat([zeros_for_training, new_weights],1) 32 | dwdt_reg_alpha_coeff = tfs.sys_para.reg_coeffs['dwdt'] 33 | dwdt_reg_alpha = dwdt_reg_alpha_coeff / float(tfs.sys_para.steps) 34 | reg_loss = 
reg_loss + dwdt_reg_alpha * tf.nn.l2_loss( 35 | (new_weights[:, 1:] - new_weights[:, :tfs.sys_para.steps + 3]) / tfs.sys_para.dt) 36 | 37 | # Limiting the d2wdt2 of control pulse 38 | if 'd2wdt2' in tfs.sys_para.reg_coeffs: 39 | d2wdt2_reg_alpha_coeff = tfs.sys_para.reg_coeffs['d2wdt2'] 40 | d2wdt2_reg_alpha = d2wdt2_reg_alpha_coeff / float(tfs.sys_para.steps) 41 | reg_loss = reg_loss + d2wdt2_reg_alpha * tf.nn.l2_loss((new_weights[:, 2:] - \ 42 | 2 * new_weights[:, 43 | 1:tfs.sys_para.steps + 3] + new_weights[:, 44 | :tfs.sys_para.steps + 2]) / ( 45 | tfs.sys_para.dt ** 2)) 46 | # bandpass filter on the control 47 | if 'bandpass' in tfs.sys_para.reg_coeffs: 48 | ## currently does not support bandpass reg for CPU (no CPU kernel for FFT) 49 | if not tfs.sys_para.use_gpu: 50 | raise ValueError('currently does not support bandpass reg for CPU (no CPU kernel for FFT)') 51 | 52 | bandpass_reg_alpha_coeff = tfs.sys_para.reg_coeffs['bandpass'] 53 | bandpass_reg_alpha = bandpass_reg_alpha_coeff/ float(tfs.sys_para.steps) 54 | 55 | tf_u = tf.cast(tfs.ops_weight,dtype=tf.complex64) 56 | 57 | tf_fft = tf.complex_abs(tf.fft(tf_u)) 58 | 59 | band = np.array(tfs.sys_para.reg_coeffs['band']) 60 | 61 | band_id = (band*tfs.sys_para.total_time).astype(int) 62 | half_id = int(tfs.sys_para.steps/2) 63 | 64 | 65 | fft_loss = bandpass_reg_alpha*(tf.reduce_sum(tf_fft[:,0:band_id[0]]) + tf.reduce_sum(tf_fft[:,band_id[1]:half_id])) 66 | 67 | reg_loss = reg_loss + fft_loss 68 | 69 | 70 | # Limiting the access to forbidden states 71 | if 'forbidden_coeff_list' in tfs.sys_para.reg_coeffs: 72 | 73 | if tfs.sys_para.is_dressed: 74 | v_sorted = tf.constant(c_to_r_mat(np.reshape(sort_ev(tfs.sys_para.v_c, tfs.sys_para.dressed_id), 75 | [len(tfs.sys_para.dressed_id), len(tfs.sys_para.dressed_id)])), 76 | dtype=tf.float32) 77 | 78 | for inter_vec in tfs.inter_vecs: 79 | if tfs.sys_para.is_dressed and ('forbid_dressed' in tfs.sys_para.reg_coeffs and tfs.sys_para.reg_coeffs['forbid_dressed']): 80 | inter_vec = tf.matmul(tf.transpose(v_sorted), inter_vec) 81 | for inter_reg_alpha_coeff, state in zip(tfs.sys_para.reg_coeffs['forbidden_coeff_list'],tfs.sys_para.reg_coeffs['states_forbidden_list']): 82 | inter_reg_alpha = inter_reg_alpha_coeff / float(tfs.sys_para.steps) 83 | forbidden_state_pop = tf.square(inter_vec[state, :]) + \ 84 | tf.square(inter_vec[tfs.sys_para.state_num + state, :]) 85 | reg_loss = reg_loss + inter_reg_alpha * tf.nn.l2_loss(forbidden_state_pop) 86 | 87 | # Speeding up the gate time 88 | if 'speed_up' in tfs.sys_para.reg_coeffs: 89 | speed_up_reg_alpha_coeff = tfs.sys_para.reg_coeffs['speed_up'] 90 | speed_up_reg_alpha = speed_up_reg_alpha_coeff / float(tfs.sys_para.steps) 91 | 92 | target_vecs_all_timestep = tf.tile(tf.reshape(tfs.target_vecs,[2*tfs.sys_para.state_num,1,len(tfs.inter_vecs)]) , [1,tfs.sys_para.steps+1,1]) 93 | 94 | target_vecs_inner_product = tfs.get_inner_product_3D(tfs.inter_vecs_packed,target_vecs_all_timestep) 95 | reg_loss = reg_loss + speed_up_reg_alpha * tf.nn.l2_loss(tfs.sys_para.steps+1 - target_vecs_inner_product) 96 | 97 | return reg_loss 98 | 99 | -------------------------------------------------------------------------------- /quantum_optimal_control/core/run_session.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from analysis import Analysis 4 | import os 5 | import time 6 | from scipy.optimize import minimize 7 | 8 | from quantum_optimal_control.helper_functions.data_management 
import H5File 9 | 10 | 11 | class run_session: 12 | def __init__(self, tfs,graph,conv,sys_para,method,show_plots=True,single_simulation = False,use_gpu =True): 13 | self.tfs=tfs 14 | self.graph = graph 15 | self.conv = conv 16 | self.sys_para = sys_para 17 | self.update_step = conv.update_step 18 | self.iterations = 0 19 | self.method = method.upper() 20 | self.show_plots = show_plots 21 | self.target = False 22 | if not use_gpu: 23 | config = tf.ConfigProto(device_count = {'GPU': 0}) 24 | else: 25 | config = None 26 | 27 | with tf.Session(graph=graph, config = config) as self.session: 28 | 29 | tf.global_variables_initializer().run() 30 | 31 | print "Initialized" 32 | 33 | if self.method == 'EVOLVE': 34 | self.start_time = time.time() 35 | x0 = self.sys_para.ops_weight_base 36 | self.l,self.rl,self.grads,self.metric,self.g_squared=self.get_error(x0) 37 | self.get_end_results() 38 | 39 | else: 40 | if self.method != 'ADAM': #Any BFGS scheme 41 | self.bfgs_optimize(method=self.method) 42 | 43 | if self.method =='ADAM': 44 | self.start_adam_optimizer() 45 | 46 | 47 | def start_adam_optimizer(self): 48 | # adam optimizer 49 | self.start_time = time.time() 50 | self.end = False 51 | while True: 52 | 53 | self.g_squared, self.l, self.rl, self.metric = self.session.run( 54 | [self.tfs.grad_squared, self.tfs.loss, self.tfs.reg_loss, self.tfs.unitary_scale]) 55 | 56 | if (self.l < self.conv.conv_target) or (self.g_squared < self.conv.min_grad) \ 57 | or (self.iterations >= self.conv.max_iterations): 58 | self.end = True 59 | 60 | self.update_and_save() 61 | 62 | if self.end: 63 | self.get_end_results() 64 | break 65 | 66 | learning_rate = float(self.conv.rate) * np.exp(-float(self.iterations) / self.conv.learning_rate_decay) 67 | self.feed_dict = {self.tfs.learning_rate: learning_rate} 68 | 69 | _ = self.session.run([self.tfs.optimizer], feed_dict=self.feed_dict) 70 | 71 | 72 | 73 | 74 | 75 | def update_and_save(self): 76 | 77 | if not self.end: 78 | 79 | if (self.iterations % self.conv.update_step == 0): 80 | self.anly = Analysis(self.sys_para, self.tfs.final_state, self.tfs.ops_weight, self.tfs.unitary_scale, 81 | self.tfs.inter_vecs) 82 | self.save_data() 83 | self.display() 84 | if (self.iterations % self.conv.evol_save_step == 0): 85 | if not (self.sys_para.show_plots == True and (self.iterations % self.conv.update_step == 0)): 86 | self.anly = Analysis(self.sys_para, self.tfs.final_state, self.tfs.ops_weight, self.tfs.unitary_scale, 87 | self.tfs.inter_vecs) 88 | if not (self.iterations % self.conv.update_step == 0): 89 | self.save_data() 90 | self.conv.save_evol(self.anly) 91 | 92 | self.iterations += 1 93 | 94 | def get_end_results(self): 95 | # get optimized pulse and propagation 96 | 97 | # get and save inter vects 98 | 99 | self.anly = Analysis(self.sys_para, self.tfs.final_state, self.tfs.ops_weight, self.tfs.unitary_scale, 100 | self.tfs.inter_vecs) 101 | self.save_data() 102 | self.display() 103 | if not self.show_plots: 104 | self.conv.save_evol(self.anly) 105 | 106 | self.uks = self.Get_uks() 107 | if not self.sys_para.state_transfer: 108 | self.Uf = self.anly.get_final_state() 109 | else: 110 | self.Uf = [] 111 | 112 | def Get_uks(self): 113 | # to get the pulse amplitudes 114 | uks = self.anly.get_ops_weight() 115 | for ii in range (len(uks)): 116 | uks[ii] = self.sys_para.ops_max_amp[ii]*uks[ii] 117 | return uks 118 | 119 | def get_error(self,uks): 120 | #get error and gradient for scipy bfgs: 121 | self.session.run(self.tfs.ops_weight_base.assign(uks)) 122 | 123 | 
g,l,rl,metric,g_squared = self.session.run([self.tfs.grad_pack, self.tfs.loss, self.tfs.reg_loss, self.tfs.unitary_scale, self.tfs.grad_squared]) 124 | 125 | final_g = np.transpose(np.reshape(g,(len(self.sys_para.ops_c)*self.sys_para.steps))) 126 | 127 | return l,rl,final_g,metric, g_squared 128 | 129 | def save_data(self): 130 | if self.sys_para.save: 131 | self.elapsed = time.time() - self.start_time 132 | with H5File(self.sys_para.file_path) as hf: 133 | hf.append('error', np.array(self.l)) 134 | hf.append('reg_error', np.array(self.rl)) 135 | hf.append('uks', np.array(self.Get_uks())) 136 | hf.append('iteration', np.array(self.iterations)) 137 | hf.append('run_time', np.array(self.elapsed)) 138 | hf.append('unitary_scale', np.array(self.metric)) 139 | 140 | 141 | def display(self): 142 | # display of simulation results 143 | 144 | if self.show_plots: 145 | self.conv.update_plot_summary(self.l, self.rl, self.anly) 146 | else: 147 | print 'Error = :%1.2e; Runtime: %.1fs; Iterations = %d, grads = %10.3e, unitary_metric = %.5f' % ( 148 | self.l, self.elapsed, self.iterations, self.g_squared, self.metric) 149 | 150 | 151 | def minimize_opt_fun(self,x): 152 | # minimization function called by scipy in each iteration 153 | self.l,self.rl,self.grads,self.metric,self.g_squared=self.get_error(np.reshape(x,(len(self.sys_para.ops_c),len(x)/len(self.sys_para.ops_c)))) 154 | 155 | if self.l 1.0: 45 | raise ValueError('Initial guess has strength > max_amp for op %d' % (ii) ) 46 | self.u0_base = np.arcsin(self.u0_base) #because we take the sin of weights later 47 | 48 | 49 | 50 | 51 | else: 52 | self.u0 =[] 53 | self.states_concerned_list = states_concerned_list 54 | 55 | self.is_dressed = False 56 | self.U0_c = U0 57 | self.initial_unitary = c_to_r_mat(U0) #CtoRMat is converting complex matrices to their equivalent real (double the size) matrices 58 | if self.state_transfer == False: 59 | self.target_unitary = c_to_r_mat(U) 60 | else: 61 | self.target_vectors=[] 62 | 63 | for target_vector_c in U: 64 | self.target_vector = c_to_r_vec(target_vector_c) 65 | self.target_vectors.append(self.target_vector) 66 | 67 | if draw is not None: 68 | self.draw_list = draw[0] 69 | self.draw_names = draw[1] 70 | else: 71 | self.draw_list = [] 72 | self.draw_names = [] 73 | 74 | 75 | if dressed_info !=None: 76 | self.v_c = dressed_info['eigenvectors'] 77 | self.dressed_id = dressed_info['dressed_id'] 78 | self.w_c = dressed_info['eigenvalues'] 79 | self.is_dressed = dressed_info['is_dressed'] 80 | self.H0_diag=np.diag(self.w_c) 81 | 82 | self.init_system() 83 | self.init_vectors() 84 | self.init_operators() 85 | self.init_one_minus_gaussian_envelope() 86 | self.init_guess() 87 | 88 | def approx_expm(self,M,exp_t, scaling_terms): 89 | #approximate the exp at the beginning to estimate the number of taylor terms and scaling and squaring needed 90 | U=np.identity(len(M),dtype=M.dtype) 91 | Mt=np.identity(len(M),dtype=M.dtype) 92 | factorial=1.0 #for factorials 93 | 94 | for ii in xrange(1,exp_t): 95 | factorial*=ii 96 | Mt=np.dot(Mt,M) 97 | U+=Mt/((2.**float(ii*scaling_terms))*factorial) #scaling by 2**scaling_terms 98 | 99 | 100 | for ii in xrange(scaling_terms): 101 | U=np.dot(U,U) #squaring scaling times 102 | 103 | return U 104 | 105 | def approx_exp(self,M,exp_t, scaling_terms): 106 | # the scaling and squaring of matrix exponential with taylor expansions 107 | U=1.0 108 | Mt=1.0 109 | factorial=1.0 #for factorials 110 | 111 | for ii in xrange(1,exp_t): 112 | factorial*=ii 113 | Mt=M*Mt 114 | 
U+=Mt/((2.**float(ii*scaling_terms))*factorial) #scaling by 2**scaling_terms 115 | 116 | 117 | for ii in xrange(scaling_terms): 118 | U=np.dot(U,U) #squaring scaling times 119 | 120 | return U 121 | 122 | def Choose_exp_terms(self, d): 123 | #given our hamiltonians and a number of scaling/squaring, we determine the number of Taylor terms 124 | 125 | 126 | exp_t = 20 #maximum 127 | 128 | H=self.H0_c 129 | U_f = self.U0_c 130 | for ii in range (len(self.ops_c)): 131 | H = H + self.ops_max_amp[ii]*self.ops_c[ii] 132 | if d == 0: 133 | self.scaling = max(int(2*np.log2(np.max(np.abs(-(0+1j) * self.dt*H)))),0) 134 | 135 | else: 136 | self.scaling += d 137 | 138 | if self.state_transfer or self.no_scaling: 139 | self.scaling =0 140 | while True: 141 | 142 | if len(self.H0_c) < 10: 143 | for ii in range (self.steps): 144 | U_f = np.dot(U_f,self.approx_expm((0-1j)*self.dt*H, exp_t, self.scaling)) 145 | Metric = np.abs(np.trace(np.dot(np.conjugate(np.transpose(U_f)), U_f)))/(self.state_num) 146 | else: 147 | max_term = np.max(np.abs(-(0+1j) * self.dt*H)) 148 | 149 | Metric = 1 + self.steps *np.abs((self.approx_exp(max_term, exp_t, self.scaling) - np.exp(max_term))/np.exp(max_term)) 150 | 151 | if exp_t == 3: 152 | break 153 | if np.abs(Metric - 1.0) < self.Unitary_error: 154 | exp_t = exp_t-1 155 | else: 156 | break 157 | 158 | return exp_t 159 | 160 | 161 | 162 | 163 | def init_system(self): 164 | self.dt = float(self.total_time)/self.steps 165 | self.state_num= len(self.H0_c) 166 | 167 | 168 | def init_vectors(self): 169 | # initialized vectors used for propagation 170 | self.initial_vectors=[] 171 | self.initial_vectors_c=[] 172 | 173 | for state in self.states_concerned_list: 174 | 175 | if self.state_transfer: 176 | self.initial_vector_c = np.array(state) 177 | else: 178 | if self.is_dressed: 179 | self.initial_vector_c= self.v_c[:,get_state_index(state,self.dressed_id)] 180 | else: 181 | self.initial_vector_c=np.zeros(self.state_num) 182 | self.initial_vector_c[state]=1 183 | 184 | self.initial_vectors_c.append(self.initial_vector_c) 185 | self.initial_vector = c_to_r_vec(self.initial_vector_c) 186 | 187 | self.initial_vectors.append(self.initial_vector) 188 | 189 | if self.save: 190 | with H5File(self.file_path) as hf: 191 | hf.add('initial_vectors_c',data=np.array(self.initial_vectors_c)) 192 | 193 | 194 | def init_operators(self): 195 | # Create operator matrix in numpy array 196 | 197 | self.ops=[] 198 | for op_c in self.ops_c: 199 | op = c_to_r_mat(-1j*self.dt*op_c) 200 | self.ops.append(op) 201 | 202 | self.ops_len = len(self.ops) 203 | 204 | self.H0 = c_to_r_mat(-1j*self.dt*self.H0_c) 205 | self.identity_c = np.identity(self.state_num) 206 | self.identity = c_to_r_mat(self.identity_c) 207 | 208 | if self.Taylor_terms is None: 209 | self.exps =[] 210 | self.scalings = [] 211 | if self.state_transfer or self.no_scaling: 212 | comparisons = 1 213 | else: 214 | comparisons = 6 215 | d = 0 216 | while comparisons >0: 217 | 218 | self.exp_terms = self.Choose_exp_terms(d) 219 | self.exps.append(self.exp_terms) 220 | self.scalings.append(self.scaling) 221 | comparisons = comparisons -1 222 | d = d+1 223 | self.complexities = np.add(self.exps,self.scalings) 224 | a = np.argmin(self.complexities) 225 | 226 | self.exp_terms = self.exps[a] 227 | self.scaling = self.scalings[a] 228 | else: 229 | self.exp_terms = self.Taylor_terms[0] 230 | self.scaling = self.Taylor_terms[1] 231 | 232 | 233 | if self.save: 234 | with H5File(self.file_path) as hf: 235 | hf.add('taylor_terms',data=self.exp_terms) 236 | 
hf.add('taylor_scaling',data=self.scaling) 237 | 238 | print "Using "+ str(self.exp_terms) + " Taylor terms and "+ str(self.scaling)+" Scaling & Squaring terms" 239 | 240 | i_array = np.eye(2*self.state_num) 241 | op_matrix_I=i_array.tolist() 242 | 243 | self.H_ops = [] 244 | for op in self.ops: 245 | self.H_ops.append(op) 246 | self.matrix_list = [self.H0] 247 | for ii in range(self.ops_len): 248 | self.matrix_list = self.matrix_list + [self.H_ops[ii]] 249 | self.matrix_list = self.matrix_list + [op_matrix_I] 250 | 251 | self.matrix_list = np.array(self.matrix_list) 252 | 253 | def init_one_minus_gaussian_envelope(self): 254 | # Generating the Gaussian envelope that pulses should obey 255 | one_minus_gauss = [] 256 | offset = 0.0 257 | overall_offset = 0.01 258 | opsnum=self.ops_len 259 | for ii in range(opsnum): 260 | constraint_shape = np.ones(self.steps)- self.gaussian(np.linspace(-2,2,self.steps)) - offset 261 | constraint_shape = constraint_shape * (constraint_shape>0) 262 | constraint_shape = constraint_shape + overall_offset* np.ones(self.steps) 263 | one_minus_gauss.append(constraint_shape) 264 | 265 | 266 | self.one_minus_gauss = np.array(one_minus_gauss) 267 | 268 | 269 | def gaussian(self,x, mu = 0. , sig = 1. ): 270 | return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) 271 | 272 | def init_guess(self): 273 | # initail guess for control field 274 | if self.u0 != []: 275 | 276 | self.ops_weight_base = np.reshape(self.u0_base, [self.ops_len,self.steps]) 277 | else: 278 | initial_mean = 0 279 | index = 0 280 | 281 | initial_stddev = (1./np.sqrt(self.steps)) 282 | self.ops_weight_base = np.random.normal(initial_mean, initial_stddev, [self.ops_len ,self.steps]) 283 | 284 | self.raw_shape = np.shape(self.ops_weight_base) 285 | 286 | 287 | -------------------------------------------------------------------------------- /quantum_optimal_control/core/tensorflow_state.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | import math 6 | from quantum_optimal_control.helper_functions.grape_functions import c_to_r_mat, sort_ev 7 | from regularization_functions import get_reg_loss 8 | from tensorflow.python.framework import function 9 | from tensorflow.python.framework import ops 10 | 11 | class TensorflowState: 12 | 13 | def __init__(self,sys_para): 14 | 15 | self.sys_para = sys_para 16 | 17 | 18 | def init_defined_functions(self): 19 | # define propagation functions used for evolution 20 | input_num = len(self.sys_para.Hnames) +1 21 | taylor_terms = self.sys_para.exp_terms 22 | scaling = self.sys_para.scaling 23 | 24 | 25 | def get_matexp(uks,H_all): 26 | # matrix exponential 27 | I = H_all[input_num] 28 | matexp = I 29 | uks_Hk_list = [] 30 | for ii in range(input_num): 31 | uks_Hk_list.append((uks[ii]/(2.**scaling))*H_all[ii]) 32 | 33 | H = tf.add_n(uks_Hk_list) 34 | H_n = H 35 | factorial = 1. 
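            # The loop below accumulates the truncated Taylor series sum_n H**n/n!.
            # Since each uks[ii] was pre-divided by 2**scaling above, this evaluates
            # exp(H_full / 2**scaling); the squaring loop that follows recovers
            # exp(H_full) = (exp(H_full / 2**scaling))**(2**scaling).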
36 | 37 | for ii in range(1,taylor_terms+1): 38 | factorial = factorial * ii 39 | matexp = matexp + H_n/factorial 40 | if not ii == (taylor_terms): 41 | H_n = tf.matmul(H,H_n,a_is_sparse=self.sys_para.sparse_H,b_is_sparse=self.sys_para.sparse_U) 42 | 43 | for ii in range(scaling): 44 | matexp = tf.matmul(matexp,matexp,a_is_sparse=self.sys_para.sparse_U,b_is_sparse=self.sys_para.sparse_U) 45 | 46 | return matexp 47 | 48 | 49 | @function.Defun(tf.float32,tf.float32,tf.float32) 50 | def matexp_op_grad(uks,H_all, grad): 51 | # gradient of matrix exponential 52 | coeff_grad = [] 53 | 54 | coeff_grad.append(tf.constant(0,dtype=tf.float32)) 55 | 56 | 57 | ### get output of the function 58 | matexp = get_matexp(uks,H_all) 59 | ### 60 | 61 | for ii in range(1,input_num): 62 | coeff_grad.append(tf.reduce_sum(tf.multiply(grad, 63 | tf.matmul(H_all[ii],matexp,a_is_sparse=self.sys_para.sparse_H,b_is_sparse=self.sys_para.sparse_U)))) 64 | 65 | return [tf.stack(coeff_grad), tf.zeros(tf.shape(H_all),dtype=tf.float32)] 66 | 67 | global matexp_op 68 | 69 | 70 | @function.Defun(tf.float32,tf.float32, grad_func=matexp_op_grad) 71 | def matexp_op(uks,H_all): 72 | # matrix exponential defun operator 73 | matexp = get_matexp(uks,H_all) 74 | 75 | return matexp 76 | 77 | def get_matvecexp(uks,H_all,psi): 78 | # matrix vector exponential 79 | I = H_all[input_num] 80 | matvecexp = psi 81 | 82 | uks_Hk_list = [] 83 | 84 | for ii in range(input_num): 85 | uks_Hk_list.append(uks[ii]*H_all[ii]) 86 | 87 | H = tf.add_n(uks_Hk_list) 88 | 89 | psi_n = psi 90 | factorial = 1. 91 | 92 | for ii in range(1,taylor_terms): 93 | factorial = factorial * ii 94 | psi_n = tf.matmul(H,psi_n,a_is_sparse=self.sys_para.sparse_H,b_is_sparse=self.sys_para.sparse_K) 95 | matvecexp = matvecexp + psi_n/factorial 96 | 97 | return matvecexp 98 | 99 | 100 | @function.Defun(tf.float32,tf.float32,tf.float32,tf.float32) 101 | def matvecexp_op_grad(uks,H_all,psi, grad): 102 | # graident of matrix vector exponential 103 | coeff_grad = [] 104 | 105 | coeff_grad.append(tf.constant(0,dtype=tf.float32)) 106 | 107 | ### get output of the function 108 | matvecexp = get_matvecexp(uks,H_all,psi) 109 | ##### 110 | 111 | 112 | for ii in range(1,input_num): 113 | coeff_grad.append(tf.reduce_sum(tf.multiply(grad, 114 | tf.matmul(H_all[ii],matvecexp,a_is_sparse=self.sys_para.sparse_H,b_is_sparse=self.sys_para.sparse_K)))) 115 | 116 | 117 | 118 | I = H_all[input_num] 119 | vec_grad = grad 120 | uks_Hk_list = [] 121 | for ii in range(input_num): 122 | uks_Hk_list.append((-uks[ii])*H_all[ii]) 123 | 124 | H = tf.add_n(uks_Hk_list) 125 | vec_grad_n = grad 126 | factorial = 1. 
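            # Back-propagation of grad through the state exponential: the generator
            # encodes an anti-Hermitian operator, so its real-valued representation
            # is antisymmetric and exp(H)^T = exp(-H). The loop below applies the
            # same Taylor series with the signs of uks flipped (uks_Hk_list above)
            # to obtain vec_grad = exp(-H) * grad.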
127 | 128 | for ii in range(1,taylor_terms): 129 | factorial = factorial * ii 130 | vec_grad_n = tf.matmul(H,vec_grad_n,a_is_sparse=self.sys_para.sparse_H,b_is_sparse=self.sys_para.sparse_K) 131 | vec_grad = vec_grad + vec_grad_n/factorial 132 | 133 | return [tf.stack(coeff_grad), tf.zeros(tf.shape(H_all),dtype=tf.float32),vec_grad] 134 | 135 | global matvecexp_op 136 | 137 | @function.Defun(tf.float32,tf.float32,tf.float32, grad_func=matvecexp_op_grad) 138 | def matvecexp_op(uks,H_all,psi): 139 | # matrix vector exponential defun operator 140 | matvecexp = get_matvecexp(uks,H_all,psi) 141 | 142 | return matvecexp 143 | 144 | 145 | 146 | def init_variables(self): 147 | self.tf_one_minus_gaussian_envelope = tf.constant(self.sys_para.one_minus_gauss,dtype=tf.float32, name = 'Gaussian') 148 | 149 | 150 | def init_tf_vectors(self): 151 | 152 | self.tf_initial_vectors=[] 153 | for initial_vector in self.sys_para.initial_vectors: 154 | tf_initial_vector = tf.constant(initial_vector,dtype=tf.float32) 155 | self.tf_initial_vectors.append(tf_initial_vector) 156 | self.packed_initial_vectors = tf.transpose(tf.stack(self.tf_initial_vectors)) 157 | 158 | def init_tf_propagators(self): 159 | #tf initial and target propagator 160 | if self.sys_para.state_transfer: 161 | self.target_vecs = tf.transpose(tf.constant(np.array(self.sys_para.target_vectors),dtype=tf.float32)) 162 | else: 163 | self.tf_initial_unitary = tf.constant(self.sys_para.initial_unitary,dtype=tf.float32, name = 'U0') 164 | self.tf_target_state = tf.constant(self.sys_para.target_unitary,dtype=tf.float32) 165 | self.target_vecs = tf.matmul(self.tf_target_state,self.packed_initial_vectors) 166 | print "Propagators initialized." 167 | 168 | def init_tf_ops_weight(self): 169 | 170 | #tf weights of operators 171 | 172 | self.H0_weight = tf.Variable(tf.ones([self.sys_para.steps]), trainable=False) #Just a vector of ones needed for the kernel 173 | self.weights_unpacked=[self.H0_weight] #will collect all weights here 174 | self.ops_weight_base = tf.Variable(tf.constant(self.sys_para.ops_weight_base, dtype = tf.float32), dtype=tf.float32,name ="weights_base") 175 | 176 | self.ops_weight = tf.sin(self.ops_weight_base,name="weights") 177 | for ii in range (self.sys_para.ops_len): 178 | self.weights_unpacked.append(self.sys_para.ops_max_amp[ii]*self.ops_weight[ii,:]) 179 | 180 | #print len(self.sys_para.ops_max_amp) 181 | self.H_weights = tf.stack(self.weights_unpacked,name="packed_weights") 182 | 183 | 184 | 185 | print "Operators weight initialized." 186 | 187 | def init_tf_inter_propagators(self): 188 | #initialize intermediate unitaries 189 | self.inter_states = [] 190 | for ii in range(self.sys_para.steps): 191 | self.inter_states.append(tf.zeros([2*self.sys_para.state_num,2*self.sys_para.state_num], 192 | dtype=tf.float32,name="inter_state_"+str(ii))) 193 | print "Intermediate propagation variables initialized." 
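    # The next two methods build the unitary evolution: each time step j contributes
    # a step propagator U_j = exp(-i*dt*H(u_j)) evaluated via matexp_op, and the
    # intermediate propagators are accumulated as inter_states[0] = U_0 * U_init and
    # inter_states[j] = U_j * inter_states[j-1], whose last entry is the final state.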
194 | 195 | def get_inter_state_op(self,layer): 196 | # build operator for intermediate state propagation 197 | # This function determines the nature of propagation 198 | 199 | propagator = matexp_op(self.H_weights[:,layer],self.tf_matrix_list) 200 | 201 | 202 | return propagator 203 | 204 | def init_tf_propagator(self): 205 | self.tf_matrix_list = tf.constant(self.sys_para.matrix_list,dtype=tf.float32) 206 | 207 | # build propagator for all the intermediate states 208 | 209 | tf_inter_state_op = [] 210 | for ii in np.arange(0,self.sys_para.steps): 211 | tf_inter_state_op.append(self.get_inter_state_op(ii)) 212 | 213 | #first intermediate propagator 214 | self.inter_states[0] = tf.matmul(tf_inter_state_op[0],self.tf_initial_unitary,a_is_sparse=self.sys_para.sparse_U, 215 | b_is_sparse=self.sys_para.sparse_K) 216 | #subsequent operation layers and intermediate propagators 217 | 218 | for ii in np.arange(1,self.sys_para.steps): 219 | self.inter_states[ii] = tf.matmul(tf_inter_state_op[ii],self.inter_states[ii-1],a_is_sparse=self.sys_para.sparse_U, 220 | b_is_sparse=self.sys_para.sparse_K) 221 | 222 | 223 | self.final_state = self.inter_states[self.sys_para.steps-1] 224 | 225 | self.unitary_scale = (0.5/self.sys_para.state_num)*tf.reduce_sum(tf.matmul(tf.transpose(self.final_state),self.final_state)) 226 | 227 | print "Intermediate propagators initialized." 228 | 229 | def init_tf_inter_vectors(self): 230 | # inter vectors for unitary evolution, obtained by multiplying the propagation operator K_j with initial vector 231 | self.inter_vecs_list =[] 232 | 233 | inter_vec = self.packed_initial_vectors 234 | self.inter_vecs_list.append(inter_vec) 235 | 236 | for ii in np.arange(0,self.sys_para.steps): 237 | inter_vec = tf.matmul(self.inter_states[ii],self.packed_initial_vectors,name="inter_vec_"+str(ii)) 238 | self.inter_vecs_list.append(inter_vec) 239 | self.inter_vecs_packed = tf.stack(self.inter_vecs_list, axis=1) 240 | self.inter_vecs = tf.unstack(self.inter_vecs_packed, axis = 2) 241 | 242 | print "Vectors initialized." 243 | 244 | def init_tf_inter_vector_state(self): 245 | # inter vectors for state transfer, obtained by evolving the initial vector 246 | 247 | tf_matrix_list = tf.constant(self.sys_para.matrix_list,dtype=tf.float32) 248 | 249 | self.inter_vecs_list = [] 250 | inter_vec = self.packed_initial_vectors 251 | self.inter_vecs_list.append(inter_vec) 252 | 253 | for ii in np.arange(0,self.sys_para.steps): 254 | psi = inter_vec 255 | inter_vec = matvecexp_op(self.H_weights[:,ii],tf_matrix_list,psi) 256 | self.inter_vecs_list.append(inter_vec) 257 | self.inter_vecs_packed = tf.stack(self.inter_vecs_list, axis=1) 258 | self.inter_vecs = tf.unstack(self.inter_vecs_packed, axis = 2) 259 | 260 | 261 | print "Vectors initialized." 
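    # All vectors in the graph use a real representation: a complex state a + i*b of
    # dimension state_num is stacked as [a; b] with length 2*state_num. The three
    # inner-product helpers below reconstruct the overlap |<psi1|psi2>|**2 from these
    # stacked halves, for a single vector, a batch of vectors, or a batch over time.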
262 | 263 | def get_inner_product(self,psi1,psi2): 264 | #Take 2 states psi1,psi2, calculate their overlap, for single vector 265 | state_num=self.sys_para.state_num 266 | 267 | psi_1_real = (psi1[0:state_num]) 268 | psi_1_imag = (psi1[state_num:2*state_num]) 269 | psi_2_real = (psi2[0:state_num]) 270 | psi_2_imag = (psi2[state_num:2*state_num]) 271 | # psi1 has a+ib, psi2 has c+id, we wanna get Sum ((ac+bd) + i (bc-ad)) magnitude 272 | with tf.name_scope('inner_product'): 273 | ac = tf.multiply(psi_1_real,psi_2_real) 274 | bd = tf.multiply(psi_1_imag,psi_2_imag) 275 | bc = tf.multiply(psi_1_imag,psi_2_real) 276 | ad = tf.multiply(psi_1_real,psi_2_imag) 277 | reals = tf.square(tf.add(tf.reduce_sum(ac),tf.reduce_sum(bd))) 278 | imags = tf.square(tf.subtract(tf.reduce_sum(bc),tf.reduce_sum(ad))) 279 | norm = tf.add(reals,imags) 280 | return norm 281 | 282 | def get_inner_product_2D(self,psi1,psi2): 283 | #Take 2 states psi1,psi2, calculate their overlap, for arbitrary number of vectors 284 | # psi1 and psi2 are shaped as (2*state_num, number of vectors) 285 | state_num=self.sys_para.state_num 286 | 287 | psi_1_real = (psi1[0:state_num,:]) 288 | psi_1_imag = (psi1[state_num:2*state_num,:]) 289 | psi_2_real = (psi2[0:state_num,:]) 290 | psi_2_imag = (psi2[state_num:2*state_num,:]) 291 | # psi1 has a+ib, psi2 has c+id, we wanna get Sum ((ac+bd) + i (bc-ad)) magnitude 292 | with tf.name_scope('inner_product'): 293 | ac = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_real),0) 294 | bd = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_imag),0) 295 | bc = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_real),0) 296 | ad = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_imag),0) 297 | reals = tf.square(tf.reduce_sum(tf.add(ac,bd))) # first trace inner product of all vectors, then squared 298 | imags = tf.square(tf.reduce_sum(tf.subtract(bc,ad))) 299 | norm = (tf.add(reals,imags))/(len(self.sys_para.states_concerned_list)**2) 300 | return norm 301 | 302 | def get_inner_product_3D(self,psi1,psi2): 303 | #Take 2 states psi1,psi2, calculate their overlap, for arbitrary number of vectors and timesteps 304 | # psi1 and psi2 are shaped as (2*state_num, time_steps, number of vectors) 305 | state_num=self.sys_para.state_num 306 | 307 | psi_1_real = (psi1[0:state_num,:]) 308 | psi_1_imag = (psi1[state_num:2*state_num,:]) 309 | psi_2_real = (psi2[0:state_num,:]) 310 | psi_2_imag = (psi2[state_num:2*state_num,:]) 311 | # psi1 has a+ib, psi2 has c+id, we wanna get Sum ((ac+bd) + i (bc-ad)) magnitude 312 | with tf.name_scope('inner_product'): 313 | ac = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_real),0) 314 | bd = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_imag),0) 315 | bc = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_real),0) 316 | ad = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_imag),0) 317 | reals = tf.reduce_sum(tf.square(tf.reduce_sum(tf.add(ac,bd),1))) 318 | # first trace inner product of all vectors, then squared, then sum contribution of all time steps 319 | imags = tf.reduce_sum(tf.square(tf.reduce_sum(tf.subtract(bc,ad),1))) 320 | norm = (tf.add(reals,imags))/(len(self.sys_para.states_concerned_list)**2) 321 | return norm 322 | 323 | def init_training_loss(self): 324 | # Adding all penalties 325 | if self.sys_para.state_transfer == False: 326 | 327 | self.final_vecs = tf.matmul(self.final_state, self.packed_initial_vectors) 328 | 329 | self.loss = 1-self.get_inner_product_2D(self.final_vecs,self.target_vecs) 330 | 331 | else: 332 | self.loss = tf.constant(0.0, dtype = tf.float32) 333 | self.final_state = 
self.inter_vecs_packed[:,self.sys_para.steps,:] 334 | self.loss = 1-self.get_inner_product_2D(self.final_state,self.target_vecs) 335 | self.unitary_scale = self.get_inner_product_2D(self.final_state,self.final_state) 336 | 337 | 338 | self.reg_loss = get_reg_loss(self) 339 | 340 | print "Training loss initialized." 341 | 342 | def init_optimizer(self): 343 | # Optimizer. Takes a variable learning rate. 344 | self.learning_rate = tf.placeholder(tf.float32,shape=[]) 345 | self.opt = tf.train.AdamOptimizer(learning_rate = self.learning_rate) 346 | 347 | #Here we extract the gradients of the pulses 348 | self.grad = self.opt.compute_gradients(self.reg_loss) 349 | 350 | self.grad_pack = tf.stack([g for g, _ in self.grad]) 351 | 352 | self.grads =[tf.nn.l2_loss(g) for g, _ in self.grad] 353 | self.grad_squared = tf.reduce_sum(tf.stack(self.grads)) 354 | self.optimizer = self.opt.apply_gradients(self.grad) 355 | 356 | print "Optimizer initialized." 357 | 358 | def init_utilities(self): 359 | # Add ops to save and restore all the variables. 360 | self.saver = tf.train.Saver() 361 | 362 | print "Utilities initialized." 363 | 364 | 365 | 366 | def build_graph(self): 367 | # graph building for the quantum optimal control 368 | graph = tf.Graph() 369 | with graph.as_default(): 370 | 371 | print "Building graph:" 372 | 373 | self.init_defined_functions() 374 | self.init_variables() 375 | self.init_tf_vectors() 376 | self.init_tf_propagators() 377 | self.init_tf_ops_weight() 378 | if self.sys_para.state_transfer == False: 379 | self.init_tf_inter_propagators() 380 | self.init_tf_propagator() 381 | if self.sys_para.use_inter_vecs: 382 | self.init_tf_inter_vectors() 383 | else: 384 | self.inter_vecs = None 385 | else: 386 | self.init_tf_inter_vector_state() 387 | self.init_training_loss() 388 | self.init_optimizer() 389 | self.init_utilities() 390 | 391 | 392 | print "Graph built!" 393 | 394 | return graph 395 | -------------------------------------------------------------------------------- /quantum_optimal_control/helper_functions/__init__.py: -------------------------------------------------------------------------------- 1 | #IMPORTS 2 | from data_management import * 3 | from grape_functions import * 4 | from qutip_verification import * 5 | -------------------------------------------------------------------------------- /quantum_optimal_control/helper_functions/data_management.py: -------------------------------------------------------------------------------- 1 | """ 2 | data management library used for the Schuster lab experiments 3 | originally written by: Phil Reinhold & David Schuster 4 | """ 5 | 6 | import numpy as np 7 | import h5py 8 | import json 9 | 10 | class H5File(h5py.File): 11 | def __init__(self, *args, **kwargs): 12 | h5py.File.__init__(self, *args, **kwargs) 13 | # self.attrs["_script"] = open(sys.argv[0], 'r').read() 14 | # if self.mode is not 'r': 15 | # self.attrs["_script"] = get_script() 16 | # if not read-only or existing then save the script into the .h5 17 | # Maybe should take this automatic feature out and just do it when you want to 18 | # Automatic feature taken out. Caused more trouble than convenience. 
Ge Yang 19 | # if 'save_script' in kwargs: 20 | # save_script = kwargs['save_script'] 21 | # else: 22 | # save_script = True 23 | # if (self.mode is not 'r') and ("_script" not in self.attrs) and (save_script): 24 | # self.save_script() 25 | self.flush() 26 | 27 | # Methods for proxy use 28 | def _my_ds_from_path(self, dspath): 29 | """returns the object (dataset or group) specified by dspath""" 30 | branch = self 31 | for ds in dspath: 32 | branch = branch[ds] 33 | return branch 34 | 35 | def _my_assign_dset(self, dspath, ds, val): 36 | print 'assigning', ds, val 37 | branch = self._my_ds_from_path(dspath) 38 | branch[ds] = val 39 | 40 | def _get_dset_array(self, dspath): 41 | """returns a pickle-safe array for the branch specified by dspath""" 42 | branch = self._my_ds_from_path(dspath) 43 | if isinstance(branch, h5py.Group): 44 | return 'group' 45 | else: 46 | return (H5Array(branch), dict(branch.attrs)) 47 | 48 | def _get_attrs(self, dspath): 49 | branch = self._my_ds_from_path(dspath) 50 | return dict(branch.attrs) 51 | 52 | def _set_attr(self, dspath, item, value): 53 | branch = self._my_ds_from_path(dspath) 54 | branch.attrs[item] = value 55 | 56 | def _call_with_path(self, dspath, method, args, kwargs): 57 | branch = self._my_ds_from_path(dspath) 58 | return getattr(branch, method)(*args, **kwargs) 59 | 60 | def _ping(self): 61 | return 'OK' 62 | 63 | def set_range(self, dataset, xmin, xmax, ymin=None, ymax=None): 64 | if ymin is not None and ymax is not None: 65 | dataset.attrs["_axes"] = ((xmin, xmax), (ymin, ymax)) 66 | else: 67 | dataset.attrs["_axes"] = (xmin, xmax) 68 | 69 | def set_labels(self, dataset, x_lab, y_lab, z_lab=None): 70 | if z_lab is not None: 71 | dataset.attrs["_axes_labels"] = (x_lab, y_lab, z_lab) 72 | else: 73 | dataset.attrs["_axes_labels"] = (x_lab, y_lab) 74 | 75 | def append_line(self, dataset, line, axis=0): 76 | if isinstance(dataset,unicode): dataset=str(dataset) 77 | if isinstance(dataset, str): 78 | try: 79 | dataset = self[dataset] 80 | except: 81 | shape, maxshape = (0, len(line)), (None, len(line)) 82 | if axis == 1: 83 | shape, maxshape = (shape[1], shape[0]), (maxshape[1], maxshape[0]) 84 | self.create_dataset(dataset, shape=shape, maxshape=maxshape, dtype='float64') 85 | dataset = self[dataset] 86 | shape = list(dataset.shape) 87 | shape[axis] = shape[axis] + 1 88 | dataset.resize(shape) 89 | if axis == 0: 90 | dataset[-1, :] = line 91 | else: 92 | dataset[:, -1] = line 93 | self.flush() 94 | 95 | def append_pt(self, dataset, pt): 96 | if isinstance(dataset,unicode): dataset=str(dataset) 97 | if isinstance(dataset, str) : 98 | try: 99 | dataset = self[dataset] 100 | except: 101 | self.create_dataset(dataset, shape=(0,), maxshape=(None,), dtype='float64') 102 | dataset = self[dataset] 103 | shape = list(dataset.shape) 104 | shape[0] = shape[0] + 1 105 | dataset.resize(shape) 106 | dataset[-1] = pt 107 | self.flush() 108 | 109 | def note(self, note): 110 | """Add a timestamped note to HDF file, in a dataset called 'notes'""" 111 | ts = datetime.datetime.now() 112 | try: 113 | ds = self['notes'] 114 | except: 115 | ds = self.create_dataset('notes', (0,), maxshape=(None,), dtype=h5py.new_vlen(str)) 116 | 117 | shape = list(ds.shape) 118 | shape[0] = shape[0] + 1 119 | ds.resize(shape) 120 | ds[-1] = str(ts) + ' -- ' + note 121 | self.flush() 122 | 123 | def get_notes(self, one_string=False, print_notes=False): 124 | """Returns notes embedded in HDF file if present. 
    def note(self, note):
        """Add a timestamped note to the HDF file, in a dataset called 'notes'"""
        ts = datetime.datetime.now()
        try:
            ds = self['notes']
        except KeyError:
            ds = self.create_dataset('notes', (0,), maxshape=(None,), dtype=h5py.new_vlen(str))

        shape = list(ds.shape)
        shape[0] = shape[0] + 1
        ds.resize(shape)
        ds[-1] = str(ts) + ' -- ' + note
        self.flush()

    def get_notes(self, one_string=False, print_notes=False):
        """Returns notes embedded in the HDF file, if present.

        @param one_string=False if True, concatenates them all together
        @param print_notes=False if True, prints all the notes to stdout
        """
        try:
            notes = list(self['notes'])
        except KeyError:
            notes = []
        if print_notes:
            print '\n'.join(notes)
        if one_string:
            notes = '\n'.join(notes)
        return notes

    def add_data(self, f, key, data):
        data = np.array(data)
        try:
            f.create_dataset(key, shape=data.shape,
                             maxshape=tuple([None] * len(data.shape)),
                             dtype=str(data.dtype))
        except RuntimeError:
            # dataset already exists: replace it
            del f[key]
            f.create_dataset(key, shape=data.shape,
                             maxshape=tuple([None] * len(data.shape)),
                             dtype=str(data.dtype))
        f[key][...] = data

    def append_data(self, f, key, data, forceInit=False):
        """
        The main difference between append_pt and append is that
        append takes care of higher-dimensional data, while append_pt does not.
        """
        data = np.array(data)
        try:
            f.create_dataset(key, shape=tuple([1] + list(data.shape)),
                             maxshape=tuple([None] * (len(data.shape) + 1)),
                             dtype=str(data.dtype))
        except RuntimeError:
            if forceInit == True:
                del f[key]
                f.create_dataset(key, shape=tuple([1] + list(data.shape)),
                                 maxshape=tuple([None] * (len(data.shape) + 1)),
                                 dtype=str(data.dtype))
            dataset = f[key]
            Shape = list(dataset.shape)
            Shape[0] = Shape[0] + 1
            dataset.resize(Shape)

        dataset = f[key]
        try:
            dataset[-1, :] = data
        except TypeError:
            dataset[-1] = data
        # Usage requires strictly the same dimensionality for all data appended.
        # Currently this does not raise a helpful exception, but it should.

    def add(self, key, data):
        self.add_data(self, key, data)

    def append(self, dataset, pt):
        self.append_data(self, dataset, pt)

    # def save_script(self, name="_script"):
    #     self.attrs[name] = get_script()

    def save_dict(self, dict, group='/'):
        if group not in self:
            self.create_group(group)
        for k in dict.keys():
            self[group].attrs[k] = dict[k]

    def get_dict(self, group='/'):
        d = {}
        for k in self[group].attrs.keys():
            d[k] = self[group].attrs[k]
        return d

    get_attrs = get_dict
    save_attrs = save_dict

    def save_settings(self, dic, group='settings'):
        self.save_dict(dic, group)

    def load_settings(self, group='settings'):
        return self.get_dict(group)

    def load_config(self):
        if 'config' in self.attrs.keys():
            return AttrDict(json.loads(self.attrs['config']))
        else:
            return None
--------------------------------------------------------------------------------
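For orientation, here is a hedged usage sketch of the H5File helpers above; the file name, dataset names, and values are invented for this sketch and do not come from the package. `add` stores a fixed array once, while `append_pt` and `append` grow a dataset along its first axis on every call, which is how per-iteration histories get streamed to disk:

```python
import numpy as np
from quantum_optimal_control.helper_functions.data_management import H5File

with H5File('example_run.h5') as hf:
    hf.add('H0', np.diag([0.0, 1.0]))           # fixed array, stored once
    for it in range(3):
        hf.append_pt('loss', 1.0 / (it + 1))    # growable 1-D history of scalars
        hf.append('uks', np.zeros((2, 10)))     # growable stack of 2x10 arrays
    hf.save_settings({'total_time': 10.0, 'steps': 100})

    print hf['loss'][:]                 # [ 1.   0.5   0.33333333]
    print hf['uks'].shape               # (3, 2, 10)
    print hf.load_settings()['steps']   # 100
```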
/quantum_optimal_control/helper_functions/grape_functions.py:
--------------------------------------------------------------------------------
import numpy as np
import scipy.linalg as la

def dressed_unitary(U, v, dressed_id):
    # get the unitary matrix in the dressed basis
    conversion_U = sort_ev(v, dressed_id)
    return np.dot(np.dot(conversion_U, U), np.conjugate(np.transpose(conversion_U)))

def get_dressed_info(H0):
    # assign an index to each dressed state according to its overlap with the bare states
    w_c, v_c = la.eig(H0)
    dressed_id = []
    for ii in range(len(v_c)):
        index = np.argmax(np.abs(v_c[:, ii]))
        if index not in dressed_id:
            dressed_id.append(index)
        else:
            # bare index already claimed: fall back to the next-largest overlap
            temp = (np.abs(v_c[:, ii])).tolist()
            while index in dressed_id:
                temp[index] = 0
                index = np.argmax(temp)
            dressed_id.append(index)

    return w_c, v_c, dressed_id

def qft(N):
    # quantum Fourier transform operator on N qubits
    phase = 2.0j * np.pi / (2**N)
    L, M = np.meshgrid(np.arange(2**N), np.arange(2**N))
    L = np.exp(phase * (L * M))
    q = 1.0 / np.sqrt(2**N) * L
    return q

def hamming_distance(x):
    # population count: number of set bits in x
    tot = 0
    while x:
        tot += 1
        x &= x - 1
    return tot

def Hadamard(N=1):
    # N-qubit Hadamard gate
    Had = (2.0 ** (-N / 2.0)) * np.array([[((-1) ** hamming_distance(i & j))
                                           for i in range(2 ** N)]
                                          for j in range(2 ** N)])
    return Had

def concerned(N, levels):
    # indices of the computational (binary) states within N d-level systems
    concern = []
    for ii in range(levels**N):
        ii_b = Basis(ii, N, levels)
        if is_binary(ii_b):
            concern.append(ii)
    return concern

def is_binary(num):
    flag = True
    for c in num:
        if c != '0' and c != '1':
            flag = False
            break
    return flag

def transmon_gate(gate, levels):
    # embed a qubit gate into the computational subspace of d-level transmons
    N = int(np.log2(len(gate)))
    result = np.identity(levels**N, dtype=complex)
    for ii in range(len(result)):
        for jj in range(len(result)):
            ii_b = Basis(ii, N, levels)
            jj_b = Basis(jj, N, levels)
            if is_binary(ii_b) and is_binary(jj_b):
                result[ii, jj] = gate[int(ii_b, 2), int(jj_b, 2)]

    return result

def rz(theta):
    return [[np.exp(-1j * theta / 2), 0], [0, np.exp(1j * theta / 2)]]

def rx(theta):
    return [[np.cos(theta / 2), -1j * np.sin(theta / 2)],
            [-1j * np.sin(theta / 2), np.cos(theta / 2)]]

def Bin(a, N):
    # binary representation of a, zero-padded to N digits
    a_bin = np.binary_repr(a)
    while len(a_bin) < N:
        a_bin = '0' + a_bin
    return a_bin

def baseN(num, b, numerals="0123456789abcdefghijklmnopqrstuvwxyz"):
    return ((num == 0) and numerals[0]) or (baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])

def Basis(a, N, r):
    # base-r representation of a, zero-padded to N digits
    a_new = baseN(a, r)
    while len(a_new) < N:
        a_new = '0' + a_new
    return a_new

def kron_all(op, num, op_2):
    # returns a sum of terms like xii + ixi + iix for op = x and op_2 = i
    total = np.zeros([len(op)**num, len(op)**num])
    for jj in range(num):
        if jj != 0:
            a = op_2
        else:
            a = op
        for ii in range(num - 1):
            if (jj - ii) == 1:
                b = op
            else:
                b = op_2
            a = np.kron(a, b)
        total = total + a
    return total

def multi_kron(op, num):
    # returns op tensored with itself num times: xx...x
    a = op
    for ii in range(num - 1):
        a = np.kron(a, op)
    return a

def append_separate_krons(op, name, num, state_num, Hops, Hnames, ops_max_amp, amp=4.0):
    # appends xii, ixi, iix separately
    string = name
    I_q = np.identity(state_num)
    x = 1
    y = 1
    z = 1
    X1 = op
    while (x < num):
        X1 = np.kron(X1, I_q)
        x = x + 1
    Hops.append(X1)
    ops_max_amp.append(amp)
    x = 1
    while (x < num):
        string = string + 'i'
        x = x + 1
    Hnames.append(string)

    x = 1

    while (x < num):
        X1 = I_q
        string = 'i'
        while (y
 0:
        return dressed_id.index(bareindex)
    else:
        return bareindex

def c_to_r_mat(M):
    # complex-to-real isomorphism for a matrix: M = A + iB -> [[A, -B], [B, A]]
    return np.asarray(np.bmat([[M.real, -M.imag], [M.imag, M.real]]))

def c_to_r_vec(V):
    # complex-to-real isomorphism for a vector: V -> [Re(V), Im(V)]
    new_v = []
    new_v.append(V.real)
    new_v.append(V.imag)
    return np.reshape(new_v, [2 * len(V)])
--------------------------------------------------------------------------------
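`c_to_r_mat` and `c_to_r_vec` encode a complex matrix M = A + iB as the real block matrix [[A, -B], [B, A]] and a complex vector as its stacked real and imaginary parts, so that complex linear algebra can be carried out with real-valued arrays. A standalone sanity check that the isomorphism preserves matrix-vector products (the two helpers are duplicated here so the snippet runs on its own):

```python
import numpy as np

def c_to_r_mat(M):
    return np.asarray(np.bmat([[M.real, -M.imag], [M.imag, M.real]]))

def c_to_r_vec(V):
    return np.reshape([V.real, V.imag], [2 * len(V)])

M = np.array([[0, -1j], [1j, 0]])        # a complex 2x2 matrix (sigma_y)
V = np.array([1.0 + 0.5j, -2.0j])

# applying the real representation to the real vector must match
# converting the complex product: W w == c_to_r_vec(M V)
assert np.allclose(c_to_r_mat(M).dot(c_to_r_vec(V)), c_to_r_vec(M.dot(V)))
```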
/quantum_optimal_control/helper_functions/qutip_verification.py:
--------------------------------------------------------------------------------
import numpy as np
import h5py
import qutip as qt

def qutip_verification(datafile, atol):

    # load data from file
    with h5py.File(datafile, 'r') as hf:

        gate_time = np.array(hf.get('total_time'))
        gate_steps = np.array(hf.get('steps'))
        H0 = np.array(hf.get('H0'))
        Hops = np.array(hf.get('Hops'))
        initial_vectors_c = np.array(hf.get('initial_vectors_c'))
        uks = np.array(hf.get('uks'))[-1]

        inter_vecs_raw_real = np.array(hf.get('inter_vecs_raw_real'))[-1]
        inter_vecs_raw_imag = np.array(hf.get('inter_vecs_raw_imag'))[-1]

        inter_vecs_raw = inter_vecs_raw_real + 1j * inter_vecs_raw_imag

    max_abs_diff_list = []
    all_close_list = []

    # H0 and Hops
    H0_qobj = qt.Qobj(H0)
    Hops_qobj = []

    for Hop in Hops:
        Hops_qobj.append(qt.Qobj(Hop))

    # define time
    tlist = np.linspace(0, gate_time, gate_steps + 1)
    dt = gate_time / gate_steps

    # append a zero control pulse at the end of uks (final timestep)
    uks_t0 = np.zeros((uks.shape[0], 1))
    uks = np.hstack([uks, uks_t0])

    # loop over each initial vector
    for init_vector_id in range(len(initial_vectors_c)):

        print "Verifying init vector id: %d" % (init_vector_id)

        # initial vector
        psi0 = qt.Qobj(initial_vectors_c[init_vector_id])

        # make functions that return the uks field of each control
        def make_get_uks_func(id):
            def _function(t, args=None):
                time_id = int(t / dt)
                return uks[id][time_id]
            return _function

        # create the time-dependent Hamiltonian list
        Ht_list = []
        Ht_list.append(H0_qobj)
        for ii in range(len(Hops)):
            Ht_list.append([Hops_qobj[ii], make_get_uks_func(ii)])

        # solve the Schrodinger evolution with QuTiP's sesolve
        output = qt.sesolve(Ht_list, psi0, tlist, [])

        # obtain the simulation result
        state_tlist = []
        for state in output.states:
            state_tlist.append(state.full())
        state_tlist = np.array(state_tlist)[:, :, 0]
        state_tlist = np.transpose(state_tlist)

        # absolute difference between the Tensorflow and QuTiP simulation results
        abs_diff = np.abs(state_tlist) - np.abs(inter_vecs_raw[init_vector_id])
        max_abs_diff_list.append(np.max(abs_diff))

        # whether the Tensorflow and QuTiP results agree within atol
        all_close = np.allclose(state_tlist, inter_vecs_raw[init_vector_id], atol=atol)
        all_close_list.append(all_close)

    print "QuTiP simulation verification result for each initial state"
    print "================================================"
    print "max abs diff: " + str(max_abs_diff_list)
    print "all close: " + str(all_close_list)
    print "================================================"
--------------------------------------------------------------------------------
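`qutip_verification` reconstructs the optimized controls as a piecewise-constant, time-dependent Hamiltonian in QuTiP's list format, pairing each control operator with a function that looks up its amplitude at time t. Here is a toy, self-contained illustration of that sesolve call format; the system, pulse, and numbers are invented and independent of any saved GRAPE data:

```python
import numpy as np
import qutip as qt

steps, total_time = 100, 10.0
dt = total_time / steps
tlist = np.linspace(0, total_time, steps + 1)
# one constant control, padded with a zero at the final timestep, as above
uks = np.hstack([0.05 * np.ones(steps), [0.0]])

H0 = 2 * np.pi * 0.1 * qt.sigmaz()   # toy drift Hamiltonian
Hc = 2 * np.pi * qt.sigmax()         # toy control operator

def u(t, args=None):
    # piecewise-constant lookup, as in make_get_uks_func
    return uks[int(t / dt)]

psi0 = qt.basis(2, 0)
output = qt.sesolve([H0, [Hc, u]], psi0, tlist, [])
print np.abs(output.states[-1].full())   # final-state amplitudes under the drive
```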
/quantum_optimal_control/main_grape/__init__.py:
--------------------------------------------------------------------------------
# IMPORTS
from grape import *
--------------------------------------------------------------------------------
/quantum_optimal_control/main_grape/grape.py:
--------------------------------------------------------------------------------
import os
import time
import random as rd

import numpy as np
import scipy.linalg as la
import tensorflow as tf
from IPython import display

from quantum_optimal_control.core.tensorflow_state import TensorflowState
from quantum_optimal_control.core.system_parameters import SystemParameters
from quantum_optimal_control.core.convergence import Convergence
from quantum_optimal_control.core.run_session import run_session
from quantum_optimal_control.helper_functions.data_management import H5File


def Grape(H0, Hops, Hnames, U, total_time, steps, states_concerned_list,
          convergence=None, U0=None, reg_coeffs=None, dressed_info=None, maxA=None,
          use_gpu=True, sparse_H=True, sparse_U=False, sparse_K=False, draw=None,
          initial_guess=None, show_plots=True, unitary_error=1e-4, method='Adam',
          state_transfer=False, no_scaling=False, freq_unit='GHz', file_name=None,
          save=True, data_path=None, Taylor_terms=None, use_inter_vecs=True):

    # start time
    grape_start_time = time.time()

    # set the timing unit used for plotting
    freq_time_unit_dict = {"GHz": "ns", "MHz": "us", "KHz": "ms", "Hz": "s"}
    time_unit = freq_time_unit_dict[freq_unit]

    # force sparse_{H,U,K} to False if use_gpu is True,
    # as GPU sparse matmul is not supported yet
    if use_gpu:
        sparse_H = False
        sparse_U = False
        sparse_K = False

    # fill in defaults before they are saved or used below
    if U0 is None:
        U0 = np.identity(len(H0))
    if convergence is None:
        convergence = {'rate': 0.01, 'update_step': 100, 'max_iterations': 5000,
                       'conv_target': 1e-8, 'learning_rate_decay': 2500}

    file_path = None

    if save:
        # save all the input values
        if file_name is None:
            raise ValueError('Grape function input: file_name, is not specified.')

        if data_path is None:
            raise ValueError('Grape function input: data_path, is not specified.')

        file_num = 0
        while os.path.exists(os.path.join(data_path, str(file_num).zfill(5) + "_" + file_name + ".h5")):
            file_num += 1

        file_name = str(file_num).zfill(5) + "_" + file_name + ".h5"

        file_path = os.path.join(data_path, file_name)

        print "data saved at: " + str(file_path)

        with H5File(file_path) as hf:
            hf.add('H0', data=H0)
            hf.add('Hops', data=Hops)
            hf.add('Hnames', data=Hnames)
            hf.add('U', data=U)
            hf.add('total_time', data=total_time)
            hf.add('steps', data=steps)
            hf.add('states_concerned_list', data=states_concerned_list)
            hf.add('use_gpu', data=use_gpu)
            hf.add('sparse_H', data=sparse_H)
            hf.add('sparse_U', data=sparse_U)
            hf.add('sparse_K', data=sparse_K)

            if maxA is not None:
                hf.add('maxA', data=maxA)

            if initial_guess is not None:
                hf.add('initial_guess', data=initial_guess)
            hf.add('method', method)

            g1 = hf.create_group('convergence')
            for k, v in convergence.items():
                g1.create_dataset(k, data=v)

            if reg_coeffs is not None:
                g2 = hf.create_group('reg_coeffs')
                for k, v in reg_coeffs.items():
                    g2.create_dataset(k, data=v)

            if dressed_info is not None:
                g3 = hf.create_group('dressed_info')
                for k, v in dressed_info.items():
                    g3.create_dataset(k, data=v)

    if maxA is None:
        if initial_guess is None:
            maxAmp = 4 * np.ones(len(Hops))
        else:
            maxAmp = 1.5 * np.max(np.abs(initial_guess)) * np.ones(len(Hops))
    else:
        maxAmp = maxA

    # pass in the system parameters
    sys_para = SystemParameters(H0, Hops, Hnames, U, U0, total_time, steps,
                                states_concerned_list, dressed_info, maxAmp, draw,
                                initial_guess, show_plots, unitary_error,
                                state_transfer, no_scaling, reg_coeffs, save,
                                file_path, Taylor_terms, use_gpu, use_inter_vecs,
                                sparse_H, sparse_U, sparse_K)

    if use_gpu:
        dev = '/gpu:0'
    else:
        dev = '/cpu:0'

    with tf.device(dev):
        tfs = TensorflowState(sys_para)  # create the tensorflow graph
        graph = tfs.build_graph()

    conv = Convergence(sys_para, time_unit, convergence)

    # run the optimization
    try:
        SS = run_session(tfs, graph, conv, sys_para, method,
                         show_plots=sys_para.show_plots, use_gpu=use_gpu)

        # save the wall clock time
        if save:
            wall_clock_time = time.time() - grape_start_time
            with H5File(file_path) as hf:
                hf.add('wall_clock_time', data=np.array(wall_clock_time))
            print "data saved at: " + str(file_path)

        return SS.uks, SS.Uf
    except KeyboardInterrupt:

        # save the wall clock time even when interrupted
        if save:
            wall_clock_time = time.time() - grape_start_time
            with H5File(file_path) as hf:
                hf.add('wall_clock_time', data=np.array(wall_clock_time))
            print "data saved at: " + str(file_path)

        display.clear_output()
--------------------------------------------------------------------------------
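`Grape` is the single public entry point: it saves the inputs, builds the TensorFlow graph on the chosen device, runs the optimization, and returns the optimized pulses and the final unitary. Below is a hedged example call for a one-qubit pi pulse; every Hamiltonian and setting is illustrative, not taken from the repository's examples:

```python
import numpy as np
from quantum_optimal_control.main_grape.grape import Grape

sigma_x = np.array([[0., 1.], [1., 0.]])
sigma_z = np.array([[1., 0.], [0., -1.]])

H0 = 0.0 * sigma_z                   # toy drift (rotating frame)
Hops = [sigma_x]                     # one control operator
Hnames = ['x']
U = sigma_x                          # target unitary: a pi pulse
total_time = 10.0                    # in ns, since freq_unit defaults to GHz
steps = 1000
states_concerned_list = [0, 1]       # optimize over the full qubit subspace

convergence = {'rate': 0.01, 'update_step': 10, 'max_iterations': 1000,
               'conv_target': 1e-4, 'learning_rate_decay': 500}

uks, U_final = Grape(H0, Hops, Hnames, U, total_time, steps,
                     states_concerned_list, convergence=convergence,
                     reg_coeffs={'dwdt': 0.001}, use_gpu=False,
                     show_plots=False, file_name='pi_pulse',
                     data_path='/tmp')
```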
/setup.py:
--------------------------------------------------------------------------------
import os
import sys

# Make sure we are on Python 2.7
if not (sys.version_info[0] == 2 and sys.version_info[1] == 7):
    sys.exit("Sorry, only Python 2.7 is currently supported.")

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


# Write the version to a file so the library does not have to be
# imported prior to installation
MAJOR = 0
MINOR = 1
MICRO = 0
VERSION = '{0}.{1}.{2}'.format(MAJOR, MINOR, MICRO)
NAME = 'quantum_optimal_control'
URL = 'https://github.com/SchusterLab/quantum-optimal-control'
AUTHOR = '''Nelson Leung, Mohamed Abdelhafez,
Jens Koch and David Schuster'''
AUTHOR_EMAIL = 'nelsonleuon@uchicago.edu'
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL

KEYWORDS = ['quantum', 'GRAPE', 'optimal', 'control', 'tensorflow', 'gpu', 'qubit']

DESCRIPTION = 'Tensorflow implementation of GRAPE, a quantum optimal control algorithm.'
REQUIRES = [
    'numpy (>=1.8)',
    'scipy (>=0.15)',
    'tensorflow (>=1.0)',
    'qutip (>=4.0)',
    'matplotlib (>=2.0)',
    'h5py (>=2.5)',
    'IPython (>=4.0)'
]
INSTALL_REQUIRES = [
    'numpy>=1.8',
    'scipy>=0.15',
    'tensorflow>=1.0',
    'qutip>=4.0',
    'matplotlib>=2.0',
    'h5py>=2.5',
    'IPython>=4.0'
]

PACKAGES = [
    'quantum_optimal_control',
    'quantum_optimal_control/main_grape',
    'quantum_optimal_control/core',
    'quantum_optimal_control/helper_functions'
]

# project needs a license
LICENSE = ''

PLATFORMS = ['linux']

CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Science/Research',
    'Natural Language :: English',
    'Operating System :: POSIX :: Linux',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: Implementation :: CPython',
    'Topic :: Scientific/Engineering :: Physics',
]

version_file = 'version.py'

def write_version(version_file=version_file):
    # open and overwrite the old version file
    with open(version_file, 'w+') as v:
        v.write("""
version = '{0}'

""".format(VERSION))


write_version()


try:
    readme = open('README.md', 'r')
    LONG_DESCRIPTION = readme.read()
except IOError:
    LONG_DESCRIPTION = ''


# perform setup
setup(
    name=NAME,
    version=VERSION,
    url=URL,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    maintainer=MAINTAINER,
    maintainer_email=MAINTAINER_EMAIL,
    packages=PACKAGES,
    keywords=KEYWORDS,
    description=DESCRIPTION,
    platforms=PLATFORMS,
    install_requires=INSTALL_REQUIRES,
    classifiers=CLASSIFIERS
)
--------------------------------------------------------------------------------
/version.py:
--------------------------------------------------------------------------------

version = '0.1.0'

--------------------------------------------------------------------------------