├── .gitattributes ├── .gitignore ├── README ├── fowler ├── __init__.py ├── btree.py ├── group_su4.py ├── read_group.py ├── read_group_su2.py ├── read_group_su4.py ├── settings_su2.py ├── settings_su4.py ├── write_group_su2.py └── write_group_su4.py ├── manage ├── __init__.py ├── dump_sequences.py ├── generate_su2.py ├── generate_su4.py └── settings_su4.py ├── releases └── git-clone.sh ├── scratch ├── __init__.py ├── controlled_rk_su4.py ├── n_from_epsilon.py ├── scratch_180_decompose.py ├── scratch_btree.py ├── scratch_controlled_rz_decompose.py ├── scratch_decompose_single.py ├── scratch_kdtree_su2.py ├── scratch_kdtree_su4.py ├── scratch_orthogonal_axis.py ├── scratch_rotations_commute.py ├── scratch_rotations_compose.py ├── scratch_super_phase_prob.py ├── sk_dawson_su2.py └── sk_dawson_su4.py ├── skc ├── __init__.py ├── basic_approx │ ├── __init__.py │ ├── file.py │ ├── generate.py │ ├── process.py │ └── search.py ├── basis.py ├── compose.py ├── dawson │ ├── __init__.py │ └── factor.py ├── decompose.py ├── diagonalize.py ├── group_factor.py ├── hypersphere.py ├── kdtree.py ├── main.py ├── operator.py ├── rotate.py ├── simplify.py ├── tile.py ├── trig.py └── utils.py ├── super ├── __init__.py └── precompile.py └── tests ├── test_angle.py ├── test_aram_diagonal_factor.py ├── test_canonical_order.py ├── test_compose.py ├── test_dawson_factor.py ├── test_dawson_x_group_factor.py ├── test_diagonalize.py ├── test_find_basis.py ├── test_hypersphere.py ├── test_matrix_exp.py ├── test_matrix_ln.py ├── test_operator.py ├── test_operator_norm.py ├── test_permutations.py ├── test_random.py ├── test_recompose.py ├── test_rotate.py ├── test_runner.py ├── test_similarity_matrix.py ├── test_simplify.py ├── test_skc.py ├── test_skc_dawson.py ├── test_tiles.py ├── test_trig.py └── test_utils.py /.gitattributes: -------------------------------------------------------------------------------- 1 | pickles filter=lfs diff=lfs merge=lfs -text 2 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.pyo 3 | *~ 4 | \#*\# 5 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Quantum Compiler, v0.02 2 | https://sourceforge.net/p/quantumcompiler/home/ 3 | 4 | This is a research implementation of a quantum compiler, 5 | initially using the Solovay-Kitaev algorithm 6 | of successive approximation. 7 | It is based on similar compilers by Chris Dawson and Aram Harrow. 8 | Links can be found at the project homepage above. 9 | I will think of a snazzier URL later. 10 | 11 | ----------------------------------------------------------------- 12 | RELEASE NOTES: 13 | 14 | v0.01: Initial implementation with working SU(2) example. 15 | v0.02: Added README file, releases script, and working SU(4) example. 16 | 17 | ----------------------------------------------------------------- 18 | REQUIREMENTS: 19 | 20 | I've tested this on 21 | Mac OS X 10.5, python-2.5.4, numpy-1.4.1 from fink 22 | Linux (Fedora Core 13), python-2.6.4, numpy-1.3.0. 23 | 24 | Note that generated files are included in this distribution, 25 | generated on the Mac OS X system above, but they may not be 26 | compatible with your version of Python and numpy. 27 | 28 | If it doesn't work (complains about lack of defmatrix module), 29 | then you'll have to wipe those files (in pickles/) and 30 | re-generate them according to the instructions below. 
31 | 32 | ----------------------------------------------------------------- 33 | FILES: 34 | 35 | This distribution contains the following top-level directories: 36 | /skc/ The main Solovay-Kitaev Python top-level module 37 | /manage/ Scripts for running maintenance tasks 38 | /pickles/ Generated files (described below) 39 | /scratch/ Sandbox for experimenting with functionality 40 | /releases/ Scripts and directory for generating releases 41 | /tests/ Unit tests 42 | 43 | ----------------------------------------------------------------- 44 | SETUP: 45 | 46 | You will probably need to set your PYTHONPATH to the local directory, 47 | like this bash example: 48 | 49 | export PYTHONPATH=. 50 | 51 | ----------------------------------------------------------------- 52 | OPERATION: 53 | 54 | The compiler operates in the following stages. 55 | 56 | 1. Generating basic approximations. 57 | 2. Building a search tree out of basic approximations. 58 | 3. Running the Solovay-Kitaev algorithm for a desired gate 59 | using the tree of basic approximations as a base case. 60 | 61 | How to perform each of these steps is given in more detail below, 62 | with example commands given for SU(2), that is, for single-qubit gates. 63 | 64 | You can perform analogous commands for SU(4); they will just take longer 65 | and you are more likely to run out of memory. 66 | 67 | 1. GENERATION 68 | 69 | Generate the basic approximations (epsilon-0 net) as files on disk to 70 | be read / processed later. This depends on a given instruction set. 
71 | You can view the SU(2) settings used in the pre-packaged example 72 | by viewing the file: 73 | 74 | manage/generate_su2.py 75 | 76 | This distribution should already come with generated files for your 77 | convenience, which you can list like this: 78 | 79 | anti-hero-1:skc-python buy-ppham$ ls -lh pickles/su2/ 80 | total 33520 81 | -rw-r--r-- 1 buy-ppham staff 852B Aug 26 00:37 gen-g1-1.pickle 82 | -rw-r--r-- 1 buy-ppham staff 505K Aug 26 00:27 gen-g10-1.pickle 83 | -rw-r--r-- 1 buy-ppham staff 1.0M Aug 26 00:28 gen-g11-1.pickle 84 | -rw-r--r-- 1 buy-ppham staff 2.0M Aug 26 00:28 gen-g12-1.pickle 85 | -rw-r--r-- 1 buy-ppham staff 4.1M Aug 26 00:29 gen-g13-1.pickle 86 | -rw-r--r-- 1 buy-ppham staff 8.3M Aug 26 00:31 gen-g14-1.pickle 87 | -rw-r--r-- 1 buy-ppham staff 1.4K Aug 26 00:25 gen-g2-1.pickle 88 | -rw-r--r-- 1 buy-ppham staff 3.2K Aug 26 00:25 gen-g3-1.pickle 89 | -rw-r--r-- 1 buy-ppham staff 6.4K Aug 26 00:25 gen-g4-1.pickle 90 | -rw-r--r-- 1 buy-ppham staff 14K Aug 26 00:25 gen-g5-1.pickle 91 | -rw-r--r-- 1 buy-ppham staff 29K Aug 26 00:25 gen-g6-1.pickle 92 | -rw-r--r-- 1 buy-ppham staff 60K Aug 26 00:25 gen-g7-1.pickle 93 | -rw-r--r-- 1 buy-ppham staff 123K Aug 26 00:27 gen-g8-1.pickle 94 | -rw-r--r-- 1 buy-ppham staff 248K Aug 26 00:27 gen-g9-1.pickle 95 | -rw-r--r-- 1 buy-ppham staff 540B Aug 26 00:37 gen-iset.pickle 96 | 97 | Sequences are enumerated in "generations", one generation per file, 98 | where generation x contains all sequences up to x instructions in length 99 | (before simplifying). Naturally, generation 16 is larger than generation 100 | 1 because there are many more sequences of length 16 than of length 1. 101 | 102 | If you want to regenerate these files from scratch, just wipe out 103 | the old ones and run the generate script again: 104 | 105 | rm pickles/su2/* 106 | python manage/generate_su2.py 107 | 108 | This takes a few minutes, so go read e-mail 109 | or get coffee. 
Some helpful stats are printed to amuse you if you really 110 | want to watch. 111 | 112 | Okay, so now we have generated sequences on disk, ready to be processed 113 | into a tree for efficient nearest-neighbor searching. 114 | 115 | ----------------------------------------------------------------- 116 | 2. SEARCHING 117 | 118 | This is combined with actually running the Solovay-Kitaev algorithm in practice, 119 | since it takes longer to build the tree, save it to a file, and then load it 120 | into memory again later. So we just build the tree, reading the generation 121 | files from the previous step, and then run the compiler at the same time. 122 | 123 | However, you may be curious about the kdtree we use. Yes, that's right, I said 124 | kd-tree. 125 | 126 | http://en.wikipedia.org/wiki/Kd-tree 127 | 128 | I stole the Python implementation in that example, and modified it for use 129 | with unitary operators. 130 | 131 | If you want to build a kd-tree and then immediately search for the nearest 132 | neighbor to a random unitary, you can run this script: 133 | 134 | python scratch/scratch_kdtree_su2.py 135 | 136 | This is the basic lookup operation done when Solovay-Kitaev bottoms out in 137 | its recursion. 138 | 139 | It will show you both the Fowler distance (which discounts any global phase 140 | factor) as well as the more conventional trace distance. 141 | 142 | For SU(2) and sequences up to 16 in length in our example, 143 | you will typically get a Fowler distance of about 0.02 to 0.06. 144 | Not great, but workable. 145 | 146 | ----------------------------------------------------------------- 147 | 3. COMPILING 148 | 149 | Okay okay, enough chit chat. Time for the main attraction. 150 | 151 | python scratch/sk_dawson_su2.py 152 | 153 | This script is so-named because it uses Chris Dawson's group factoring 154 | method for SU(2). 
For the given example (n=2), you should get an error 155 | of 0.00498 in Fowler distance, which again, is nothing to sneeze at for 156 | two levels of recursion. 157 | 158 | ----------------------------------------------------------------- 159 | SU(4) 160 | 161 | So far, we have only done what Chris and Aram have done in their code, 162 | just much slower and several years later. But wait! Now we will, for 163 | the first time ever, compile an SU(4) unitary using Solovay-Kitaev. 164 | 165 | 1. Generate: we've already generated SU(4) basic approximations up to 166 | sequence length = 6, which is good enough for testing purposes. 167 | If you want to generate them again (this takes a looong time): 168 | 169 | rm -f pickles/su4/* 170 | python manage/generate_su4.py 171 | 172 | 2. Build a search tree: again, this is really part of the next step. 173 | But you can test the basic lookup feature of the kdtree. 174 | 175 | python scratch/scratch_kdtree_su4.py 176 | 177 | This will give you fowler distance errors of about 0.2 - 0.4, 178 | which isn't great, but we only went to sequence length = 6 after all. 179 | 180 | 3. Compile: here we do n=4 recursion levels to get fowler distance errors 181 | of about 3e-7. Pretty good! 182 | 183 | python scratch/sk_dawson_su4.py 184 | 185 | ----------------------------------------------------------------- 186 | FUTURE IMPROVEMENTS 187 | 188 | So there is a lot of work to be done to improve the performance of this 189 | implementation. As it is, it would take several hours to run an SU(4) 190 | compilation with sufficient accuracy, and it would probably be infeasible 191 | to run it for SU(8). 192 | 193 | Here are the areas I can think of off the top of my head: 194 | 195 | * Rewrite it in Cython and static typing to speed this up. 196 | Really, I kinda regret writing this in Python because it is sooo 197 | slow, but numpy is very convenient. 198 | But compared to Aram's and Chris's C++ implementations, it makes 199 | me cry. 
200 | * Profile it to find the slow parts. 201 | * Remove assertions. 202 | 203 | As for new functionality, it would be interesting to compare different 204 | group factoring methods, and see whether better convergence occurs for 205 | Aram's SU(2) factoring or Chris's. 206 | 207 | Also, we could implement net calculation to determine the epsilon-0 of 208 | our basic approximations (initial net) to see whether it meets the 209 | critical epsilon for Solovay-Kitaev to converge. Right now I am just 210 | winging it. 211 | 212 | Of course, generic SU(d) compilation is not that interesting and way too hard. 213 | It would probably be more effective to implement Kitaev's idea of whole 214 | circuit compilation, instead of the single gate compilation presented above. 215 | 216 | Alternatively, we could examine what gates are interesting in popular algorithms, 217 | like quantum phase estimation, and then work backwards to optimize our compilers 218 | just for those gates. 219 | 220 | A third approach is to extend Austin Fowler's work to hand-compile a universal 221 | two qubit fault-tolerant gate (like CNOT) for the Steane code, or to try generalizing 222 | his techniques to other codes. 223 | 224 | -------------------------------------------------------------------------------- /fowler/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/learner-long-life/skc-python/8a023f82ab99eeb25214f490bc4e50eda1723160/fowler/__init__.py -------------------------------------------------------------------------------- /fowler/btree.py: -------------------------------------------------------------------------------- 1 | # Btree data structure and methods for manipulating same to be find unique 2 | # subsequences and their successors. 
3 | 4 | class BNode: 5 | 6 | # Canonical key order 7 | KEY_ORDER = [] 8 | 9 | ########################################################################## 10 | def __init__(self, key_in_parent=None): 11 | # Key in parent is used to find the next sibling. For root, it's None 12 | self.key_in_parent = key_in_parent 13 | self.children = {} 14 | 15 | ########################################################################## 16 | # Returns true if we are a root (key_in_parent == None) 17 | def is_root(self): 18 | return (self.key_in_parent == None) 19 | 20 | ########################################################################## 21 | # Returns true if we are a leaf (data exists in dir(self)) 22 | def is_leaf(self): 23 | return ('data') in dir(self) 24 | 25 | ########################################################################## 26 | # Peel off the next item of the key and pass the data on down recursively, 27 | # creating new children as we go, until we bottom out (len(key_list) == 0) 28 | # then insert the data into a leaf. 29 | # Make a defensive copy of the key_list, it will be modified! 
30 | def insert_data(self, key_list, data): 31 | 32 | if (len(key_list) == 0): 33 | # If this is the base case, that's it: we're the leaf 34 | self.data = data 35 | else: 36 | # Peel off the key and add to the list 37 | next_key = key_list[0] 38 | 39 | key_list.remove(next_key) 40 | new_node = BNode(next_key) 41 | new_node.parent = self 42 | new_node.key_in_parent = next_key 43 | self.children[next_key] = new_node 44 | new_node.insert_data(key_list, data) 45 | 46 | ########################################################################## 47 | # Find the data corresponding to the given key list, if it exists in tree 48 | # Return the node where the data is (if node.data != None) 49 | # or where the data would be (if node.data == None) 50 | # or None if the key is not found 51 | def find_data(self, key_list): 52 | 53 | if (len(key_list) == 0): 54 | # We are the leaf, and have been found! 55 | return self 56 | 57 | # Peel off the next item in the key 58 | next_key = key_list[0] 59 | next_child = None 60 | if (next_key in self.children.keys()): 61 | next_child = self.children[next_key] 62 | key_list.remove(next_key) 63 | # Recursively find the next child 64 | next_child = next_child.find_data(key_list) 65 | 66 | return next_child 67 | 68 | ######################################################################### 69 | def __str__(self): 70 | if (self.key_in_parent != None): 71 | label = str(self.key_in_parent) 72 | else: 73 | label = "Root" 74 | string = label + ": " + str(self.children.keys()) + " " 75 | if ('data' in dir(self)): 76 | string += str(self.data) 77 | return string 78 | 79 | ######################################################################### 80 | def get(self, key): 81 | return self.children[key] 82 | 83 | ######################################################################### 84 | # Returns the next sibling to the child with the given key, if any exists 85 | # according to canonical key order. 
86 | # Otherwise returns None 87 | def find_next_child(self, key): 88 | children_keys = self.children.keys() 89 | 90 | if ((key not in children_keys) or (key not in BNode.KEY_ORDER)): 91 | return None 92 | 93 | index1 = BNode.KEY_ORDER.index(key) 94 | 95 | # Initially set next_index to invalid value 96 | next_index = len(BNode.KEY_ORDER) 97 | for key2 in BNode.KEY_ORDER: 98 | if (key2 not in children_keys): 99 | continue 100 | index2 = BNode.KEY_ORDER.index(key2) 101 | if ((index2 > index1) and (index2 < next_index)): 102 | # Update the next index if we find one that's closer 103 | next_index = index2 104 | 105 | if (next_index != len(BNode.KEY_ORDER)): 106 | # If we've updated, then that means key2 must be in children_keys 107 | return self.children[BNode.KEY_ORDER[next_index]] 108 | else: 109 | return None 110 | 111 | ########################################################################## 112 | # Find the leftmost child of the given node in the canonical KEY_ORDER 113 | # Return None otherwise 114 | def find_leftmost_child(self): 115 | 116 | for key in BNode.KEY_ORDER: 117 | if (key in self.children.keys()): 118 | return self.children[key] 119 | 120 | # Otherwise, there are no children, return None 121 | return None 122 | 123 | ########################################################################## 124 | # Returns the successor in lexicographic order of the key list, 125 | # going as far back as the root 126 | def find_successor(self): 127 | 128 | current_node = self 129 | while (not current_node.is_root()): 130 | sibling = current_node.parent.find_next_child(current_node.key_in_parent) 131 | 132 | # Check if we have no sibling, if so, go up one more level 133 | if (sibling == None): 134 | current_node = current_node.parent 135 | else: 136 | break 137 | 138 | if (sibling == None): 139 | # If we can't find a next sibling, this is the lexicographically 140 | # last node 141 | return None 142 | else: 143 | # Else traverse the leftmost child of the sibling 
until we get to 144 | # leaf 145 | current_node = sibling 146 | while (not current_node.is_leaf()): 147 | current_node = current_node.find_leftmost_child() 148 | return current_node -------------------------------------------------------------------------------- /fowler/group_su4.py: -------------------------------------------------------------------------------- 1 | # Generate the two-qubit operator group for Fowler enumeration 2 | 3 | from skc.basic_approx.generate import * 4 | 5 | import numpy 6 | from skc.operator import * 7 | from skc.simplify import * 8 | from skc.simplify import * 9 | from skc.basis import * 10 | from skc.basic_approx import * 11 | 12 | # S matrix 13 | S_matrix = matrixify([[1, 0], [0, 1.0j]]) 14 | S = Operator("S", S_matrix) 15 | Sd_matrix = matrixify([[1, 0], [0, -1.0j]]) 16 | Sd = Operator("Sd", Sd_matrix) 17 | 18 | X1_matrix = numpy.kron(SX.matrix, I2.matrix) 19 | X2_matrix = numpy.kron(I2.matrix, SX.matrix) 20 | X1 = Operator(name="X1", matrix=X1_matrix) 21 | X2 = Operator(name="X2", matrix=X2_matrix) 22 | 23 | Z1_matrix = numpy.kron(SZ.matrix, I2.matrix) 24 | Z2_matrix = numpy.kron(I2.matrix, SZ.matrix) 25 | Z1 = Operator(name="Z1", matrix=Z1_matrix) 26 | Z2 = Operator(name="Z2", matrix=Z2_matrix) 27 | 28 | H1_matrix = numpy.kron(H.matrix, I2.matrix) 29 | H2_matrix = numpy.kron(I2.matrix, H.matrix) 30 | H1 = Operator(name="H1", matrix=H1_matrix) 31 | H2 = Operator(name="H2", matrix=H2_matrix) 32 | 33 | S1_matrix = numpy.kron(S.matrix, I2.matrix) 34 | S2_matrix = numpy.kron(I2.matrix, S.matrix) 35 | S1 = Operator(name="S1", matrix=S1_matrix) 36 | S2 = Operator(name="S2", matrix=S2_matrix) 37 | 38 | Sd1_matrix = numpy.kron(Sd.matrix, I2.matrix) 39 | Sd2_matrix = numpy.kron(I2.matrix, Sd.matrix) 40 | Sd1 = Operator(name="Sd1", matrix=Sd1_matrix) 41 | Sd2 = Operator(name="Sd2", matrix=Sd2_matrix) 42 | 43 | T1_matrix = numpy.kron(T.matrix, I2.matrix) 44 | T2_matrix = numpy.kron(I2.matrix, T.matrix) 45 | T1 = Operator(name="T1", 
matrix=T1_matrix) 46 | T2 = Operator(name="T2", matrix=T2_matrix) 47 | 48 | CNot12_matrix = matrixify([[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]]) 49 | CNot12 = Operator(name="CNOT12", matrix=CNot12_matrix) 50 | 51 | CNot21_matrix = matrixify([[1,0,0,0],[0,0,0,1],[0,0,1,0],[0,1,0,0]]) 52 | CNot21 = Operator(name="CNOT12", matrix=CNot21_matrix) 53 | 54 | gset = [X1, X2, Z1, Z2, H1, H2, S1, S2, Sd1, Sd2, CNot12, CNot21] 55 | 56 | ############################################################################## 57 | # Hermitian basis 58 | 59 | H4 = get_hermitian_basis(d=4) 60 | 61 | print "BASIS H2" 62 | for (k,v) in H4.items_minus_identity(): 63 | print str(k) + " => " + str(v.matrix) 64 | 65 | ############################################################################## 66 | # Simplifying rules 67 | identity_rule = IdentityRule(H4.identity.name) 68 | double_H1_rule = DoubleIdentityRule(symbol='H1', id_sym=H4.identity.name) 69 | double_H2_rule = DoubleIdentityRule(symbol='H2', id_sym=H4.identity.name) 70 | double_CN_rule = DoubleIdentityRule(symbol='CN', id_sym=H4.identity.name) 71 | 72 | # H1 and H2 commute with each other 73 | H_rules = [] 74 | h1h2h1_rule = GeneralRule(['H1', 'H2', 'H1'], 'H2') 75 | h2h1h2_rule = GeneralRule(['H2', 'H1', 'H2'], 'H1') 76 | 77 | H_rules.append(h1h2h1_rule) 78 | H_rules.append(h2h1h2_rule) 79 | 80 | # Leave out T rules for now, since T is not in our group 81 | 82 | adjoint_rule = AdjointRule(id_sym=H4.identity.name) 83 | 84 | simplify_rules = [ 85 | identity_rule, 86 | double_H1_rule, 87 | double_H2_rule, 88 | double_CN_rule, 89 | adjoint_rule 90 | ] 91 | simplify_rules.extend(H_rules) 92 | 93 | ############################################################################## 94 | # Prepare settings 95 | set_filename_prefix("pickles/fowler_su4/gen") 96 | 97 | settings = BasicApproxSettings() 98 | settings.set_iset(gset) 99 | settings.init_simplify_engine(simplify_rules) 100 | settings.set_identity(H4.identity) 101 | settings.basis = H4 102 
| 103 | ############################################################################## 104 | # Do it: kablooey! 105 | generate_approxes(3, settings) 106 | -------------------------------------------------------------------------------- /fowler/read_group.py: -------------------------------------------------------------------------------- 1 | from skc.basic_approx.file import * 2 | from skc.utils import * 3 | 4 | # Read in enumerated, potential group members, and check for uniqueness 5 | def read_and_simplify(l_0): 6 | sequences = [] 7 | # Start numbering gates from 1, since identity is 0 8 | i = 1 9 | 10 | for generation_num in range(1,l_0+1): 11 | filename_pattern = filename_prefix + "-g" + str(generation_num) \ 12 | + "*.pickle" 13 | print str(filename_pattern) 14 | 15 | filenames = glob.glob(filename_pattern) 16 | if (len(filenames) == 0): 17 | raise RuntimeError("No files found for generation " + str(generation_num)) 18 | for filename in filenames: 19 | new_sequences = read_from_file(filename) 20 | 21 | print "Generation " + str(generation_num) + ":" 22 | print str(len(new_sequences)) + " read" 23 | 24 | unique_sequences = [] 25 | 26 | for new_op in new_sequences: 27 | print str(new_op) 28 | found = False 29 | for op in sequences: 30 | dist = fowler_distance(new_op.matrix, op.matrix) 31 | if (dist < TOLERANCE10): 32 | print "Non-unique sequence found: " + str(new_op) + " *** " + str(op) 33 | found = True 34 | break # This is the important part! 
35 | #else: 36 | # print "dist("+str(new_op)+","+str(op)+")=" + str(dist) 37 | 38 | # Update the sequences for the next iteration 39 | if (not found): 40 | new_op.name = "G" + str(i) 41 | i += 1 42 | sequences.append(new_op) 43 | 44 | print str(len(sequences)) 45 | 46 | for sequence in sequences: 47 | print str(sequence) 48 | 49 | # Write out the final group to a file 50 | dump_to_file(sequences, "final-group-"+str(l_0)) 51 | -------------------------------------------------------------------------------- /fowler/read_group_su2.py: -------------------------------------------------------------------------------- 1 | from fowler.settings_su2 import * 2 | from fowler.read_group import * 3 | 4 | read_and_simplify(l_0 = 4) 5 | -------------------------------------------------------------------------------- /fowler/read_group_su4.py: -------------------------------------------------------------------------------- 1 | from fowler.settings_su4 import * 2 | from fowler.read_group import * 3 | 4 | read_and_simplify(l_0 = 5) 5 | -------------------------------------------------------------------------------- /fowler/settings_su2.py: -------------------------------------------------------------------------------- 1 | # Generate the single-qubit Fowler group 2 | 3 | from skc.basic_approx.generate import * 4 | 5 | import numpy 6 | from skc.operator import * 7 | from skc.simplify import * 8 | from skc.simplify import * 9 | from skc.basis import * 10 | from skc.basic_approx import * 11 | 12 | # S matrix 13 | S_matrix = matrixify([[1, 0], [0, 1.0j]]) 14 | S = Operator("S", S_matrix) 15 | Sd_matrix = matrixify([[1, 0], [0, -1.0j]]) 16 | Sd = Operator("Sd", Sd_matrix) 17 | 18 | gset = [H, SX, SZ, S, Sd] 19 | 20 | ############################################################################## 21 | # Hermitian basis 22 | 23 | H2 = get_hermitian_basis(d=2) 24 | 25 | print "BASIS H2" 26 | for (k,v) in H2.items_minus_identity(): 27 | print str(k) + " => " + str(v.matrix) 28 | 29 | 
############################################################################## 30 | # Simplifying rules 31 | identity_rule = IdentityRule(H2.identity.name) 32 | 33 | double_rules = [] 34 | 35 | for gate in gset: 36 | new_double_rule = DoubleIdentityRule(symbol=gate.name, id_sym=H2.identity.name) 37 | double_rules.append(new_double_rule) 38 | 39 | adjoint_rule = AdjointRule(id_sym=H2.identity.name) 40 | 41 | simplify_rules = [ 42 | identity_rule, 43 | adjoint_rule 44 | ] 45 | #simplify_rules.extend(double_rules) 46 | 47 | ############################################################################## 48 | # Prepare settings 49 | set_filename_prefix("pickles/fowler_su2/gen") 50 | 51 | settings = BasicApproxSettings() 52 | settings.set_iset(gset) 53 | settings.init_simplify_engine(simplify_rules) 54 | settings.set_identity(H2.identity) 55 | settings.basis = H2 56 | #settings.custom_rules_func = None 57 | -------------------------------------------------------------------------------- /fowler/settings_su4.py: -------------------------------------------------------------------------------- 1 | # Generate the two-qubit operator group for Fowler enumeration 2 | 3 | from skc.basic_approx.generate import * 4 | 5 | import numpy 6 | from skc.operator import * 7 | from skc.simplify import * 8 | from skc.simplify import * 9 | from skc.basis import * 10 | from skc.basic_approx import * 11 | 12 | # S matrix 13 | S_matrix = matrixify([[1, 0], [0, 1.0j]]) 14 | S = Operator("S", S_matrix) 15 | Sd_matrix = matrixify([[1, 0], [0, -1.0j]]) 16 | Sd = Operator("Sd", Sd_matrix) 17 | 18 | X1_matrix = numpy.kron(SX.matrix, I2.matrix) 19 | X2_matrix = numpy.kron(I2.matrix, SX.matrix) 20 | X1 = Operator(name="X1", matrix=X1_matrix) 21 | X2 = Operator(name="X2", matrix=X2_matrix) 22 | 23 | Z1_matrix = numpy.kron(SZ.matrix, I2.matrix) 24 | Z2_matrix = numpy.kron(I2.matrix, SZ.matrix) 25 | Z1 = Operator(name="Z1", matrix=Z1_matrix) 26 | Z2 = Operator(name="Z2", matrix=Z2_matrix) 27 | 28 | 
H1_matrix = numpy.kron(H.matrix, I2.matrix) 29 | H2_matrix = numpy.kron(I2.matrix, H.matrix) 30 | H1 = Operator(name="H1", matrix=H1_matrix) 31 | H2 = Operator(name="H2", matrix=H2_matrix) 32 | 33 | S1_matrix = numpy.kron(S.matrix, I2.matrix) 34 | S2_matrix = numpy.kron(I2.matrix, S.matrix) 35 | S1 = Operator(name="S1", matrix=S1_matrix) 36 | S2 = Operator(name="S2", matrix=S2_matrix) 37 | 38 | Sd1_matrix = numpy.kron(Sd.matrix, I2.matrix) 39 | Sd2_matrix = numpy.kron(I2.matrix, Sd.matrix) 40 | Sd1 = Operator(name="Sd1", matrix=Sd1_matrix) 41 | Sd2 = Operator(name="Sd2", matrix=Sd2_matrix) 42 | 43 | T1_matrix = numpy.kron(T.matrix, I2.matrix) 44 | T2_matrix = numpy.kron(I2.matrix, T.matrix) 45 | T1 = Operator(name="T1", matrix=T1_matrix) 46 | T2 = Operator(name="T2", matrix=T2_matrix) 47 | 48 | CNot12_matrix = matrixify([[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]]) 49 | CNot12 = Operator(name="CNOT12", matrix=CNot12_matrix) 50 | 51 | CNot21_matrix = matrixify([[1,0,0,0],[0,0,0,1],[0,0,1,0],[0,1,0,0]]) 52 | CNot21 = Operator(name="CNOT21", matrix=CNot21_matrix) 53 | 54 | gset = [X1, X2, Z1, Z2, H1, H2, S1, S2, Sd1, Sd2, CNot12, CNot21] 55 | 56 | ############################################################################## 57 | # Hermitian basis 58 | 59 | H4 = get_hermitian_basis(d=4) 60 | 61 | print "BASIS H2" 62 | for (k,v) in H4.items_minus_identity(): 63 | print str(k) + " => " + str(v.matrix) 64 | 65 | ############################################################################## 66 | # Simplifying rules 67 | identity_rule = IdentityRule(H4.identity.name) 68 | 69 | double_rules = [] 70 | 71 | for gate in gset: 72 | new_double_rule = DoubleIdentityRule(symbol=gate.name, id_sym=H4.identity.name) 73 | double_rules.append(new_double_rule) 74 | 75 | # H1 and H2 commute with each other 76 | H_rules = [] 77 | h1h2h1_rule = GeneralRule(['H1', 'H2', 'H1'], 'H2') 78 | h2h1h2_rule = GeneralRule(['H2', 'H1', 'H2'], 'H1') 79 | 80 | H_rules.append(h1h2h1_rule) 81 | 
H_rules.append(h2h1h2_rule) 82 | 83 | # Leave out T rules for now, since T is not in our group 84 | 85 | adjoint_rule = AdjointRule(id_sym=H4.identity.name) 86 | 87 | simplify_rules = [ 88 | identity_rule, 89 | adjoint_rule 90 | ] 91 | simplify_rules.extend(H_rules) 92 | simplify_rules.extend(double_rules) 93 | 94 | ############################################################################## 95 | # Custom enumeration rules 96 | def custom_enumeration_rules(operator): 97 | # Reorder operators which commute 98 | if (len(operator.ancestors) == 2): 99 | ancestor1 = operator.ancestors[0] 100 | ancestor2 = operator.ancestors[1] 101 | len_ancestor1 = len(ancestor1) 102 | len_ancestor2 = len(ancestor2) 103 | 104 | # Don't do this commutation check for CNOT gates 105 | if ((len_ancestor1 > 4) or (len_ancestor2 > 4)): 106 | return False 107 | 108 | if ((ancestor1[len_ancestor1-1] == '2') and 109 | (ancestor2[len_ancestor2-1] == '1')): 110 | new_op = Operator('', operator.matrix); 111 | # Swap the ancestors 112 | new_op.ancestors = [ancestor2, ancestor1] 113 | # Verify that the canonical order is already in the list 114 | # assert(new_op in new_sequences) 115 | print "Eliminating the commuted sequence: " + str(operator) 116 | return True 117 | return False 118 | 119 | ############################################################################## 120 | # Prepare settings 121 | set_filename_prefix("pickles/fowler_su4/gen") 122 | 123 | settings = BasicApproxSettings() 124 | settings.set_iset(gset) 125 | settings.init_simplify_engine(simplify_rules) 126 | settings.set_identity(H4.identity) 127 | settings.basis = H4 128 | settings.custom_rules_func = custom_enumeration_rules 129 | -------------------------------------------------------------------------------- /fowler/write_group_su2.py: -------------------------------------------------------------------------------- 1 | from fowler.settings_su2 import * 2 | 3 | 
############################################################################## 4 | # Do it: kablooey! 5 | generate_approxes(4, settings) 6 | -------------------------------------------------------------------------------- /fowler/write_group_su4.py: -------------------------------------------------------------------------------- 1 | from fowler.settings_su4 import * 2 | 3 | ############################################################################## 4 | # Do it: kablooey! 5 | generate_approxes(3, settings) 6 | -------------------------------------------------------------------------------- /manage/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/learner-long-life/skc-python/8a023f82ab99eeb25214f490bc4e50eda1723160/manage/__init__.py -------------------------------------------------------------------------------- /manage/dump_sequences.py: -------------------------------------------------------------------------------- 1 | # Read sequences from a pickle file and print them out 2 | 3 | from skc.basic_approx.file import * 4 | import sys 5 | 6 | if (len(sys.argv) < 2): 7 | print "Specify a pickle file to dump." 
8 | sys.exit() 9 | 10 | sequences = read_from_file(sys.argv[1]) 11 | 12 | for sequence in sequences: 13 | print str(sequence) 14 | -------------------------------------------------------------------------------- /manage/generate_su2.py: -------------------------------------------------------------------------------- 1 | from skc.basic_approx.generate import * 2 | 3 | from skc.operator import * 4 | from skc.simplify import * 5 | from skc.basic_approx import * 6 | from skc.basis import * 7 | 8 | import numpy 9 | 10 | iset2 = [H, T, T_inv] 11 | 12 | for insn in iset2: 13 | print str(insn) 14 | 15 | # Simplifying rules 16 | identity_rule = IdentityRule() 17 | double_H_rule = DoubleIdentityRule('H') 18 | adjoint_rule = AdjointRule() 19 | T8_rule = GeneralRule(['T','T','T','T','T','T','T','T'], 'I') 20 | Td8_rule = GeneralRule(['Td','Td','Td','Td','Td','Td','Td','Td'], 'I') 21 | # We should also add a rule for 8T gates -> I 22 | 23 | simplify_rules = [ 24 | identity_rule, 25 | double_H_rule, 26 | adjoint_rule, 27 | T8_rule, 28 | Td8_rule 29 | ] 30 | #simplify_rules = [] 31 | 32 | H2 = get_hermitian_basis(d=2) 33 | 34 | print "BASIS H2" 35 | for (k,v) in H2.items_minus_identity(): 36 | print str(k) + " => " + str(v.matrix) 37 | 38 | set_filename_prefix("pickles/su2/gen") 39 | 40 | settings = BasicApproxSettings() 41 | settings.set_iset(iset2) 42 | settings.init_simplify_engine(simplify_rules) 43 | settings.set_identity(I2) 44 | settings.basis = H2 45 | 46 | generate_approxes(16, settings) 47 | -------------------------------------------------------------------------------- /manage/generate_su4.py: -------------------------------------------------------------------------------- 1 | from manage.settings_su4 import * 2 | 3 | ############################################################################## 4 | # Do it: kablooey! 
5 | generate_approxes(6, settings) 6 | -------------------------------------------------------------------------------- /manage/settings_su4.py: -------------------------------------------------------------------------------- 1 | from skc.basic_approx.generate import * 2 | 3 | from skc.operator import * 4 | from skc.simplify import * 5 | from skc.basis import * 6 | from skc.basic_approx import * 7 | 8 | import numpy 9 | 10 | ############################################################################## 11 | # Instruction set 12 | H1_matrix = numpy.kron(H.matrix, I2.matrix) 13 | H2_matrix = numpy.kron(I2.matrix, H.matrix) 14 | H1 = Operator(name="H1", matrix=H1_matrix) 15 | H2 = Operator(name="H2", matrix=H2_matrix) 16 | 17 | T1_matrix = numpy.kron(T.matrix, I2.matrix) 18 | T2_matrix = numpy.kron(I2.matrix, T.matrix) 19 | T1 = Operator(name="T1", matrix=T1_matrix) 20 | T2 = Operator(name="T2", matrix=T2_matrix) 21 | 22 | Tinv1_matrix = numpy.kron(T_inv.matrix, I2.matrix) 23 | Tinv2_matrix = numpy.kron(I2.matrix, T_inv.matrix) 24 | Tinv1 = Operator(name="T1d", matrix=Tinv1_matrix) 25 | Tinv2 = Operator(name="T2d", matrix=Tinv2_matrix) 26 | 27 | CNot_matrix = matrixify([[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]]) 28 | CNot = Operator(name="CN", matrix=CNot_matrix) 29 | # CNot is its own inverse, so we don't need to add a dagger for it 30 | 31 | iset4 = [H1, H2, T1, T2, Tinv1, Tinv2, CNot] 32 | 33 | print "ISET SU(4)" 34 | 35 | for insn in iset4: 36 | print str(insn) 37 | 38 | ############################################################################## 39 | # Hermitian basis 40 | 41 | H4 = get_hermitian_basis(d=4) 42 | 43 | print "BASIS H2" 44 | for (k,v) in H4.items_minus_identity(): 45 | print str(k) + " => " + str(v.matrix) 46 | 47 | ############################################################################## 48 | # Simplifying rules 49 | identity_rule = IdentityRule(H4.identity.name) 50 | double_H1_rule = DoubleIdentityRule(symbol='H1', id_sym=H4.identity.name) 
51 | double_H2_rule = DoubleIdentityRule(symbol='H2', id_sym=H4.identity.name) 52 | double_CN_rule = DoubleIdentityRule(symbol='CN', id_sym=H4.identity.name) 53 | 54 | # H1 and H2 commute with a bunch of stuff, generate rules for them here 55 | 56 | # First create a list of just iset labels, and don't include CNot 57 | H_rule_iset = [x.name for x in iset4] 58 | H_rule_iset.remove ('CN') 59 | 60 | H_rules = [] 61 | for arg1 in ['H1','H2']: 62 | # defensive copy 63 | iset = list(H_rule_iset) 64 | # remove the current arg 65 | iset.remove(arg1) 66 | for arg2 in iset: 67 | new_rule = GeneralRule([arg1, arg2, arg1], arg2) 68 | print str(new_rule) 69 | H_rules.append(new_rule) 70 | 71 | # Similarly, T1 commutes with T2 and H2 and likewise 72 | # T2 commutes with T1 and H1 73 | 74 | T_rules = [] 75 | 76 | T1_set = ['T1','T1d'] 77 | for arg1 in T1_set: 78 | T1_setd = list(T1_set) 79 | T1_setd.remove(arg1) 80 | arg1d = T1_setd[0] 81 | for arg2 in ['T2','T2d','H2']: 82 | new_rule = GeneralRule([arg1, arg2, arg1d], arg2) 83 | print str(new_rule) 84 | T_rules.append(new_rule) 85 | 86 | T2_set = ['T2','T2d'] 87 | for arg1 in T2_set: 88 | T2_setd = list(T2_set) 89 | T2_setd.remove(arg1) 90 | arg1d = T2_setd[0] 91 | for arg2 in ['T1','T1d','H1']: 92 | new_rule = GeneralRule([arg1, arg2, arg1d], arg2) 93 | print str(new_rule) 94 | T_rules.append(new_rule) 95 | 96 | adjoint_rule = AdjointRule(id_sym=H4.identity.name) 97 | 98 | simplify_rules = [ 99 | identity_rule, 100 | double_H1_rule, 101 | double_H2_rule, 102 | double_CN_rule, 103 | adjoint_rule 104 | ] 105 | simplify_rules.extend(H_rules) 106 | simplify_rules.extend(T_rules) 107 | #simplify_rules = [] 108 | 109 | ############################################################################## 110 | # Prepare settings 111 | set_filename_prefix("pickles/su4/gen") 112 | 113 | settings = BasicApproxSettings() 114 | settings.set_iset(iset4) 115 | settings.init_simplify_engine(simplify_rules) 116 | settings.set_identity(H4.identity) 
117 | settings.basis = H4 118 | -------------------------------------------------------------------------------- /releases/git-clone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | git clone git@github.com:ppham/skc-python.git 3 | -------------------------------------------------------------------------------- /scratch/__init__.py: -------------------------------------------------------------------------------- 1 | # Empty __init__ file so we can import the "scratch" pseudo module 2 | # for inter-dependency testing -------------------------------------------------------------------------------- /scratch/controlled_rk_su4.py: -------------------------------------------------------------------------------- 1 | from skc.operator import * 2 | from skc.dawson import * 3 | from skc.group_factor import * 4 | from skc.compose import * 5 | from skc.basis import * 6 | import math 7 | import time 8 | 9 | H4 = get_hermitian_basis(d=4) 10 | theta = math.pi / 4 # 45 degrees 11 | 12 | axis = pick_random_axis(H4) 13 | # Compose a unitary to compile 14 | matrix_U = matrixify([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,numpy.exp(1j * math.pi / 128)]]) 15 | op_U = Operator(name="U", matrix=matrix_U) 16 | 17 | n = 4 18 | print "U= " + str(matrix_U) 19 | print "n= " + str(n) 20 | 21 | # Prepare the compiler 22 | sk_set_factor_method(aram_diagonal_factor) 23 | sk_set_basis(H4) 24 | # We don't need this for Aram's factoring method 25 | #sk_set_axis(X_AXIS) 26 | sk_build_tree("su4", 6) 27 | 28 | begin_time = time.time() 29 | 30 | Un = solovay_kitaev(op_U, n) 31 | 32 | compile_time = time.time() - begin_time 33 | 34 | print "Compile Time: " + str(compile_time) 35 | print "Approximated U: " + str(Un) 36 | 37 | print "Un= " + str(Un.matrix) 38 | print "len(Un)= " + str(len(Un.ancestors)) 39 | 40 | print "trace_dist(U,Un)= " + str(trace_distance(Un.matrix, op_U.matrix)) 41 | print "fowler_dist(U,Un)= " + str(fowler_distance(Un.matrix, 
op_U.matrix)) 42 | -------------------------------------------------------------------------------- /scratch/n_from_epsilon.py: -------------------------------------------------------------------------------- 1 | from skc.utils import * 2 | import math 3 | 4 | c_approx = 4*math.sqrt(2) 5 | eps_0 = 1.0 / 64 6 | 7 | print "c_approx= " + str(c_approx) 8 | print "eps_0= " + str(eps_0) 9 | 10 | for i in range(1,100): 11 | eps = 1.0 / (2**i) 12 | n =n_from_eps(eps, eps_0, c_approx) 13 | print "n= " + str(n) 14 | print "eps= " + str(eps) 15 | 16 | -------------------------------------------------------------------------------- /scratch/scratch_180_decompose.py: -------------------------------------------------------------------------------- 1 | # Scratch file to test Kitaev's decomposition of an arbitrary unitary into 2 | # two 180 degree rotations (see Figure S8.2b in his book) 3 | 4 | from skc.decompose import * 5 | from skc.compose import * 6 | from skc.basis import * 7 | from skc.hypersphere import * 8 | from skc.utils import * 9 | 10 | import numpy 11 | import scipy.linalg 12 | 13 | B2 = get_hermitian_basis(d=2) 14 | 15 | (matrix_U, axis_U, angle) = get_random_unitary(B2) 16 | print "U= " + str(matrix_U) 17 | print "axis_U= " + str(axis_U) 18 | print "angle= " + str(angle) 19 | 20 | axis_U_array = B2.sort_canonical_order(axis_U) 21 | print "axis_U_array= " + str(axis_U_array) 22 | 23 | hsphere_coords = unitary_to_hspherical(matrix_U, B2) 24 | axis_U_theta = hsphere_coords[1] 25 | axis_U_phi = hsphere_coords[2] 26 | 27 | axis_U_z = numpy.cos(axis_U_theta) 28 | axis_U_y = numpy.sin(axis_U_theta) * numpy.cos(axis_U_phi) 29 | axis_U_x = numpy.sin(axis_U_theta) * numpy.sin(axis_U_phi) 30 | axis_U_array2 = [axis_U_x, axis_U_y, axis_U_z] 31 | 32 | print "axis_U_array2= " + str(axis_U_array2) 33 | 34 | axis_A_theta = (axis_U_theta + PI_HALF) % PI 35 | axis_A_phi = axis_U_phi 36 | 37 | print "hsphere= " + str(hsphere_coords) 38 | 39 | axis_A_z = numpy.cos(axis_A_theta) 40 | 
axis_A_y = numpy.sin(axis_A_theta) * numpy.cos(axis_A_phi) 41 | axis_A_x = numpy.sin(axis_A_theta) * numpy.sin(axis_A_phi) 42 | axis_A_array = [axis_A_x, axis_A_y, axis_A_z] 43 | 44 | print "axis_A_array= " + str(axis_A_array) 45 | 46 | # Take inner product to verify axis_A and axis_U are orthogonal 47 | inner_A_U = numpy.inner(axis_U_array2, axis_A_array) 48 | 49 | print "inner(axis_A, axis_U)= " + str(inner_A_U) 50 | 51 | # Axis to rotate about to get from \hat{u} to \hat{a} (cross product) 52 | axis_A1_array = numpy.cross(axis_U_array, axis_A_array) 53 | 54 | print "axis_A1_array= " + str(axis_A1_array) 55 | 56 | # Take more inner products to verify orthogonality 57 | inner_A_A1 = numpy.inner(axis_A_array, axis_A1_array) 58 | inner_U_A1 = numpy.inner(axis_U_array, axis_A1_array) 59 | print "inner(axis_A, axis_A1)= " + str(inner_A_A1) 60 | print "inner(axis_U, axis_A1)= " + str(inner_U_A1) 61 | 62 | norm_A1 = scipy.linalg.norm(axis_A1_array) 63 | axis_A1_array = axis_A1_array / norm_A1 64 | axis_A1 = B2.unsort_canonical_order(axis_A1_array) 65 | 66 | # Matrix A is the rotation of 90 degrees from \hat{u} about \hat{a1} 67 | matrix_A1 = axis_to_unitary(axis_A1, PI/2.0, B2) 68 | 69 | # Conjugate about a Pauli X rotation 70 | matrix_A = matrix_A1 * SX.matrix * matrix_A1.H 71 | 72 | # Add PI_HALF to the last hsphere coordinates to get orthogonal axis 73 | # Also, we want the first coord (the unitary rotation angle) to be 180 degrees 74 | # when we convert back below 75 | #hsphere_A = [PI, hsphere_coords[1], (hsphere_coords[2] + PI_HALF) % PI] 76 | #matrix_A = hspherical_to_unitary(hsphere_A, B2) 77 | #print "A= " + str(matrix_A) 78 | 79 | matrix_X = SX.matrix 80 | 81 | # Construct the rotation of 90 degrees about the x-axis (arbitrary choice) 82 | # For some reason Kitaev just uses the Pauli X matrix. 
83 | #axis_A = B2.unsort_canonical_order(axis_A_array) 84 | #matrix_A1 = axis_to_unitary(axis_A, PI/4.0, B2) 85 | #matrix_A = matrix_A1 * matrix_X * matrix_A1.H 86 | #print "A= " + str(matrix_A) 87 | #print "Adag= " + str(matrix_A.H) 88 | 89 | #(axis_A, K_A, hermitian_A) = unitary_to_axis(matrix_A, B2) 90 | #print "axis_A= " + str(axis_A) 91 | #print "angle_A= " + str(K_A/2.0) 92 | 93 | #matrix_B1 = axis_to_unitary(axis_U, angle/2.0, B2) 94 | #matrix_B = matrix_B1 * matrix_B * matrix_B1 95 | 96 | #matrix_B = axis_to_unitary(axis_U, angle/2.0, B2) 97 | #print "B= " + str 98 | # Add PI_HALF to the last hsphere coordinates to get orthogonal axis 99 | # Also, we want the first coord (the unitary rotation angle) to be 180 degrees 100 | # when we convert back below 101 | #hsphere_A = [PI, hsphere_coords[1], (hsphere_coords[2] + PI_HALF) % PI] 102 | 103 | # Here we go 104 | #matrix_A = axis_to_unitary(X_AXIS, hspherical_to_unitary(hsphere_A, B2) 105 | #print "A= " + str(matrix_A) 106 | 107 | # And now rotate this rotation by conjugating it with angle/2 about the 108 | # original unitary axis 109 | matrix_B2 = axis_to_unitary(axis_U, angle/2.0, B2) 110 | matrix_B1 = matrix_B2 * matrix_A1 * matrix_B2.H 111 | matrix_B = matrix_B2 * matrix_A * matrix_B2.H 112 | 113 | (axis_B, K_B, hermitian_B) = unitary_to_axis(matrix_B, B2) 114 | print "axis_B= " + str(axis_B) 115 | 116 | axis_B_array = B2.sort_canonical_order(axis_B) 117 | 118 | inner_U_B = numpy.inner(axis_U_array, axis_B_array) 119 | print "inner(axis_U, axis_B)= " + str(inner_U_B) 120 | 121 | overlap_A_B = numpy.dot(axis_B_array, axis_A_array) 122 | angle_A_B = math.acos(overlap_A_B) 123 | print "angle_A_B= " + str(angle_A_B) 124 | 125 | matrix_U2 = matrix_A * matrix_B 126 | 127 | print "U2= " + str(matrix_U2) 128 | 129 | dist = fowler_distance(matrix_U, matrix_U2) 130 | print "dist(U,U2)= " + str(dist) 131 | 132 | matrix_I = matrix_A * matrix_A.H * matrix_B * matrix_B.H 133 | dist = fowler_distance(I2.matrix, matrix_I) 
134 | print "dist(I,I2)= " + str(dist) 135 | 136 | 137 | #matrix_B = matrix_C * matrix_A * matrix_C.H 138 | #print "B= " + str(matrix_B) 139 | 140 | # Now compose the two 180 degree rotations, and hopefully we get back U! 141 | #matrix_U2 = matrix_A 142 | -------------------------------------------------------------------------------- /scratch/scratch_btree.py: -------------------------------------------------------------------------------- 1 | from fowler.btree import * 2 | from skc.operator import * 3 | 4 | key_list1 = ['H1', 'X1', 'Z2', 'H2'] 5 | key_list2 = ['S2d', 'Z1', 'X2', 'S1d'] 6 | 7 | BNode.KEY_ORDER = ['X1', 'X2', 'Z1', 'Z2', 'H1', 'H2', 'S1', 'S2', 'S1d', 'S2d', 8 | 'CNOT12', 'CNOT21'] 9 | 10 | btree_root = BNode() 11 | 12 | ############################################################################## 13 | # Insert the first key list and then trace it down the tree 14 | print "*** INSERT FIRST KEY ***" 15 | 16 | btree_root.insert_data(list(key_list1), I2) 17 | 18 | print str(btree_root) 19 | 20 | child1 = btree_root.get('H1') 21 | 22 | print str(child1) 23 | 24 | child2 = child1.get('X1') 25 | 26 | print str(child2) 27 | 28 | child3 = child2.get('Z2') 29 | 30 | print str(child3) 31 | 32 | child4 = child3.get('H2') 33 | 34 | print str(child4) 35 | 36 | ############################################################################## 37 | # Insert the second key list and then trace it down the tree 38 | print "*** INSERT SECOND KEY ***" 39 | 40 | btree_root.insert_data(list(key_list2), SZ) 41 | 42 | print str(btree_root) 43 | 44 | child1 = btree_root.get('S2d') 45 | 46 | print str(child1) 47 | 48 | child2 = child1.get('Z1') 49 | 50 | print str(child2) 51 | 52 | child3 = child2.get('X2') 53 | 54 | print str(child3) 55 | 56 | child4 = child3.get('S1d') 57 | 58 | print str(child4) 59 | 60 | ############################################################################## 61 | # Search for the next child at the top level 62 | print "*** SEARCH FOR NEXT CHILD ***" 
63 | 64 | next_child = btree_root.find_next_child('H1') 65 | 66 | print str(next_child) 67 | 68 | ############################################################################## 69 | # Find first inserted node 70 | print "*** FIND KEY 1 ***" 71 | 72 | first_leaf = btree_root.find_data(key_list1) 73 | 74 | print str(first_leaf) 75 | 76 | ############################################################################## 77 | # Find second inserted node 78 | print "*** FIND KEY 2 ***" 79 | 80 | second_leaf = btree_root.find_data(key_list2) 81 | 82 | print str(second_leaf) 83 | 84 | ############################################################################## 85 | # Find leftmost child 86 | print "*** FIND LEFTMOST CHILD ***" 87 | 88 | left1 = btree_root.find_leftmost_child() 89 | 90 | print str(left1) 91 | 92 | left2 = child3.find_leftmost_child() 93 | 94 | print str(left2) 95 | 96 | ############################################################################## 97 | # Find successor 98 | print "*** FIND SUCCESSOR ***" 99 | 100 | successor = first_leaf.find_successor() 101 | 102 | print str(successor) 103 | -------------------------------------------------------------------------------- /scratch/scratch_controlled_rz_decompose.py: -------------------------------------------------------------------------------- 1 | # Scratch file to test Kitaev's decomposition of an arbitrary unitary into 2 | # two 180 degree rotations (see Figure S8.2b in his book) 3 | 4 | from skc.operator import * 5 | from skc.utils import * 6 | 7 | import numpy 8 | import scipy.linalg 9 | 10 | pi8_matrix = matrixify([[1, 0], [0, numpy.exp(numpy.pi*(1.0j/8.0))]]) 11 | 12 | pi8_op = Operator("pi8", pi8_matrix) 13 | pi8d_op = pi8_op.dagger() 14 | 15 | A_matrix = tensor_chain([pi8_op.matrix, I2.matrix]) 16 | C_matrix = tensor_chain([pi8d_op.matrix, pi8_op.matrix]) 17 | 18 | A_op = Operator("A", A_matrix) 19 | C_op = Operator("C", C_matrix) 20 | 21 | print "A_op= " + str(A_op.matrix) 22 | print "C_op= " + 
str(C_op.matrix) 23 | 24 | CNot12_matrix = matrixify([[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]]) 25 | CNot12 = Operator(name="CNOT12", matrix=CNot12_matrix) 26 | 27 | D_op = A_op.multiply(CNot12) 28 | E_op = D_op.multiply(C_op) 29 | F_op = E_op.multiply(CNot12) 30 | 31 | print "D_op= " + str(D_op.matrix) 32 | print "E_op= " + str(E_op.matrix) 33 | print "F_op= " + str(F_op.matrix) -------------------------------------------------------------------------------- /scratch/scratch_decompose_single.py: -------------------------------------------------------------------------------- 1 | from skc.basis import * 2 | from skc.compose import * 3 | from skc.decompose import * 4 | from skc.trig import * 5 | from skc.rotate import * 6 | from skc.operator import * 7 | 8 | import math 9 | 10 | B2 = get_hermitian_basis(d=2) 11 | 12 | (matrix_U, axis_U, angle_U) = get_random_unitary(B2, angle_lower=0, angle_upper=PI_HALF) 13 | print "U= " + str(matrix_U) 14 | print "axis_U= " + str(axis_U) 15 | print "angle_U= " + str(angle_U) 16 | 17 | a = matrix_U[(0,0)] 18 | b = matrix_U[(0,1)] 19 | c = matrix_U[(1,0)] 20 | d = matrix_U[(1,1)] 21 | 22 | mag_A = numpy.abs(a) 23 | mag_B = numpy.abs(b) 24 | mag_C = numpy.abs(c) 25 | mag_D = numpy.abs(d) 26 | 27 | # We recover angles alpha, beta, and gamma using Kitaev's terminology from 28 | # KSV p. 205 and p. 
97 of my own notes 29 | 30 | # Verify that all the beta's 31 | beta_A = math.acos(mag_A)*2 32 | beta_B = math.asin(mag_B)*2 33 | beta_C = math.asin(mag_C)*2 34 | beta_D = math.acos(mag_D)*2 35 | 36 | # Make sure that these recovered angles are consistent 37 | assert_approx_equals(beta_A, beta_B) 38 | assert_approx_equals(beta_B, beta_C) 39 | assert_approx_equals(beta_C, beta_D) 40 | beta = beta_A 41 | print "beta= " + str(beta) 42 | 43 | phase_A = a / mag_A 44 | phase_B = b / mag_B 45 | phase_C = c / mag_C 46 | phase_D = d / mag_D 47 | 48 | angle_A = recover_angle(phase_A.real, phase_A.imag) 49 | angle_D = recover_angle(phase_D.real, phase_D.imag) 50 | angle_C = recover_angle(phase_C.real, phase_C.imag) 51 | angle_B = recover_angle(phase_B.real, phase_B.imag) 52 | 53 | alpha = angle_B - angle_A - THREE_PI_HALF 54 | delta = angle_C - angle_A - THREE_PI_HALF 55 | print "alpha= " + str(alpha) 56 | print "delta= " + str(delta) 57 | 58 | phi_A = angle_A + (alpha/2) + (delta/2) 59 | phi_B = angle_D - (alpha/2) - (delta/2) 60 | 61 | print "phi_A= " + str(phi_A) 62 | print "phi_B= " + str(phi_B) 63 | assert_approx_equals(phi_A, 0) 64 | assert_approx_equals(phi_B, TWO_PI) 65 | phi = 0 66 | 67 | # Reconstruct the matrix manually 68 | def reconstruct_from_angle_mag(angle, mag): 69 | phase = numpy.exp(1j*angle) 70 | return mag * phase 71 | 72 | a = reconstruct_from_angle_mag(-alpha/2.0 - delta/2.0 + phi, math.cos(beta/2.0)) 73 | b = reconstruct_from_angle_mag(alpha/2.0 - delta/2.0 + THREE_PI_HALF + phi, math.sin(beta/2.0)) 74 | c = reconstruct_from_angle_mag(-alpha/2.0 + delta/2.0 + THREE_PI_HALF + phi, math.sin(beta/2.0)) 75 | d = reconstruct_from_angle_mag(alpha/2.0 + delta/2.0 + phi, math.cos(beta/2.0)) 76 | 77 | matrix_U3 = matrixify([[a,b],[c,d]]) 78 | print "matrix_U3= " + str(matrix_U3) 79 | dist = fowler_distance(matrix_U3, matrix_U) 80 | print "dist(U,U2)= " + str(dist) 81 | assert_approx_equals(0, dist) 82 | 83 | # Now let's recompose the matrix from Euler angle 
decomposition 84 | matrix_Z1 = rotate_Z(delta) 85 | print "Rot_Z(delta)= " + str(matrix_Z1) 86 | matrix_X = rotate_X(beta) 87 | print "Rot_X(beta)= " + str(matrix_X) 88 | matrix_Z2 = rotate_Z(alpha) 89 | print "Rot_Z(alpha)= " + str(matrix_Z2) 90 | 91 | matrix_U2 = matrix_Z1 * matrix_X * matrix_Z2 92 | print "matrix_U2= " + str(matrix_U2) 93 | dist = fowler_distance(matrix_U2, matrix_U) 94 | print "dist(U,U2)= " + str(dist) 95 | 96 | (axis_U2, K2, matrix_H2) = unitary_to_axis(matrix_U2, B2) 97 | angle_U2 = K2/2.0 98 | print "axis_U2= " + str(axis_U2) 99 | print "angle_U2= " + str(angle_U2) 100 | 101 | assert_approx_equals(0, dist) -------------------------------------------------------------------------------- /scratch/scratch_kdtree_su2.py: -------------------------------------------------------------------------------- 1 | from skc.kdtree import * 2 | from skc.basic_approx.file import * 3 | from skc.compose import * 4 | from skc.decompose import * 5 | from skc.basis import * 6 | from skc.basic_approx.process import * 7 | from skc.basic_approx.search import * 8 | 9 | import time 10 | 11 | # Really, we should save the basis to a file with the instruction set, maybe 12 | basis = get_hermitian_basis(d=2) 13 | 14 | base_dir = "pickles/su2" 15 | file_upper = 16 16 | 17 | tree = build_kdtree(base_dir+"/gen-g", file_upper, "-1.pickle") 18 | 19 | #process_kdtree(base_dir, file_upper) 20 | 21 | #tree = load_kdtree(base_dir, str(file_upper)) 22 | 23 | # This is the random matrix that we are looking for 24 | (search_U, components, angle) = get_random_unitary(basis) 25 | op = search_kdtree(tree, search_U, basis) 26 | 27 | print "op= " + str(op) 28 | #print "op.dims= " + str(op.dimensions) 29 | print "fowler_dist(op,U)= " + str(fowler_distance(op.matrix, search_U)) 30 | print "trace_dist(op,U)= " + str(trace_distance(op.matrix, search_U)) 31 | -------------------------------------------------------------------------------- /scratch/scratch_kdtree_su4.py: 
-------------------------------------------------------------------------------- 1 | from skc.kdtree import * 2 | from skc.basic_approx.file import * 3 | from skc.compose import * 4 | from skc.decompose import * 5 | from skc.basis import * 6 | from skc.basic_approx.process import * 7 | from skc.basic_approx.search import * 8 | 9 | import time 10 | 11 | # Really, we should save the basis to a file with the instruction set, maybe 12 | basis = get_hermitian_basis(d=4) 13 | 14 | base_dir = "pickles/su4" 15 | file_upper = 6 16 | 17 | tree = build_kdtree(base_dir+"/gen-g", file_upper, "-1.pickle") 18 | 19 | #process_kdtree(base_dir, file_upper) 20 | 21 | #tree = load_kdtree(base_dir, str(file_upper)) 22 | 23 | # This is the random matrix that we are looking for 24 | (search_U, components, angle) = get_random_unitary(basis) 25 | op = search_kdtree(tree, search_U, basis) 26 | 27 | print "op= " + str(op) 28 | #print "op.dims= " + str(op.dimensions) 29 | print "fowler_dist(op,U)= " + str(fowler_distance(op.matrix, search_U)) 30 | print "trace_dist(op,U)= " + str(trace_distance(op.matrix, search_U)) 31 | -------------------------------------------------------------------------------- /scratch/scratch_orthogonal_axis.py: -------------------------------------------------------------------------------- 1 | # Scratch file to test finding an arbitrary orthogonal axis from a point 2 | 3 | import math 4 | import scipy.linalg 5 | import numpy 6 | 7 | from skc.utils import * 8 | from skc.compose import * 9 | from skc.basis import * 10 | from skc.hypersphere import * 11 | from super.precompile import * 12 | 13 | ############################################################################## 14 | # Same as above function, but much more roundabout using spherical coordinates 15 | # Also, it is kinda awkward that I don't have an cart3d_to_hspherical, so 16 | # I just take a matrix argument and ignore the rotation angle argument 17 | def find_orthogonal_axis_spher(matrix_U, axis_U, 
basis_B2): 18 | # Convert to spherical coordinates so we can find an orthogonal axis 19 | # This is a slow and stupid method, I just include it here for my amusement 20 | hsphere_coords = unitary_to_hspherical(matrix_U, basis_B2) 21 | axis_U_theta = hsphere_coords[1] 22 | axis_U_phi = hsphere_coords[2] 23 | print "hsphere= " + str(hsphere_coords) 24 | 25 | # Convert the axis \hat{u} to an array so we can do vector operations on it 26 | axis_U_array = basis_B2.sort_canonical_order(axis_U) 27 | print "axis_U_array= " + str(axis_U_array) 28 | 29 | # Just verify that we understand how trigonometry works here 30 | axis_U_z = numpy.cos(axis_U_theta) 31 | axis_U_y = numpy.sin(axis_U_theta) * numpy.cos(axis_U_phi) 32 | axis_U_x = numpy.sin(axis_U_theta) * numpy.sin(axis_U_phi) 33 | axis_U_array2 = [axis_U_x, axis_U_y, axis_U_z] 34 | print "axis_U_array2= " + str(axis_U_array2) 35 | 36 | # And assert that they are equal 37 | axis_U = [axis_U_x, axis_U_y, axis_U_z] 38 | assert_vectors_approx_equal(axis_U_array, axis_U) 39 | 40 | # Add PI/2 to get an orthogonal axis (along theta coordinate) 41 | axis_W_theta = (axis_U_theta + PI_HALF) % PI 42 | axis_W_phi = axis_U_phi 43 | 44 | # Reconstruct axis for \hat{w}, which is orthogonal to \hat{a} and \hat{u} 45 | axis_W_z = numpy.cos(axis_W_theta) 46 | axis_W_y = numpy.sin(axis_W_theta) * numpy.cos(axis_W_phi) 47 | axis_W_x = numpy.sin(axis_W_theta) * numpy.sin(axis_W_phi) 48 | axis_W_array = [axis_W_x, axis_W_y, axis_W_z] 49 | 50 | print "axis_W_array= " + str(axis_W_array) 51 | 52 | # Take inner product to verify axis_W and axis_U are orthogonal 53 | inner_W_U = numpy.inner(axis_U_array2, axis_W_array) 54 | assert_approx_equals(0, inner_W_U) 55 | return axis_W_array 56 | 57 | # As usual, get a hermitian basis first 58 | B2 = get_hermitian_basis(d=2) 59 | 60 | # Generate a random unitary for us to decompose to get an axis 61 | # This is a bit heavyweight, but I don't want to write a function get generate 62 | # a random normalized 
axis 63 | (the_matrix_U, the_axis_U, the_angle) = get_random_unitary(B2) 64 | print "U= " + str(the_matrix_U) 65 | print "axis_U= " + str(the_axis_U) 66 | print "angle= " + str(the_angle) 67 | 68 | # Convert the axis \hat{u} to an array so we can do vector operations on it 69 | the_axis_U_array = B2.sort_canonical_order(the_axis_U) 70 | print "axis_U_array= " + str(the_axis_U_array) 71 | 72 | # Test the trigonometric method 73 | the_axis_Q_array = find_orthogonal_axis_trig(the_axis_U_array) 74 | print "axis_Q_array= " + str(the_axis_Q_array) 75 | 76 | # This doesn't work for now, probably some special cases of converting back 77 | # from spherical to cartesian coordinates. Commented out for now. 78 | # You can comment back in later if you are motivated to fix this. 79 | ## Test the spherical coord method 80 | #the_axis_Q2_array = find_orthogonal_axis_spher(the_matrix_U, the_axis_U, B2) 81 | #print "axis_Q2_array= " + str(the_axis_Q2_array) 82 | -------------------------------------------------------------------------------- /scratch/scratch_rotations_commute.py: -------------------------------------------------------------------------------- 1 | # Scratch file to determine whether two rotations commute or not, and if not, 2 | # why not 3 | 4 | from skc.operator import * 5 | from skc.utils import * 6 | from skc.compose import * 7 | from skc.basis import * 8 | 9 | RXY = SX.matrix * SY.matrix 10 | RYX = SY.matrix * SX.matrix 11 | dist = fowler_distance(RXY, RYX) 12 | # Equal to a phase factor! This is very telling... 
13 | assert_approx_equals(dist, 0) 14 | 15 | # As usual, get a hermitian basis first 16 | B2 = get_hermitian_basis(d=2) 17 | 18 | # Generate a random unitary for us to decompose 19 | # Randomness is the spice of life 20 | (matrix_U, axis_U, angle) = get_random_unitary(B2) 21 | (matrix_U2, axis_U2, angle2) = get_random_unitary(B2) 22 | 23 | R12 = matrix_U * matrix_U2 24 | R21 = matrix_U2 * matrix_U 25 | dist = fowler_distance(R12, R21) 26 | # Not equal, even up to a phase factor 27 | assert_approx_not_equals(dist, 0) 28 | -------------------------------------------------------------------------------- /scratch/scratch_rotations_compose.py: -------------------------------------------------------------------------------- 1 | # Scratch file to determine whether two rotations commute or not, and if not, 2 | # why not 3 | 4 | from skc.operator import * 5 | from skc.utils import * 6 | from skc.compose import * 7 | from skc.decompose import * 8 | from skc.basis import * 9 | 10 | B2 = get_hermitian_basis(d=2) 11 | 12 | axis_X_array = [1,0,0] 13 | axis_Y_array = [0,1,0] 14 | axis_Z_array = [0,0,1] 15 | 16 | axis_X = B2.unsort_canonical_order(axis_X_array) 17 | axis_Y = B2.unsort_canonical_order(axis_Y_array) 18 | 19 | print "x_axis= " + str(axis_X) 20 | print "y_axis= " + str(axis_Y) 21 | print "pi/4= " + str(PI/4.0) 22 | 23 | RX = axis_to_unitary(axis_X, PI/4.0, B2) 24 | # Note that the conjugating rotation angle shouldn't effect the composed 25 | # rotation angle 26 | RY = axis_to_unitary(axis_Y, PI/6.0, B2) 27 | RXY = RY.H * RX * RY 28 | (axis_xy, K_xy, matrix_H_xy) = unitary_to_axis(RXY, B2) 29 | 30 | angle_xy = K_xy/2.0 31 | assert_approx_equals(angle_xy, PI/4.0) 32 | 33 | print "axis_xy= " + str(axis_xy) 34 | print "angle_xy= " + str(angle_xy) 35 | print "matrix_H_xy= " + str(matrix_H_xy) 36 | 37 | axis_xy_array = B2.sort_canonical_order(axis_xy) 38 | dot_x_xy = numpy.dot(axis_xy_array, axis_X_array) 39 | dot_y_xy = numpy.dot(axis_xy_array, axis_Y_array) 40 | dot_z_xy 
= numpy.dot(axis_xy_array, axis_Z_array) 41 | assert_approx_equals(math.sin(PI/6.0), dot_x_xy) 42 | assert_approx_equals(math.cos(PI/6.0), dot_z_xy) 43 | assert_approx_equals(0, dot_y_xy) 44 | 45 | ############################################################################## 46 | # Let's use the axis_XY above as a fixed, non-random, non-orthogonal axis 47 | RXXY = RXY.H * RX * RXY 48 | (axis_XXY, K_XXY, matrix_H_XXY) = unitary_to_axis(RXXY, B2) 49 | 50 | angle_XXY = K_XXY/2.0 51 | assert_approx_equals(angle_XXY, PI/4.0) 52 | 53 | print "axis_XXY= " + str(axis_XXY) 54 | print "angle_XXY= " + str(angle_XXY) 55 | print "matrix_H_XXY= " + str(matrix_H_XXY) 56 | 57 | (matrix_U, axis_U, angle) = get_random_unitary(B2) 58 | axis_U = { 59 | ('f', (2, 1)): 0.26839308905806225, 60 | ('f', (1, 2)): 0.82690780394690444, 61 | ('h', (2, 2)): 0.49415446321730094 62 | } 63 | 64 | print "axis_U= " + str(axis_U) 65 | axis_U_array = B2.sort_canonical_order(axis_U) 66 | axis_T_array = numpy.cross(axis_X_array, axis_U_array) 67 | norm_T = scipy.linalg.norm(axis_T_array) 68 | axis_T_array = axis_T_array / norm_T 69 | 70 | axis_T = B2.unsort_canonical_order(axis_T_array) 71 | 72 | print "axis_U_array= " + str(axis_U_array) 73 | print "axis_X_array= " + str(axis_X_array) 74 | 75 | dot_U_X = numpy.inner(axis_U_array, axis_X_array) 76 | 77 | print "dot_U_X= " + str(dot_U_X) 78 | 79 | angle_U_X = math.acos(dot_U_X) 80 | 81 | print "axis_T= " + str(axis_T) 82 | print "angle_T= " + str(angle_U_X) 83 | 84 | angle = -(angle_U_X/2.0); #-math.pi/10; 85 | print "angle= " + str(angle) 86 | 87 | matrix_T = axis_to_unitary(axis_T, angle, B2) 88 | RTX = matrix_T.H * RX * matrix_T 89 | (axis_TX, K_TX, matrix_H_TX) = unitary_to_axis(RTX, B2) 90 | print "axis_TX= " + str(axis_TX) 91 | axis_TX_array = B2.sort_canonical_order(axis_TX) 92 | assert_vectors_approx_equal(axis_TX_array, axis_U_array) -------------------------------------------------------------------------------- 
# --- scratch/scratch_super_phase_prob.py ---
# Scratch test to generate probabilities from a random eigenvalue

from random import random
from skc.trig import recover_angle
import math

# Set the value of n here, the resolution for performing phase simulation
# phi = k / 2^n
n = 8
n_exp = 2**n

# Get a random k within the range 0 to 2^n
k = int(random()*n_exp)
print("k= " + str(k))

phi_k = (k * 1.0) / n_exp
print("phi_k= " + str(phi_k))

cos_k = math.cos(2*math.pi*phi_k)
sin_k = math.sin(2*math.pi*phi_k)
print("cos_k= " + str(cos_k))
print("sin_k= " + str(sin_k))

# Divide the uniform distribution over [0,1] into intervals to
# return non-uniform distributions
cos_partition = (1 - cos_k)/2.0
sin_partition = (1 + sin_k)/2.0
print("cos_partition= " + str(cos_partition))
print("sin_partition= " + str(sin_partition))

# Number of Bernoulli trials to recover a value of cos(phi_k) or sin(phi_k)
s = 1000000

# Count the ones for both the cosine and sine series; each trial draws a
# single uniform sample and tests it against both partitions
cos_count = 0
sin_count = 0
for _ in range(s):
    r = random()
    if r < cos_partition:
        cos_count += 1
    if r < sin_partition:
        sin_count += 1

print("cos_count= " + str(cos_count))
print("sin_count= " + str(sin_count))

cos_prob = (cos_count * 1.0) / s
sin_prob = (sin_count * 1.0) / s
print("cos_prob= " + str(cos_prob))
print("sin_prob= " + str(sin_prob))

# Invert the partition formulas to estimate cos/sin of the phase
cos_phi_k_2 = 1-(cos_prob*2)
sin_phi_k_2 = (sin_prob*2) - 1
print("cos_phi_k_2= " + str(cos_phi_k_2))
print("sin_phi_k_2= " + str(sin_phi_k_2))

# Recover phi_k from the outcomes of the Bernoulli trials, treating the
# count of ones as an approximation of the probability
phi_k_cos_2 = math.acos(cos_phi_k_2) / (2*math.pi)
phi_k_sin_2 = math.asin(sin_phi_k_2) / (2*math.pi)
print("phi_k_cos_2= " + str(phi_k_cos_2))
print("phi_k_sin_2= " + str(phi_k_sin_2))

k_cos_2 = round(phi_k_cos_2 * n_exp)
k_sin_2 = round(phi_k_sin_2 * n_exp)
print("k_cos_2= " + str(k_cos_2))
print("k_sin_2= " + str(k_sin_2))

# Combine both estimates to fix the quadrant
two_pi_phi_k_2 = recover_angle(cos_phi=cos_phi_k_2, sin_phi=sin_phi_k_2,
                               tolerance=1e-2)
phi_k_2 = two_pi_phi_k_2 / (2*math.pi)
print("phi_k_2= " + str(phi_k_2))

k_2 = round(phi_k_2 * n_exp)
print("k_2= " + str(k_2))

# --- scratch/sk_dawson_su2.py ---
from skc.operator import *
from skc.dawson.factor import *
from skc.dawson import *
from skc.compose import *
from skc.basis import *
import math

H2 = get_hermitian_basis(d=2)
theta = math.pi / 4  # 45 degrees
axis = cart3d_to_h2(x=1, y=1, z=1)

# Compose a unitary to compile
matrix_U = axis_to_unitary(axis, theta, H2)
op_U = Operator(name="U", matrix=matrix_U)

n = 2
print("U= " + str(matrix_U))
print("n= " + str(n))

# Prepare the compiler
sk_set_factor_method(dawson_group_factor)
sk_set_basis(H2)
sk_set_axis(X_AXIS)
sk_build_tree("su2", 15)

Un = solovay_kitaev(op_U, n)
print("Approximated U: " + str(Un))
print("Un= " + str(Un.matrix))
print("trace_dist(U,Un)= " + str(trace_distance(Un.matrix, op_U.matrix)))
print("fowler_dist(U,Un)= " + str(fowler_distance(Un.matrix, op_U.matrix)))
-------------------------------------------------------------------------------- 1 | from skc.operator import * 2 | from skc.dawson import * 3 | from skc.group_factor import * 4 | from skc.compose import * 5 | from skc.basis import * 6 | import math 7 | 8 | H4 = get_hermitian_basis(d=4) 9 | theta = math.pi / 4 # 45 degrees 10 | 11 | axis = pick_random_axis(H4) 12 | # Compose a unitary to compile 13 | matrix_U = axis_to_unitary(axis, theta, H4) 14 | op_U = Operator(name="U", matrix=matrix_U) 15 | 16 | n = 4 17 | print "U= " + str(matrix_U) 18 | print "n= " + str(n) 19 | 20 | # Prepare the compiler 21 | sk_set_factor_method(aram_diagonal_factor) 22 | sk_set_basis(H4) 23 | # We don't need this for Aram's factoring method 24 | #sk_set_axis(X_AXIS) 25 | sk_build_tree("su4", 6) 26 | 27 | Un = solovay_kitaev(op_U, n) 28 | print "Approximated U: " + str(Un) 29 | 30 | print "Un= " + str(Un.matrix) 31 | 32 | print "trace_dist(U,Un)= " + str(trace_distance(Un.matrix, op_U.matrix)) 33 | print "fowler_dist(U,Un)= " + str(fowler_distance(Un.matrix, op_U.matrix)) 34 | -------------------------------------------------------------------------------- /skc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/learner-long-life/skc-python/8a023f82ab99eeb25214f490bc4e50eda1723160/skc/__init__.py -------------------------------------------------------------------------------- /skc/basic_approx/__init__.py: -------------------------------------------------------------------------------- 1 | from skc.simplify import * 2 | 3 | class BasicApproxSettings: 4 | 5 | def __init__(self): 6 | self.identity = None 7 | self.iset = [] 8 | self.iset_dict = {} 9 | self.simplify_engine = None # simplify engine 10 | 11 | def set_iset(self, new_iset): 12 | self.check_iset(new_iset) 13 | self.iset = new_iset 14 | for insn in new_iset: 15 | self.iset_dict[insn.name] = insn 16 | 17 | def set_identity(self, new_identity): 18 | 
self.identity = new_identity 19 | self.iset_dict[new_identity.name] = new_identity 20 | 21 | def print_iset(self): 22 | print "INSTRUCTION SET" 23 | for insn in self.iset: 24 | print str(insn) 25 | 26 | ############################################################################## 27 | # Verify that all operators in iset are the same size and shape 28 | def check_iset(self, iset): 29 | m = len(iset) 30 | print str(m) + " instructions found" 31 | 32 | first_op = iset[0] # Python uses 0-based indexing 33 | first_shape = first_op.matrix.shape 34 | 35 | if (len(first_shape) != 2): 36 | msg = "First operator is not a matrix! Shape = " +str(shape) 37 | raise RuntimeError(msg) 38 | 39 | d = first_shape[0] # d=2 for qubits 40 | if (d != first_shape[1]): 41 | msg = "First operator is not a square matrix! Shape = " + str(shape) 42 | raise RuntimeError(msg) 43 | 44 | for i in range(m): 45 | i_shape = iset[i].matrix.shape 46 | if (i_shape != first_shape): 47 | msg = "Operator " + str(i) + "'s shape does not match first shape: " + str(i_shape) 48 | raise RuntimeError(msg) 49 | 50 | def simplify(self, sequence): 51 | return self.simplify_engine.simplify(sequence) 52 | 53 | # Initialize the global simplify engine with the given rules for all 54 | # subsequent generation of basic approximations 55 | def init_simplify_engine(self, rules): 56 | self.simplify_engine = SimplifyEngine(rules) 57 | -------------------------------------------------------------------------------- /skc/basic_approx/file.py: -------------------------------------------------------------------------------- 1 | # Utilities for managing sequence generation, file chunking, and stats 2 | 3 | import glob 4 | import time 5 | import cPickle 6 | 7 | ############################################################################## 8 | # GLOBAL VARIABLES 9 | global_sequences = [] 10 | chunk_size = 100000 # number of sequences to chunk together into a file 11 | file_counter = 1 12 | filename_prefix = "" 13 | filename_suffix 
= "" 14 | global_count = 0 15 | global_length = 0 16 | generation_count = 0 17 | generation_length = 0 18 | print_update_interval = 1000 19 | 20 | ############################################################################## 21 | def set_filename_prefix(new_filename_prefix): 22 | global filename_prefix 23 | filename_prefix = new_filename_prefix 24 | 25 | ############################################################################## 26 | def set_filename_suffix(new_filename_suffix): 27 | global filename_suffix 28 | filename_suffix = new_filename_suffix 29 | 30 | ############################################################################## 31 | def read_from_file(filename): 32 | #filename = filename_prefix + "-" + filename_suffix +".pickle" 33 | print "Begin reading file: " + filename 34 | f = open(filename, 'rb') 35 | 36 | begin_time = time.time() 37 | 38 | object = cPickle.load(f) 39 | 40 | write_time = time.time() - begin_time 41 | print "Reading time: " + str(write_time) 42 | 43 | f.close() 44 | return object 45 | 46 | ############################################################################## 47 | def dump_to_file(object, custom_filename=""): 48 | if (len(custom_filename) > 0): 49 | custom_filename = "-" + custom_filename 50 | filename = filename_prefix + "-" + filename_suffix + custom_filename + ".pickle" 51 | 52 | print "Begin writing file: " + filename 53 | f = open(filename, 'wb') 54 | 55 | begin_time = time.time() 56 | 57 | # Write the approximations 58 | cPickle.dump(object, f, cPickle.HIGHEST_PROTOCOL) 59 | 60 | write_time = time.time() - begin_time 61 | print "Writing time: " + str(write_time) 62 | 63 | print "Writing done, closing file." 
64 | 65 | f.close() 66 | 67 | ############################################################################## 68 | def chunk_sequences_to_file(): 69 | # Do chunking if necessary 70 | if (len(global_sequences) >= chunk_size): 71 | save_chunk_to_file() 72 | 73 | ############################################################################## 74 | # Returns true if a file already exists for the given generation number 75 | def generation_file_exists(generation_num): 76 | filename_pattern = filename_prefix + "-g" + str(generation_num) \ 77 | + "*.pickle" 78 | filenames = glob.glob(filename_pattern) 79 | return (len(filenames) > 0) 80 | 81 | ############################################################################## 82 | def map_to_file_chunks(generation_num, callback): 83 | filename_pattern = filename_prefix + "-g" + str(generation_num) \ 84 | + "*.pickle" 85 | filenames = glob.glob(filename_pattern) 86 | if (len(filenames) == 0): 87 | raise RuntimeError("No files found for generation " + str(generation_num)) 88 | for filename in filenames: 89 | sequences = read_from_file(filename) 90 | #print "Sequences loaded from " + filename 91 | #for sequence in sequences: 92 | # print str(sequence) 93 | callback(sequences) 94 | 95 | ############################################################################## 96 | # Force saving global sequences to file, without checking chunksize 97 | def save_chunk_to_file(): 98 | global file_counter 99 | print "save_chunk_to_file" 100 | #for sequence in global_sequences: 101 | # print str(sequence) 102 | assert(len(global_sequences) > 0) 103 | dump_to_file(global_sequences, str(file_counter)) 104 | file_counter += 1 105 | reset_global_sequences() 106 | 107 | ############################################################################## 108 | # Garbage collect the sequences 109 | def reset_global_sequences(): 110 | global global_sequences 111 | global_sequences = [] 112 | 113 | 
############################################################################## 114 | def reset_generation_stats(): 115 | global generation_count 116 | global generation_length 117 | generation_count = 0 118 | generation_length = 0 119 | 120 | ############################################################################## 121 | def reset_file_counter(): 122 | global file_counter 123 | file_counter = 1 124 | 125 | ############################################################################## 126 | def reset_global_stats(): 127 | global global_count 128 | global global_length 129 | global_count = 0 130 | global_length = 0 131 | 132 | ############################################################################## 133 | def print_generation_stats(generation_num=0): 134 | if (generation_num > 0): 135 | print "Generation " + str(generation_num) + " Stats" 136 | print str(generation_count) + " sequences generated so far" 137 | print str(generation_length) + " total length generated so far" 138 | 139 | ############################################################################## 140 | def print_global_stats(): 141 | print "GLOBAL STATS" 142 | print str(global_count) + " sequences generated so far" 143 | print str(global_length) + " total length generated so far" 144 | 145 | ############################################################################## 146 | def update_stats(sequence_count, total_length): 147 | global global_count, generation_count, global_length, generation_length 148 | generation_count += sequence_count 149 | generation_length += total_length 150 | global_count += sequence_count 151 | global_length += total_length 152 | 153 | ############################################################################## 154 | # Check if the global sequences is too big, and if so, chunk to file and 155 | # garbage collect / reset the global sequences 156 | def append_sequences(new_sequences): 157 | global global_sequences 158 | global total_length 159 | 160 | 
##############################################################################
# Check if the global sequences list is too big, and if so, chunk to file
# and garbage collect / reset the global sequences.
def append_sequences(new_sequences):
    global global_sequences

    global_sequences.extend(new_sequences)

    # BUGFIX: total_length was declared 'global' although no such module
    # global exists; it is purely a local accumulator, and the declaration
    # only leaked a stray module attribute. Keep it local.
    total_length = 0
    for sequence in new_sequences:
        total_length += len(sequence.ancestors)
    update_stats(len(new_sequences), total_length)

    if ((generation_count % print_update_interval) == 0):
        print(str(generation_count) + " sequences\b")

    chunk_sequences_to_file()

##############################################################################
# Same as append_sequences, for a single new operator.
def append_sequence(new_op):
    global global_sequences

    global_sequences.append(new_op)
    update_stats(1, len(new_op.ancestors))

    if ((generation_count % print_update_interval) == 0):
        print(str(generation_count) + " sequences\b")

    chunk_sequences_to_file()

# --- skc/basic_approx/generate.py ---
# Top level module for generating basic approximations
import numpy

from skc.basic_approx.file import *
from skc.utils import *
from skc.decompose import *

##############################################################################
# GLOBAL VARIABLES
settings = None  # BasicApproxSettings, installed by basic_approxes()

##############################################################################
# Helper which enumerates the next generation of operator sequences: for
# each prefix, append each instruction, simplify, dedupe, decompose into
# kd-tree coordinates, and chunk the survivors to file.
def gen_basic_approx_generation(prefixes):
    # Start with a clean slate
    reset_global_sequences()
    reset_generation_stats()

    simplified_ancestors = []

    # Simplifies the given "new" prefix within this generation.
    # If it simplifies to something already generated, returns True so the
    # outer loop knows to skip it; otherwise returns False so we generate
    # from this (now simplified) prefix.
    def simplify_new(new_op):
        (simplify_length, simplified_sequence) = \
            settings.simplify(new_op.ancestors)
        ancestor_string = list_as_string(simplified_sequence)
        already_done = (ancestor_string in simplified_ancestors)
        if (simplified_sequence == [settings.identity.name] or already_done):
            return True

        # Only assign to operator if we have a valid, new, simplified sequence
        new_op.ancestors = simplified_sequence
        close_to_identity = new_op.matrix_from_ancestors(settings.iset_dict, settings.identity)
        # If this matrix is close to identity, skip it too
        if (close_to_identity):
            return True

        # Decompose into R^{d^2} for later processing into search trees
        (components, K, matrix_H) = unitary_to_axis(new_op.matrix, settings.basis)
        dimensions = settings.basis.sort_canonical_order(components)

        # Take absolute value of components
        for i in range(len(dimensions)):
            dimensions[i] = numpy.abs(dimensions[i])

        # If all dimensions are negative, flip them positive and negate the
        # angle to compensate.
        # NOTE(review): after the abs() above no component can be negative,
        # so this flip only fires when every component is exactly zero --
        # confirm whether abs() was meant to happen after this check.
        sign = -1
        for dimension in dimensions:
            if (numpy.sign(dimension) > 0):
                sign = +1
        if (sign < 0):
            for i in range(len(dimensions)):
                dimensions[i] *= -1
            K = -K
        dimensions.append(K)  # add the angle as the last component
        new_op.dimensions = dimensions

        # Remember it so we don't emit it again this generation
        simplified_ancestors.append(ancestor_string)
        return False
    #------------------------------------------------------------------------
    for prefix in prefixes:
        # Enumerate over iset, appending one op to the end of each prefix
        for insn in settings.iset:
            new_op = prefix.add_ancestors(insn)
            already_done = simplify_new(new_op)
            if (already_done):
                continue
            append_sequence(new_op)

    # Dump whatever's left this generation into one last file
    save_chunk_to_file()

##############################################################################
## Generate table of basic approximations as preprocessing
# l_0 - fixed length of sequences to generate for preprocessing table
def basic_approxes(l_0, new_settings):
    global settings
    settings = new_settings

    reset_global_stats()

    set_filename_suffix("g1")
    # Kick things off by generating the first generation
    # (one-operator sequences for each instruction)
    gen_basic_approx_generation([settings.identity])
    print_generation_stats(1)

    # Iterate over 1 to l_0, generating sequences of increasing length
    for i in range(1,l_0):
        # Set the generation's filename suffix
        set_filename_suffix("g" + str(i+1))

        # Reuse an existing generation file if one was already produced
        already_exists = generation_file_exists(i+1)
        if (already_exists):
            print("Yay! Generation " + str(i+1) + " file already found, skipping")
            # less work for us. Assume it's correct.
            continue
        # Generate a new generation using the previous one as prefixes;
        # we pass in the generation num of the one that just finished
        reset_file_counter()
        map_to_file_chunks(i, gen_basic_approx_generation)
        print_generation_stats(i+1)

    print_global_stats()
Generation " + str(i+1) + " file already found, skipping" 126 | # less work for us. Assume it's correct. 127 | continue 128 | # Generate a new generation using the previous one as prefixes 129 | # we pass in the generation num of the one that just passed 130 | reset_file_counter() 131 | map_to_file_chunks(i, gen_basic_approx_generation) 132 | print_generation_stats(i+1) 133 | 134 | print_global_stats() 135 | 136 | ############################################################################## 137 | # Externally visible function, does top-level timing, etc. 138 | def generate_approxes(l0, settings): 139 | 140 | set_filename_suffix("iset") 141 | settings.print_iset() 142 | dump_to_file(settings.iset) 143 | 144 | # Start the generate timer 145 | begin_time = time.time() 146 | 147 | # Do it! 148 | basic_approxes(l0, settings) 149 | 150 | gen_time = time.time() - begin_time 151 | print "Generation time: " + str(gen_time) 152 | 153 | -------------------------------------------------------------------------------- /skc/basic_approx/process.py: -------------------------------------------------------------------------------- 1 | from skc.basic_approx.file import * 2 | from skc.decompose import * 3 | from skc.operator import * 4 | from skc.kdtree import * 5 | 6 | def components_to_kdpoint(components, basis, angle): 7 | point = basis.sort_canonical_order(components) 8 | point.append(angle) # add the angle as the last component 9 | return point 10 | 11 | def unitary_to_kdpoint(matrix_U): 12 | (components, K, matrix_H) = unitary_to_axis(matrix_U, basis) 13 | return components_to_kdpoint(components, basis, K) 14 | 15 | # Load the sequences from files beginning with filename_prefix and ending with 16 | # filename_suffix with the numbers 1 to filename_upper (inclusive) in between. 17 | # Constructs a kdtree from all the loaded sequences and returns it for searching. 
18 | def build_kdtree(filename_prefix, filecount_upper, filename_suffix): 19 | filenames = [] 20 | for i in range(1,filecount_upper+1): 21 | filenames.append(filename_prefix+str(i)+filename_suffix) 22 | 23 | # This is the data that we load from a file 24 | sequences = [] 25 | for filename in filenames: 26 | new_sequences = read_from_file(filename) 27 | sequences.extend(new_sequences) 28 | 29 | data = [] 30 | # Process this to produce the format the kdtree expects, namely a list of components in each dimension 31 | for operator in sequences: 32 | #print "op= " + str(operator) 33 | #print "matrix= " + str(operator.matrix) 34 | #operator.dimensions = unitary_to_kdpoint(operator.matrix) 35 | #print "dimensions= " + str(operator.dimensions) 36 | # Now dimensions is in R^{d^2} 37 | data.append(operator) 38 | 39 | 40 | # Build it! Kablooey 41 | tree = KDTree.construct_from_data(data) 42 | return tree 43 | 44 | def process_kdtree(base_dir, filecount_upper): 45 | # Start the generate timer 46 | begin_time = time.time() 47 | 48 | tree = build_kdtree(base_dir+"/gen-g", filecount_upper, "-1.pickle") 49 | 50 | set_filename_prefix(base_dir+"/kdt") 51 | set_filename_suffix(str(filecount_upper)) 52 | 53 | build_time = time.time() - begin_time 54 | print "Build time: " + str(build_time) 55 | 56 | dump_to_file(tree) 57 | 58 | -------------------------------------------------------------------------------- /skc/basic_approx/search.py: -------------------------------------------------------------------------------- 1 | # Functions for searching basic approximations 2 | from skc.decompose import * 3 | from skc.basic_approx.file import * 4 | from skc.basic_approx.process import * 5 | from skc.operator import * 6 | 7 | import time 8 | 9 | def load_kdtree(base_dir, filename_suffix): 10 | filename = base_dir+"/kdt-"+filename_suffix+".pickle" 11 | tree = read_from_file(filename) 12 | return tree 13 | 14 | def search_kdtree(tree, search_U, basis): 15 | 16 | begin_time = time.time() 17 | 18 | 
(components, K, matrix_H) = unitary_to_axis(search_U, basis) 19 | # Re-center angles from 0 to 2pi, instead of -pi to pi 20 | if (K < 0): 21 | for (k,v) in components.items(): 22 | components[k] = v * -1 23 | K *= -1 24 | search_op = Operator(name="Search", matrix=search_U) 25 | search_op.dimensions = components_to_kdpoint(components, basis, K) 26 | print "search.dimensions= " + str(search_op.dimensions) 27 | 28 | nearest = tree.query(search_op, t=1) # find nearest 4 points 29 | 30 | end_time = time.time() 31 | print "Search time: " + str(end_time - begin_time) 32 | return nearest[0] 33 | 34 | ############################################################################## 35 | # Find the closest basic approximation in approxes to arbitrary unitary u 36 | # Based on operator norm distance 37 | def find_basic_approx(approxes, u, distance): 38 | min_dist = numpy.finfo(numpy.float32).max # set to max float value at first 39 | closest_approx = None 40 | found = False 41 | for approx in approxes: 42 | #print "approx= " + str(approx) 43 | #print "u= " + str(u) 44 | current_dist = distance(approx.matrix,u.matrix) 45 | #print "current_dist= " + str(current_dist) 46 | #print "min_dist= " + str(min_dist) 47 | if (current_dist < min_dist): 48 | found = True 49 | min_dist = current_dist 50 | closest_approx = approx 51 | 52 | if (not found): 53 | raise RuntimeError("No closest approximation found.") 54 | 55 | return (closest_approx, min_dist) 56 | -------------------------------------------------------------------------------- /skc/basis.py: -------------------------------------------------------------------------------- 1 | # Module for constructing complete, standard, orthogonal unitary bases 2 | # for arbitrary SU(d) 3 | # (Generalized Pauli matrices) 4 | 5 | import numpy 6 | import math 7 | import random 8 | 9 | from skc.utils import * 10 | from skc.operator import * 11 | 12 | ############################################################################## 13 | # A complete 
basis for d x d matrices. 14 | # May be Hermitian, Unitary, or neither. 15 | class Basis: 16 | 17 | def __init__(self, d, basis_dict, identity_key): 18 | self.d = d 19 | self.basis_dict = basis_dict 20 | self.keys_minus_identity = list(basis_dict.keys()) 21 | self.keys_minus_identity.remove(identity_key) 22 | self.identity_key = identity_key 23 | self.identity = basis_dict[identity_key] 24 | assert(self.identity != None) 25 | 26 | def is_hermitian(self): 27 | return ('hermitian' in dir(self)) 28 | 29 | def is_unitary(self): 30 | return ('unitary' in dir(self)) 31 | 32 | def items(self): 33 | return self.basis_dict.items() 34 | 35 | def items_minus_identity(self): 36 | no_id = dict(self.basis_dict) 37 | del no_id[self.identity_key] 38 | return no_id.items() 39 | 40 | def get(self, key): 41 | return self.basis_dict[key] 42 | 43 | def map(self, map_function): 44 | for gate in self.basis_dict.values(): 45 | map_function(gate.matrix) 46 | 47 | # Return a list of the given component dictionary values 48 | # ordered in the canonical key order of this basis 49 | def sort_canonical_order(self, components): 50 | array = [] 51 | for key in self.keys_minus_identity: 52 | array.append(components[key]) 53 | return array 54 | 55 | # Return a dictionary of the given list of values, 56 | # assuming they are in the same key order as this basis 57 | def unsort_canonical_order(self, array): 58 | dict = {} 59 | keys = self.keys_minus_identity 60 | array_len = len(array) 61 | for i in range(array_len): 62 | dict[keys[i]] = array[i] 63 | return dict 64 | 65 | def print_string(self): 66 | print "SU("+str(self.d)+") Basis" 67 | for gate in self.basis_dict.values(): 68 | print str(gate) 69 | print str(gate.matrix) 70 | 71 | ############################################################################## 72 | # Get the standard vector basis for R^n, that is, n n-dimensional vectors 73 | # { v[i] } where v[i] has a 1 in the ith component and 0 everywhere else 74 | def 
get_standard_vector_basis(d): 75 | 76 | # Get standard basis orthonormal basis vectors for Z_d 77 | # v[i] will have 1 in the ith component, zero everywhere else 78 | v = [] 79 | for i in range(d): 80 | new_vec = numpy.zeros(d) 81 | # Set one of the components to be 1, the rest stay zeros 82 | new_vec[i] = 1 83 | v.append(new_vec) 84 | #print "v["+str(i)+"]= " + str(new_vec) 85 | return v 86 | 87 | ############################################################################## 88 | # Returns a dictionary of generalized hermitian 89 | # Pauli matrices as a basis for SU(d), i.e. Gell-Mann matrices 90 | # i.e. d x d orthonormal matrices that are a complete basis for C^{dxd} 91 | # From http://en.wikipedia.org/wiki/Generalizations_of_Pauli_matrices#Construction 92 | def get_hermitian_basis(d): 93 | 94 | #print "Getting Hermitian basis for SU("+str(d)+")" 95 | 96 | range_d = range(d) 97 | 98 | # Base case of recursion, just pass back h_{1,1} 99 | if (d==1): 100 | return {('h', (1,1)):matrixify(1)} 101 | 102 | # Returned basis is a dictionary of key tuples like 103 | # ('h',(1,d)), ... 
##############################################################################
# Returns a Basis of generalized hermitian Pauli matrices for SU(d),
# i.e. Gell-Mann matrices:
# d x d orthonormal matrices that are a complete basis for C^{dxd}
# From http://en.wikipedia.org/wiki/Generalizations_of_Pauli_matrices#Construction
def get_hermitian_basis(d):

    range_d = range(d)

    # Base case of recursion, just pass back h_{1,1}.
    # NOTE(review): the base case returns a plain dict of a raw matrix, not
    # a Basis of Operators like every other depth; it is only ever read via
    # B1.get(('h',(k,d-1))) for 2 <= k < d, an empty range when d=2, so the
    # mismatch never bites -- confirm before relying on d=1 directly.
    if (d==1):
        return {('h', (1,1)):matrixify(1)}

    # Returned basis is a dictionary of key tuples like
    # ('h',(1,d)), ... , ('h', (d,d)),
    # ('f',(k,j)) where k < j
    # ('f',(j,k)) where k > j
    B = {}

    # Recursively get the hermitian basis for d-1
    B1 = get_hermitian_basis(d-1)

    # Generate the h functions
    # h_{1,d} is always I_d
    op_matrix = matrixify(numpy.eye(d))
    op_name = "h_{1,"+str(d)+"}"
    B[('h',(1,d))] = Operator(name=op_name, matrix=op_matrix)

    # h_{k,d} for 1 < k < d: embed h_{k,d-1} with an extra zero row/column
    for k in range(2,d):
        h_k_d1 = B1.get(('h',(k,d-1)))
        op_matrix = matrix_direct_sum(h_k_d1.matrix, matrixify(0))
        op_name = "h_{"+str(k)+","+str(d)+"}"
        B[('h', (k,d))] = Operator(name=op_name, matrix=op_matrix)

    # h_{d,d}: normalized direct sum of I_{d-1} with the scalar (1-d)
    scale = math.sqrt(2.0/(d*(d-1)))
    Id1 = matrixify(numpy.eye(d-1))
    op_matrix = scale * matrix_direct_sum(Id1, matrixify(1-d))
    op_name = "h_{"+str(d)+","+str(d)+"}"
    B[('h',(d,d))] = Operator(name=op_name, matrix=op_matrix)

    # It is easier to generate Ejk with outer product of standard vector basis
    v = get_standard_vector_basis(d)

    # Generate the f functions
    # First, the symmetric elements E_kj + E_jk for 1 <= k < j <= d
    # subtract one from vector indices since math indices begin at 1
    # but computer indices begin at 0
    for j in range(1,(d+1)):
        for k in range(1,j):
            E_kj = matrixify(numpy.outer(v[k-1],v[j-1]))
            E_jk = matrixify(numpy.outer(v[j-1],v[k-1]))
            op_matrix = E_kj + E_jk
            op_name = "f_{"+str(k)+","+str(j)+"}"
            B[('f',(k,j))] = Operator(name=op_name, matrix=op_matrix)

    # Next, the antisymmetric elements -i(E_jk - E_kj) for 1 <= j < k <= d
    # subtract one from vector indices since math indices begin at 1
    # but computer indices begin at 0
    for k in range(1,(d+1)):
        for j in range(1,k):
            E_kj = matrixify(numpy.outer(v[k-1],v[j-1]))
            E_jk = matrixify(numpy.outer(v[j-1],v[k-1]))
            op_matrix = -1j*(E_jk - E_kj)
            op_name = "f_{"+str(k)+","+str(j)+"}"
            B[('f',(k,j))] = Operator(name=op_name, matrix=op_matrix)

    basis = Basis(d = d, basis_dict = B, identity_key = ('h',(1,d)))

    # Sanity check that all returned elements are indeed Hermitian
    for gate in B.values():
        assert_matrix_hermitian(gate.matrix)
        assert_matrix_nonempty(gate.matrix)

    # Marker attribute checked by Basis.is_hermitian()
    basis.hermitian = True

    return basis

##############################################################################
# Returns a Basis of generalized unitary Pauli matrices for SU(d):
# d x d orthonormal matrices that are a complete basis for C^{dxd}
def get_unitary_basis(d):

    range_d = range(d) # Just call it once here

    v = get_standard_vector_basis(d)

    # Primitive d-th root of unity
    zeta = numpy.exp(2j*math.pi / d)

    S = {}
    for j in range_d:
        for k in range_d:
            # S_{j,k} = sum_m zeta^{jm} |m><(m+k) mod d|
            sum = 0;
            for m in range_d:
                outer_prod = numpy.outer(v[m],v[(m+k) % d])
                jkm_term = (zeta**(j*m)) * outer_prod
                sum += jkm_term
            sum = matrixify(sum)
            name = "S_{" + str(j) + "," + str(k) + "}"
            new_op = Operator(name=name, matrix=sum)
            S[(j,k)] = new_op

    basis = Basis(d = d, basis_dict = S, identity_key = (0,0))

    # Sanity check that all returned elements are indeed unitary
    for gate in S.values():
        assert_matrix_unitary(gate.matrix)
        assert_matrix_nonempty(gate.matrix)

    # How do we check if this basis is complete?

    # Marker attribute checked by Basis.is_unitary()
    basis.unitary = True

    return basis
212 | 213 | # We use this to check whether the basis is Hermitian 214 | basis.unitary = True 215 | 216 | return basis 217 | 218 | ############################################################################## 219 | # Hilbert-Schmidt inner product 220 | # For defining orthogonality on operators in Hilbert space 221 | # From http://en.wikipedia.org/wiki/Hilbert%E2%80%93Schmidt_operator 222 | def hs_inner_product(matrix_A, matrix_B): 223 | matrix_A_dag = numpy.transpose(numpy.conjugate(matrix_A)) 224 | return numpy.trace(matrix_A_dag * matrix_B) 225 | 226 | ############################################################################## 227 | # Perform equivalent of matrix inner product, for testing orthogonality 228 | # i.e. sum of element-wise products 229 | def matrix_inner_product(matrix_A, matrix_B): 230 | shape_A = matrix_A.shape 231 | shape_B = matrix_B.shape 232 | assert(shape_A == shape_B) # matrices should have same shape 233 | # Reshape matrices into one long array/vector 234 | matrix_length = shape_A[0] * shape_A[1] 235 | #print "matrix_length= " + str(matrix_length) 236 | reshaped_A = matrix_A.reshape(matrix_length) 237 | reshaped_B = matrix_B.reshape(matrix_length) 238 | #print "reshaped_A= " + str(reshaped_A) 239 | #print "reshaped_B= " + str(reshaped_B) 240 | # Now the reshaped_X look like matrix([[a,b],[c,d]]) 241 | # Is there a better way than converting via a one-row array? 
##############################################################################
# Convert x,y,z 3D Cartesian coordinates to a dictionary of components
# from the Hermitian (Gell-Mann) basis for SU(2).
# The returned component vector is normalized to unit length.
def cart3d_to_h2(x, y, z):
    components = {}
    components[('f',(1,2))] = x
    components[('f',(2,1))] = y
    components[('h',(2,2))] = z
    # BUG FIX: wrap values() in list() -- under Python 3, dict.values()
    # is a view and scipy.linalg.norm cannot consume it; under Python 2,
    # values() is already a list, so this is a no-op.
    norm = scipy.linalg.norm(list(components.values()))
    # NOTE(review): a zero input vector gives norm == 0 and a division
    # error below -- callers appear to always pass a nonzero axis; confirm.
    for k,v in components.items():
        components[k] /= norm
    return components
##############################################################################
# Draw a random Hermitian matrix from the given basis.
# Returns (matrix, components) where components maps each non-identity
# basis key to its (normalized) random coefficient.
def get_random_hermitian(basis):
    # One random (unnormalized) coefficient per non-identity basis element
    components = dict((key, random.random())
                      for (key, _gate) in basis.items_minus_identity())

    # Scale the coefficient vector down to unit length
    norm = scipy.linalg.norm(components.values())
    for key in components:
        components[key] /= norm

    # Check that we have actually normalized this vector
    assert_approx_equals(scipy.linalg.norm(components.values()), 1)

    # Accumulate the weighted sum of basis matrices
    d = basis.d
    total = matrixify(numpy.zeros([d,d]))
    for key, _gate in basis.items_minus_identity():
        total = total + (components[key] * basis.get(key).matrix)

    # A real linear combination of Hermitian matrices must be Hermitian
    assert_matrix_hermitian(total)

    return (total, components)
##############################################################################
# Return a random normalized axis as a dictionary of components over the
# non-identity elements of the given basis.
def get_random_axis(basis):
    # BUG FIX: components was never initialized, so the first assignment
    # below raised NameError on every call.
    components = {}
    for k,v in basis.items_minus_identity():
        components[k] = random.random()

    # list() keeps this working under both Python 2 and 3 dict semantics
    norm = scipy.linalg.norm(list(components.values()))

    for k in components:
        components[k] /= norm

    # Check that we have actually normalized this vector
    assert_approx_equals(scipy.linalg.norm(list(components.values())), 1)
    return components
# Solovay-Kitaev compiler using Dawson's group factor

from skc.operator import *
from skc.utils import *
from skc.basic_approx.search import *

##############################################################################
# Global variables: configured once via the sk_set_* functions below,
# then read by solovay_kitaev during recursion.
the_basis = None
# Not currently used with aram_diagonal_factor method
the_axis = None
the_factor_method = None
the_tree = None

# Build the search tree. Kablooey!
# Loads pickled basic approximations from pickles/<subdir>/gen-g<i>-1.pickle
# into a kd-tree used for nearest-neighbour lookup at recursion depth 0.
def sk_build_tree(subdir, filecount_upper):
    global the_tree
    the_tree = build_kdtree("pickles/"+subdir+"/gen-g", filecount_upper, "-1.pickle")

# Return the basic-approximation operator in the kd-tree nearest to op_U.
def sk_search_tree(op_U):
    op = search_kdtree(the_tree, op_U.matrix, the_basis)
    return op

##############################################################################
# Set the rotation axis handed to the factor method on every level.
def sk_set_axis(axis):
    global the_axis
    the_axis = axis
    print "the_axis= " + str(the_axis)

##############################################################################
# Not currently used with aram_diagonal_factor method
def sk_set_basis(basis):
    global the_basis
    the_basis = basis
    print "the_basis= " + str(the_basis)

##############################################################################
# Set the balanced-group-commutator factoring function
# (e.g. dawson_group_factor or aram_diagonal_factor).
def sk_set_factor_method(factor_method):
    global the_factor_method
    the_factor_method = factor_method
    print "the_factor_method= " + str(the_factor_method.__name__)

##############################################################################
# Recursive Solovay-Kitaev approximation of the unitary operator U.
# n is the recursion depth: at n == 0 we return the nearest basic
# approximation from the kd-tree; otherwise we refine the (n-1)-level
# approximation with a balanced group commutator.
# id and ancestry only label the debug output.
def solovay_kitaev(U, n, id="U", ancestry=""):
    print "*******************************************************************"
    print str(id)+"_"+str(n)
    print ancestry
    print "-------------------------------------------------------------------"

    if (n == 0):
        basic_approx = sk_search_tree(U)
        # discard min_dist for now. but just you wait...
        print "Returning basic approx: " + str(basic_approx)
        return basic_approx
    else:
        print "Beginning level " + str(n)
        U_n1 = solovay_kitaev(U, n-1, 'U', ancestry+id) # U_{n-1}
        print "U_"+str(n-1)+": " + str(U_n1)
        U_n1_dagger = U_n1.dagger()
        # Residual still to be approximated at this level: U * U_{n-1}^dag
        U_U_n1_dagger = U.multiply(U_n1_dagger).matrix
        # Factor the residual into a balanced group commutator V W V^dag W^dag
        V_matrix,W_matrix = the_factor_method(U_U_n1_dagger, the_basis, the_axis)
        print "V: " + str(V_matrix)
        print "W: " + str(W_matrix)
        V = Operator(name="V", matrix=V_matrix)
        W = Operator(name="W", matrix=W_matrix)
        V_n1 = solovay_kitaev(V, n-1, 'V', ancestry+id) # V_{n-1}
        print "V_"+str(n-1)+": " + str(V_n1)
        V_n1_dagger = V_n1.dagger()
        W_n1 = solovay_kitaev(W, n-1, 'W', ancestry+id) # W_{n-1}
        print "W_"+str(n-1)+": " + str(W_n1)
        W_n1_dagger = W_n1.dagger()
        # delta = V_{n-1} W_{n-1} V_{n-1}^dag W_{n-1}^dag, the approximated
        # commutator; the improved approximation is delta * U_{n-1}
        V_n1_dagger_W_n1_dagger = V_n1_dagger.multiply(W_n1_dagger)
        V_n1_W_n1 = V_n1.multiply(W_n1)
        delta = V_n1_W_n1.multiply(V_n1_dagger_W_n1_dagger)
        U_n = delta.multiply(U_n1)
        print "delta_"+str(n)+": " + str(U_n)
        print "Ending level " + str(n)
        return U_n
##############################################################################
# The similarity matrix is the rotation to get from A to B?
# Given two SU(2) rotations described by (axis components, angle), return
# the unitary S that rotates axis A onto axis B. The two angles must agree
# in magnitude. Returns the identity when the axes are (anti)parallel.
def find_similarity_matrix_su2(components_A, components_B,
        angle_A, angle_B, basis):

    # angle_A and angle_B should be the same in magnitude
    # BUG FIX: previously compared undefined locals angle_a/angle_b
    # (their assignments were commented out), raising NameError.
    assert_approx_equals(numpy.abs(angle_A), numpy.abs(angle_B))

    vector_a = basis.sort_canonical_order(components_A)
    vector_b = basis.sort_canonical_order(components_B)

    norm_a = scipy.linalg.norm(vector_a)
    norm_b = scipy.linalg.norm(vector_b)

    # Rotation axes should be unit vectors
    assert_approx_equals(norm_a, 1)
    assert_approx_equals(norm_b, 1)

    # ab = a . b (vector dot product)
    ab_dot_product = numpy.dot(vector_a, vector_b)

    # s = b x a (vector cross product), perpendicular to both a & b;
    # this is the axis about which we rotate a onto b
    vector_s = numpy.cross(vector_b, vector_a)

    norm_s = scipy.linalg.norm(vector_s)
    if (abs(norm_s) < TOLERANCE):
        # The vectors are parallel or anti-parallel; no rotation needed
        # NOTE(review): the anti-parallel case arguably needs a 180-degree
        # rotation instead of identity -- confirm against callers.
        return basis.identity.matrix

    # Angle between the two axes; renormalize in case the lengths drifted
    angle_s = math.acos(ab_dot_product / (norm_a * norm_b))

    assert((angle_s > 0) and (angle_s < PI))
    # Normalize the rotation axis
    for i in range(len(vector_s)):
        vector_s[i] /= norm_s

    # Compose angle and axis of rotation into a matrix
    components_S = basis.unsort_canonical_order(vector_s)
    matrix_U = axis_to_unitary(components_S, angle_s/2.0, basis)

    return matrix_U
#############################################################################
# Factor an SU(2) rotation matrix_U into a balanced group commutator pair,
# assuming U rotates about the x-axis. Mirrors dawson_x_group_factor but
# stays in axis/angle (component) form for the similarity step.
def dawson_x_group_factor_su2(matrix_U, basis):

    (components_U, scale_U, hermitian_U) = unitary_to_axis(matrix_U, basis)

    angle_u = scale_U #/ 2.0

    # st = pow(0.5 - 0.5*sqrt(1 - a[1]*a[1]),0.25);
    # a[0] = cos(phi/2), from the I component above
    # st = sin(theta/2) = 4th root of (1/2 - 1/2 * cos(phi/2))
    ni = math.cos(angle_u/2)
    st = math.pow(0.5 - 0.5*ni, 0.25)
    # ct = cos(theta/2), from cos^2 + sin^2 = 1
    ct = math.sqrt(1-(st**2))
    # This converts to spherical coordinates, theta = pitch, alpha = yaw
    theta = 2*math.asin(st)
    alpha = math.atan(st)

    ax = st*math.cos(alpha)  # x component
    bx = ax
    ay = st*math.sin(alpha)  # y component
    by = ay
    az = ct                  # z components
    bz = -az                 # a and b have opposite z components

    components_a = cart3d_to_h2(x=ax, y=ay, z=az)
    components_b = cart3d_to_h2(x=bx, y=by, z=bz)

    if (approx_equals(theta, 0)):
        # Too close to zero, just return two identities
        # (same guard as dawson_x_group_factor)
        return [basis.identity.matrix, basis.identity.matrix]

    # BUG FIX: matrix_B was never computed (the axis_to_unitary calls were
    # commented out) and the similarity call used undefined names
    # (components_A/angle_A). Rebuilt to match dawson_x_group_factor:
    # B is the rotation about axis b, and C is the similarity carrying
    # A onto B^dagger (rotation by -theta) -- TODO confirm sign convention.
    matrix_B = axis_to_unitary(components_b, theta/2.0, basis)
    matrix_C = find_similarity_matrix_su2(components_a, components_b,
            theta, -theta, basis)

    return [matrix_B, matrix_C]
#############################################################################
# Factor matrix_U into a balanced group commutator pair [V, W] by first
# carrying U onto an equivalent rotation about the supplied x-axis,
# factoring there, and conjugating the factors back.
def dawson_group_factor(matrix_U, basis, x_axis):
    # If U is already (numerically) the identity, the factors are trivial
    dist = fowler_distance(matrix_U, basis.identity.matrix)
    if (approx_equals_tolerance(dist, 0, TOLERANCE10)):
        # Too close to identity, just return a pair of identities
        return [basis.identity.matrix, basis.identity.matrix]

    # U is a rotation about some axis; recover (half) its angle
    (components_U, scale_U, hermitian_U) = unitary_to_axis(matrix_U, basis)
    half_angle = scale_U / 2.0

    # Build the same rotation, but about the x-axis
    matrix_XU = axis_to_unitary(x_axis, half_angle, basis)

    # S is the similarity transform carrying U onto XU
    matrix_S = find_similarity_matrix(matrix_U, matrix_XU, basis)
    matrix_S_dag = numpy.conjugate(numpy.transpose(matrix_S))

    # Factor the x-axis rotation into its commutator pair
    [matrix_A, matrix_B] = dawson_x_group_factor(matrix_XU, basis)

    # Conjugate the factors back out of the x-axis frame
    V = matrix_S * matrix_A * matrix_S_dag
    W = matrix_S * matrix_B * matrix_S_dag

    return [ V,W ]
##########################################################################
# Decompose a unitary into its rotation axis and scale.
# Returns (components, K, matrix_H): components are the normalized
# coordinates of the Hermitian generator H in the given basis, K is the
# overall scale factor (interpreted elsewhere as twice the rotation
# angle), and matrix_H is the un-normalized Hermitian logarithm itself.
def unitary_to_axis(matrix_U, basis):
    # Diagonalize U = V W V^{-1} so the logarithm can be taken entrywise
    (matrix_V, matrix_W) = diagonalize(matrix_U, basis)

    #print "V= " + str(matrix_V)
    #print "W= " + str(matrix_W)

    matrix_ln = get_matrix_logarithm(matrix_W)

    #print "matrix_ln= " + str(matrix_ln)

    # Reconjugate to transform into iH
    matrix_iH = matrix_V * matrix_ln * matrix_V.I

    # Factor out -i (since we used -i in exp_hermitian_to_unitary)
    matrix_H = (-1.0/1j) * matrix_iH

    #print "matrix_H= " + str(matrix_H)
    # NOTE(review): trace_norm is computed but only useful for the
    # commented-out debug print below
    trace_norm = numpy.trace(matrix_H * matrix_H.H)
    #print "trace_norm(H)= " + str(trace_norm)

    # Compare the calculated components with our original
    (components2, K) = get_basis_components(matrix_H, basis)
    #print "K= " + str(K)
    #angle = K/2.0
    # Scale matrix by our calculated angle
    #matrix_H = matrix_H / angle
    return (components2, K, matrix_H)
##############################################################################
# Return a diagonal matrix whose diagonal elements are natural logarithms
# of the corresponding diagonal elements in the input matrix.
# Off-diagonal elements of the result are zero; off-diagonal elements of
# the input are ignored (it is assumed to already be diagonal).
def get_matrix_logarithm(matrix_diag):
    d = matrix_diag.shape[0]
    # Start from an all-zero complex matrix of the same size.
    # BUG FIX: dtype was numpy.complex, a deprecated alias for the builtin
    # complex that was removed in NumPy 1.24; use the builtin directly.
    matrix_ln = numpy.matrix(numpy.zeros([d,d], dtype=complex))

    # Substitute diagonal elements for their natural logarithm
    for i in range(d):
        matrix_ln[(i,i)] = numpy.log(matrix_diag[(i,i)])

    return matrix_ln
##############################################################################
# Rebuild a d x d diagonal matrix from a list of 2x2 diagonal blocks.
# Block i supplies the (2i,2i) and (2i+1,2i+1) entries; everything else
# keeps the identity's values.
def reconstruct_diagonal_matrix(submatrices, d):
    matrix_out = matrixify(numpy.eye(d))

    for i in range(d/2):
        block = submatrices[i]
        row = 2*i
        col = 2*i + 1
        matrix_out[(row,row)] = block[(0,0)]
        matrix_out[(col,col)] = block[(1,1)]

    return matrix_out
#assert_matrices_approx_equal(submatrix_U, delta, trace_distance) 66 | # Write an assert_group_factor method here from skc_group_factor 67 | submatrices_V.append(submatrix_V) 68 | submatrices_W.append(submatrix_W) 69 | 70 | return (submatrices_V, submatrices_W) 71 | 72 | ############################################################################## 73 | # We don't currently use axis, I need to find a better way to make this 74 | # interoperable with dawson_group_factor 75 | def aram_diagonal_factor(matrix_U, basis, axis): 76 | # Get the SU(d) unitary matrix_U in diagonal form (matrix_D) 77 | (matrix_P, matrix_D) = diagonalize(matrix_U, basis) 78 | 79 | submatrices_U = create_diagonal_submatrices(matrix_D, basis.d) 80 | 81 | matrix_U2 = reconstruct_diagonal_matrix(submatrices_U, basis.d) 82 | trace_dist = trace_distance(matrix_D, matrix_U2) 83 | assert_approx_equals(trace_dist, 0) 84 | 85 | # Find balanced group commutator for each submatrix 86 | 87 | # We pass in the basis and x-axis for SU(2) here, since that was the whole 88 | # point of this diagonalization scheme 89 | submatrices_V, submatrices_W = \ 90 | create_group_commutator_submatrices(submatrices_U, H2, X_AXIS) 91 | 92 | # Construct the big group commutator from the subcommutators 93 | matrix_V = reconstruct_diagonal_matrix(submatrices_V, basis.d) 94 | matrix_W = reconstruct_diagonal_matrix(submatrices_V, basis.d) 95 | 96 | matrix_D2 = get_group_commutator(matrix_V, matrix_W) 97 | trace_dist = trace_distance(matrix_D, matrix_D2) 98 | print "dist(D,D2)= " + str(trace_dist) 99 | 100 | # Undiagonalize 101 | matrix_Vt = conjugate(matrix_V, matrix_P) 102 | matrix_Wt = conjugate(matrix_W, matrix_P) 103 | 104 | # Verify that we can multiply it all back again 105 | matrix_U3 = get_group_commutator(matrix_Vt, matrix_Wt) 106 | trace_dist = trace_distance(matrix_U, matrix_U3) 107 | print "dist(U,U3)= " + str(trace_dist) 108 | return (matrix_Vt, matrix_Wt) 
--------------------------------------------------------------------------------
/skc/kdtree.py:
--------------------------------------------------------------------------------
#!/usr/bin/python
# encoding: utf-8

""" KDTree implementation.

    Features:

    - nearest neighbours search

    Matej Drame [matej.drame@gmail.com]
"""

__version__ = "1r11.1.2010"
__all__ = ["KDTree"]

from skc.utils import *

# Radius below which child operators are considered duplicates of their parent
EPS_0 = 1.0 / 32

# Drop leading ops from op_list that are within EPS_0 (fowler_distance) of
# this_matrix; stops at the first op that is far enough away.
# NOTE(review): currently unused — the calls in build_kdtree are commented out.
def eliminate_close_children(op_list, this_matrix):
    while (op_list):
        child = op_list[0]
        dist = fowler_distance(child.matrix, this_matrix)
        if (dist < EPS_0):
            print "eliminated with dist= " + str(dist)
            del op_list[0]
        else:
            break
    return op_list

# Squared Euclidean distance between two coordinate sequences
def square_distance(pointA, pointB):
    # squared euclidean distance
    distance = 0
    dimensions = len(pointA) # assumes both points have the same dimensions
    for dimension in range(dimensions):
        distance += (pointA[dimension] - pointB[dimension])**2
    return distance

# One node of the kd-tree: an operator plus left/right subtrees.
# build_kdtree also attaches .axis and .dim_val after construction.
class KDTreeNode():
    def __init__(self, op, left, right):
        self.left = left
        self.right = right
        self.op = op

    def is_leaf(self):
        return (self.left == None and self.right == None)

class KDTreeNeighbours():
    """ Internal structure used in nearest-neighbours search.

        Keeps the running list of best candidates (sorted by distance to
        the query operator) and the current search radius.
    """
    def __init__(self, query_op, t):
        self.query_op = query_op
        self.t = t # neighbours wanted
        self.largest_distance = 0 # squared
        self.current_best = []

    # Refresh largest_distance from the t-th best entry (or the last one,
    # if fewer than t candidates have been seen so far)
    def calculate_largest(self):
        if self.t >= len(self.current_best):
            self.largest_distance = self.current_best[-1][1]
        else:
            self.largest_distance = self.current_best[self.t-1][1]

    # Insert op into the sorted candidate list, keyed by fowler_distance
    # to the query operator
    def add(self, op):
        sd = fowler_distance(op.matrix, self.query_op.matrix)
        # run through current_best, try to find appropriate place
        for i, e in enumerate(self.current_best):
            if i == self.t:
                return # enough neighbours, this one is farther, let's forget it
            if e[1] > sd:
                self.current_best.insert(i, [op, sd])
                self.calculate_largest()
                return
        # append it to the end otherwise
        self.current_best.append([op, sd])
        self.calculate_largest()

    def get_best(self):
        return [element[0] for element in self.current_best[:self.t]]

class KDTree():
    """ KDTree implementation.

        Example usage:

            from kdtree import KDTree

            data = <load data>  # iterable of points (which are also iterable, same length)
            point = <the point whose neighbours we are looking for>

            tree = KDTree.construct_from_data(data)
            nearest = tree.query(point, t=4) # find nearest 4 points
    """

    def __init__(self, data):
        def build_kdtree(op_list, depth):
            # code based on wikipedia article: http://en.wikipedia.org/wiki/Kd-tree
            if not op_list:
                return None

            # select axis based on depth so that axis cycles through all valid values
            axis = depth % len(op_list[0].dimensions) # assumes all points have the same dimension
            #print "axis= " + str(axis)

            # sort point list and choose median as pivot point,
            # TODO: better selection method, linear-time selection, distribution
            op_list.sort(key=lambda point: point.dimensions[axis])
            median = len(op_list)/2 # choose median (py2 integer division)
            op_this = op_list[median]
            # create node and recursively construct subtrees
            #print "median= " + str(median)
            #print "point[median].dims= " + str(op_list[median].dimensions)
            # Eliminate children that are closer than eps_0
            left_children = op_list[0:median] #eliminate_close_children(op_list[0:median], op_this.matrix)
            right_children = op_list[median+1:] #eliminate_close_children(op_list[median+1:], op_this.matrix)
            node = KDTreeNode(op=op_this,
                              left=build_kdtree(left_children, depth+1),
                              right=build_kdtree(right_children, depth+1)
                              )
            # Remember split axis/value so query() can prune without needing
            # the per-op dimensions (deleted below)
            node.axis = axis
            node.dim_val = op_this.dimensions[axis]
            return node

        self.root_node = build_kdtree(data, depth=0)
        # Garbage collect the dimensions, since we don't need them anymore
        # although we will if we want to combine trees later
        for op in data:
            del op.dimensions

    @staticmethod
    def construct_from_data(data):
        tree = KDTree(data)
        return tree

    # Return the t nearest stored operators to query_op
    # (by fowler_distance, via KDTreeNeighbours)
    def query(self, query_op, t=1):
        statistics = {'nodes_visited': 0, 'far_search': 0, 'leafs_reached': 0}

        def nn_search(node, query_op, t, depth, best_neighbours):
            if node == None:
                return

            #statistics['nodes_visited'] += 1

            # if we have reached a leaf, let's add to current best neighbours,
            # (if it's better than the worst one or if there is not enough neighbours)
            if node.is_leaf():
                #statistics['leafs_reached'] += 1
                best_neighbours.add(node.op)
                return

            # this node is no leaf

            # select dimension for comparison (based on current depth)
            axis = depth % len(query_op.dimensions)

            # figure out which subtree to search
            near_subtree = None # near subtree
            far_subtree = None # far subtree (perhaps we'll have to traverse it as well)

            # compare query_point and point of current node in selected dimension
            # and figure out which subtree is farther than the other
            if query_op.dimensions[axis] < node.dim_val:
                near_subtree = node.left
                far_subtree = node.right
            else:
                near_subtree = node.right
                far_subtree = node.left

            # recursively search through the tree until a leaf is found
            nn_search(near_subtree, query_op, t, depth+1, best_neighbours)

            # while unwinding the recursion, check if the current node
            # is closer to query point than the current best,
            # also, until t points have been found, search radius is infinity
            best_neighbours.add(node.op)

            # check whether there could be any points on the other side of the
            # splitting plane that are closer to the query point than the current best
            if (node.dim_val - query_op.dimensions[axis])**2 < best_neighbours.largest_distance:
                #statistics['far_search'] += 1
                nn_search(far_subtree, query_op, t, depth+1, best_neighbours)

            return

        # if there's no tree, there's no neighbors
        if self.root_node != None:
            neighbours = KDTreeNeighbours(query_op, t)
            nn_search(self.root_node, query_op, t, depth=0, best_neighbours=neighbours)
            result = neighbours.get_best()
        else:
            result = []

        #print statistics
        return result
--------------------------------------------------------------------------------
/skc/main.py:
--------------------------------------------------------------------------------
from skc.basic_approx import *
from skc.operator import *

import cPickle
import time

# Module-level side effect: load the instruction set and the precomputed
# basic approximations from the pickle generated by basic_approx/generate
f = open('basic_approxes.pickle', 'rb')

begin_time = time.time()

iset = cPickle.load(f)

iset_time = time.time() - begin_time
print "Loaded instruction set in: " + str(iset_time)
print "Iset = " + str(iset)

begin_time = time.time()

basic_approxes = cPickle.load(f)
#basic_approxes = [I2]

approx_time = time.time() - begin_time

print "Loaded basic approximations in: " + str(approx_time)
print "Number of BA: " + str(len(basic_approxes))

# Solovay-Kitaev recursion: approximate operator U to level n.
# Level 0 is a lookup in the precomputed basic approximations; level n
# refines level n-1 via a balanced group commutator (bgc_decompose).
def solovay_kitaev(U, n):
    if (n == 0):
        basic_approx, min_dist = find_basic_approx(basic_approxes, U)
        # discard min_dist for now. but just you wait...
        print "Returning basic approx: " + str(basic_approx)
        return basic_approx
    else:
        print "Beginning level " + str(n)
        U_n1 = solovay_kitaev(U, n-1) # U_{n-1}
        print "U_"+str(n-1)+": " + str(U_n1)
        U_n1_dagger = U_n1.dagger()
        # Factor the residual U * U_{n-1}^dagger as a group commutator [V, W]
        V,W = bgc_decompose(U.multiply(U_n1_dagger))
        print "V: " + str(V)
        print "W: " + str(W)
        V_n1 = solovay_kitaev(V, n-1) # V_{n-1}
        print "V_"+str(n-1)+": " + str(V_n1)
        V_n1_dagger = V_n1.dagger()
        W_n1 = solovay_kitaev(W, n-1) # W_{n-1}
        print "W_"+str(n-1)+": " + str(W_n1)
        W_n1_dagger = W_n1.dagger()
        # U_n = V_{n-1} W_{n-1} V_{n-1}^dagger W_{n-1}^dagger U_{n-1}
        V_n1_dagger_W_n1_dagger = V_n1_dagger.multiply(W_n1_dagger)
        V_n1_W_n1 = V_n1.multiply(W_n1)
        delta = V_n1_W_n1.multiply(V_n1_dagger_W_n1_dagger)
        U_n = delta.multiply(U_n1)
        print "delta_"+str(n)+": " + str(U_n)
        print "Ending level " + str(n)
        return U_n
--------------------------------------------------------------------------------
/skc/operator.py:
--------------------------------------------------------------------------------
# Operator/gate, including annotation and ancestry
# The most basic kind of operator is a basic instruction from a universal gate set

import numpy;
import math;
import scipy.linalg;

from skc.utils import *

class Operator:

    # name      - label for this operator (e.g. "H", "T")
    # matrix    - the unitary matrix this operator applies (may be None)
    # ancestors - sequence of instruction labels composing this operator;
    #             defaults to [name] for a primitive instruction.
    #             NOTE(review): the [] default is shared across calls, but it
    #             is only read and rebound here, never mutated, so it is safe.
    def __init__(self, name, matrix, ancestors=[]):
        self.name = name
        self.matrix = matrix
        if (len(ancestors) == 0):
            ancestors = [name]
        self.ancestors = ancestors

    def __str__(self):
        return "Operator: " + str(self.name) + "\n" \
            " Ancestors: " + str(self.ancestors)

    # Combine the name's hash with the product of the ancestor hashes
    def __hash__(self):
        hash = self.name.__hash__()
        for ancestor in self.ancestors:
            hash *= ancestor.__hash__()
        return hash

    def print_matrix(self):
        print " Matrix: " + str(self.matrix)

    # Build a new matrix-less operator whose ancestry is self's ancestors
    # followed by other's ancestors
    def add_ancestors(self, other, new_name=""):
        # Append new ancestors to the end of self
        new_ancestors = self.ancestors + other.ancestors
        new_op = Operator(new_name, None, new_ancestors)
        return new_op

    def ancestors_as_string(self):
        return list_as_string(self.ancestors)

    # Sets the matrix of this operator by its ancestors taken from a set
    # iset - a dictionary of labels to operators for the instruction set
    # identity - the identity to start multiplication with
    # Returns True when the composed matrix is approximately the identity
    # but the operator is not itself named as the identity (i.e. the
    # sequence could have been simplified away).
    def matrix_from_ancestors(self, iset_dict, identity):
        self.matrix = identity.matrix
        for ancestor in self.ancestors:
            self.matrix = self.matrix * iset_dict[ancestor].matrix
        #print "MATRIX IS UNITARY"
        assert_matrix_unitary(self.matrix)
        msg = "Looks like someone forgot to simplify away this sequence, hmm?" \
            + str(self.ancestors)
        dist = trace_distance(self.matrix, identity.matrix)
        close_to_identity = approx_equals(dist, 0)
        named_identity = (self.name == identity.name)
        return close_to_identity and (not named_identity)
        # Uncomment the following two lines if you want to catch identities
        #assert_matrices_approx_not_equal(self.matrix, identity.matrix, \
        #    message=msg)

    # Matrix product self * other, with concatenated ancestry
    def multiply(self, other, new_name=""):
        new_matrix = self.matrix * other.matrix
        new_ancestors = self.ancestors + other.ancestors
        new_op = Operator(new_name, new_matrix, new_ancestors)
        return new_op

    # Adjoint (conjugate transpose): ancestors are reversed and each label
    # gets a 'd' suffix, matching the naming convention used by the
    # simplify rules
    def dagger(self):
        new_matrix = self.matrix.H
        reversed_ancestors = list(self.ancestors)
        reversed_ancestors.reverse()
        new_ancestors = []
        for ancestor in reversed_ancestors:
            new_ancestors.append(ancestor+"d")
        new_name = self.name + "d"
        return Operator(new_name, new_matrix, new_ancestors)

    # Scalar multiple of this operator; the scalar itself is recorded in
    # the ancestry
    def scale(self, scalar, new_name=""):
        new_matrix = self.matrix * scalar
        new_ancestors = self.ancestors + [scalar]
        return Operator(new_name, new_matrix, new_ancestors)

    # Equality by name and ancestry only; the matrix is not compared
    def __eq__(self, other):
        return (self.name == other.name) and (self.ancestors == other.ancestors)

# d x d identity operator
def get_identity(d):
    return Operator("I", matrixify(numpy.eye(d)))

##############################################################################
# SU(2) constants
# 2x2 identity matrix
I2 = get_identity(2)

# Pauli X matrix
SX_matrix = matrixify([[0, 1], [1, 0]])
SX = Operator("SX", SX_matrix)

# Pauli Y matrix
SY_matrix = matrixify([[0, -1j], [1j, 0]])
SY = Operator("SY", SY_matrix)

# Pauli Z matrix
SZ_matrix = matrixify([[1, 0], [0, -1]])
SZ = Operator("SZ", SZ_matrix)

# Hadamard gate
H_matrix = (1/math.sqrt(2)) * matrixify([[1, 1], [1, -1]])
H = Operator("H", H_matrix)

# pi/8 gate
T_matrix = matrixify([[1, 0], [0, numpy.exp(1j * math.pi / 4)]])
T = Operator("T", T_matrix)

# Inverse pi/8 gate
T_inv = Operator("Td", T.matrix.I)
--------------------------------------------------------------------------------
/skc/rotate.py:
--------------------------------------------------------------------------------
# Module for single-qubit rotations

from skc.utils import *

import numpy
import math

# Create the SU(2) matrix for rotating about the x-axis by theta radians
def rotate_X(theta):
    a = math.cos(theta/2.0)
    b = -1j*math.sin(theta/2.0)
    c = -1j*math.sin(theta/2.0)
    d = math.cos(theta/2.0)
    return matrixify([[a,b],[c,d]])

# Create the SU(2) matrix for rotating about the y-axis by theta radians
def rotate_Y(theta):
    a = math.cos(theta/2.0)
    b = -math.sin(theta/2.0)
    c = math.sin(theta/2.0)
    d = math.cos(theta/2.0)
    return matrixify([[a,b],[c,d]])

# Create the SU(2) matrix for rotating about the z-axis by theta radians
def rotate_Z(theta):
    a = math.cos(theta/2.0) - 1j*math.sin(theta/2.0)
    b = 0
    c = 0
    d = math.cos(theta/2.0) + 1j*math.sin(theta/2.0)
    return matrixify([[a,b],[c,d]])
--------------------------------------------------------------------------------
/skc/simplify.py:
--------------------------------------------------------------------------------
from skc.utils import *

# Rewrites gate-label sequences by repeatedly applying SimplifyRule
# instances over a sliding scratch window
class SimplifyEngine:

    #-------------------------------------------------------------------------
    # rules - iterable of SimplifyRule; max_arg_count caches the widest
    # window any rule needs
    def __init__(self, rules):
        self.rules = rules
        self.max_arg_count = 0 # This is a pretty safe heuristic
        for rule in self.rules:
            if (rule.arg_count > self.max_arg_count):
                self.max_arg_count = rule.arg_count

    #-------------------------------------------------------------------------
    # If sequence is non-empty at beginning of method,
    # transfer one element from sequence to scratch and returns True
    # otherwise returns False
    def transfer_to_scratch(self, sequence, scratch):
        sequence_len = len(sequence)
        if (sequence_len > 0):
            new_op = sequence.pop(sequence_len-1)
            scratch.insert(0, new_op)
            return (True, sequence)
        else:
            return (False, sequence)
        #print "transfer_to_scratch= " + str(scratch)

    #-------------------------------------------------------------------------
    # Fill the scratch sequence up to the arg_count of any rule
    def fill_scratch_sequence(self, sequence, scratch):
        long_enough = True
        while (long_enough and (len(scratch) < self.max_arg_count)):
            (long_enough, sequence) = self.transfer_to_scratch(sequence, scratch)
        if (long_enough):
            assert(len(scratch) >= self.max_arg_count)
        return sequence

    #-------------------------------------------------------------------------
    # The main simplify method called from outside.
    # Returns (number of symbols removed, simplified sequence).
    def simplify(self, sequence):
        # Get the length of the old sequence for comparison below
        simplify_length = len(sequence)

        # Make a defensive copy
        sequence = list(sequence)
        scratch_sequence = []
        # This is reset in every iteration below, just declare it here so
        # we have access to it in the first While test (kludge!)
        global_obtains = True
        #global_any_obtains = False

        while (global_obtains):
            global_obtains = False
            #print "Entering while loop!"

            # Prefill the scratch space to min_arg_count
            sequence = self.fill_scratch_sequence(sequence, scratch_sequence)
            #print "sequence= " + str(sequence)
            #print "fill_scratch= " + str(scratch_sequence)

            # Apply the rules repeatedly in scratch_space until none of them
            # obtain
            #first = True
            #any_obtains = False
            #any_obtains = False
            #first = False
            for rule in (self.rules):
                #obtains = False
                #first_time = True
                # If we don't obtain or have enough arguments for this
                # rule, skip it
                if (len(scratch_sequence) >= rule.arg_count):
                    #first_time = False
                    # Set the outer condition to repeat all rules later
                    # Repeat this rule now, in case it obtains again
                    # Each rule only sees the trailing arg_count symbols
                    split = len(scratch_sequence) - rule.arg_count
                    scratch_excess = scratch_sequence[:split]
                    scratch_subset = scratch_sequence[split:]
                    (obtains, scratch_subset) = rule.simplify(scratch_subset)
                    scratch_sequence = scratch_excess + scratch_subset
                    if (obtains):
                        #print "***" + str(scratch_sequence)
                        #any_obtains = True
                        global_obtains = True
                        #global_any_obtains = True
            #print "global_obtains= " + str(global_obtains)

            #if (len(sequence) <= 0):
            #    break

            # Now the scratch sequence is stale, so let's get a fresh op
            #self.transfer_to_scratch(sequence, scratch_sequence)
            #print str(global_obtains)
            #print str(scratch_sequence)
            #print str(sequence)

        # Old sequence could be non-empty, return everything
        sequence = sequence+scratch_sequence
        simplify_length -= len(sequence)

        return (simplify_length, sequence)
############################################################################## 103 | class SimplifyRule: 104 | 105 | def __init__(self, slogan, arg_count): 106 | self.slogan = slogan 107 | self.arg_count = arg_count 108 | 109 | def __str__(self): 110 | return self.slogan + "(" + str(self.arg_count) + ")" 111 | 112 | def simplify(self, arg_list): 113 | # Delegate to subclass-specific implementation 114 | (obtains, C) = self.__simplify__(arg_list) 115 | # Intercept here before returning, and pop the simplified arguments 116 | # off the list 117 | if (obtains): 118 | #print str(self) + " OBTAINS!" 119 | for i in range(self.arg_count): 120 | arg_list.pop(0) 121 | arg_list.insert(0, C) 122 | #print str(arg_list) 123 | return (obtains, arg_list) 124 | 125 | ############################################################################## 126 | class AdjointRule(SimplifyRule): 127 | 128 | def __init__(self, id_sym='I'): 129 | SimplifyRule.__init__(self, "Q*Q\dagger = I", 2) 130 | self.id_sym = id_sym 131 | 132 | def __simplify__(self, arg_list): 133 | # This rule starts out not obtaining by default 134 | activated = False 135 | C = '' 136 | 137 | # Get the first character of the first two arguments 138 | A = arg_list[0] 139 | B = arg_list[1] 140 | len_A1 = len(A)-1 141 | len_B1 = len(B)-1 142 | An = A[len_A1] 143 | Bn = B[len_B1] 144 | # All of A except last character 145 | A0n1 = A[0:len_A1] 146 | # All of B except last character 147 | B0n1 = B[0:len_B1] 148 | if ((A == B0n1) and (Bn == 'd')): 149 | # Test if B is the adjoint of A 150 | C = self.id_sym 151 | #print "A= " + str(A) 152 | #print "B0n1= " + str(B0n1) 153 | #print "Bn= " + str(Bn) 154 | activated = True 155 | elif ((B == A0n1) and (An == 'd')): 156 | # Test if A is the adjoint of B 157 | C = self.id_sym 158 | activated = True 159 | 160 | return (activated, C) 161 | 162 | ############################################################################## 163 | class DoubleIdentityRule(SimplifyRule): 164 | def 
__init__(self, symbol, id_sym = 'I'): 165 | self.symbol = symbol 166 | self.id_sym = id_sym 167 | SimplifyRule.__init__(self, "Q*Q = I", 2) 168 | 169 | def __simplify__(self, arg_list): 170 | activated = False 171 | A = arg_list[0] 172 | B = arg_list[1] 173 | C = '' 174 | if ((A==self.symbol) and (B==self.symbol)): 175 | activated = True 176 | C = self.id_sym 177 | 178 | return (activated, C) 179 | 180 | ############################################################################## 181 | class IdentityRule(SimplifyRule): 182 | def __init__(self, id_sym='I'): 183 | SimplifyRule.__init__(self, "I*Q = Q", 2) 184 | self.id_sym = id_sym 185 | 186 | def __simplify__(self, arg_list): 187 | activated = False 188 | A = arg_list[0] 189 | B = arg_list[1] 190 | C = '' 191 | if (A==self.id_sym): 192 | activated = True 193 | C = B 194 | elif (B==self.id_sym): 195 | activated = True 196 | C = A 197 | 198 | return (activated, C) 199 | 200 | ############################################################################## 201 | class GeneralRule(SimplifyRule): 202 | def __init__(self, sequence, new_sym = 'I'): 203 | self.sequence = sequence 204 | self.new_sym = new_sym 205 | slogan = '' 206 | for arg in sequence: 207 | slogan += arg 208 | slogan += " = " + new_sym 209 | SimplifyRule.__init__(self, slogan, len(sequence)) 210 | 211 | def __simplify__(self, arg_list): 212 | 213 | for i in range(self.arg_count): 214 | if (self.sequence[i] != arg_list[i]): 215 | # If at any point we have a mismatch, return right away 216 | return (False, '') 217 | 218 | #print "GeneralRule.__simplify__: " + str(arg_list) + " -> " + self.new_sym 219 | # If we made it all the way through, congrats! 
We have an identity 220 | return (True, self.new_sym) 221 | -------------------------------------------------------------------------------- /skc/tile.py: -------------------------------------------------------------------------------- 1 | from skc_utils import * 2 | from skc_basic_approx import * 3 | 4 | # The leaves of recursive subtiling have at most 3 points in them 5 | TILE_COUNT = 150 6 | MAX_INSERT_DEPTH = 475 7 | 8 | class Tile_SU2: 9 | 10 | # pivot is a tuple of psi, theta, and phi 11 | def __init__(self, psi, theta, phi, width): 12 | self.psi = psi 13 | self.theta = theta 14 | self.phi = phi 15 | self.width = width 16 | self.points = [] 17 | self.count = 0 18 | 19 | # Returns True if this tile has split, False otherwise 20 | def is_split(self): 21 | # Boolean which is True if subtiles exist and False otherwise 22 | subtiles_exist = ('subtiles' in dir(self)) 23 | # Boolean which is True if points is <= TILE_COUNT and False otherwise 24 | points_less = (len(self.points) <= TILE_COUNT) 25 | return (subtiles_exist) 26 | 27 | # Set a function to be called from this tile when it splits into subtiles 28 | def set_split_hook(self, split_hook): 29 | self.split_hook = split_hook 30 | 31 | # Don't call this indiscriminately, or we'll have infinite recursion 32 | def subtile(self): 33 | if ('split_hook' in dir(self)): 34 | self.split_hook() 35 | 36 | psi1_subpivot = self.psi - (self.width/4) 37 | psi2_subpivot = self.psi + (self.width/4) 38 | theta1_subpivot = self.theta - (self.width/4) 39 | theta2_subpivot = self.theta + (self.width/4) 40 | # Tiles are wider in the phi direction b/c of asymmetry in hyperspherical coords 41 | phi1_subpivot = self.phi - (self.width/2) 42 | phi2_subpivot = self.phi + (self.width/2) 43 | subwidth = self.width/2.0 44 | self.subtiles = {}; 45 | subtile111 = Tile_SU2(psi1_subpivot, theta1_subpivot, phi1_subpivot, subwidth) 46 | self.subtiles[(1,1,1)] = subtile111 47 | subtile112 = Tile_SU2(psi1_subpivot, theta1_subpivot, phi2_subpivot, 
subwidth) 48 | self.subtiles[(1,1,2)] = subtile112 49 | subtile121 = Tile_SU2(psi1_subpivot, theta2_subpivot, phi1_subpivot, subwidth) 50 | self.subtiles[(1,2,1)] = subtile121 51 | subtile122 = Tile_SU2(psi1_subpivot, theta2_subpivot, phi2_subpivot, subwidth) 52 | self.subtiles[(1,2,2)] = subtile122 53 | subtile211 = Tile_SU2(psi2_subpivot, theta1_subpivot, phi1_subpivot, subwidth) 54 | self.subtiles[(2,1,1)] = subtile211 55 | subtile212 = Tile_SU2(psi2_subpivot, theta1_subpivot, phi2_subpivot, subwidth) 56 | self.subtiles[(2,1,2)] = subtile212 57 | subtile221 = Tile_SU2(psi2_subpivot, theta2_subpivot, phi1_subpivot, subwidth) 58 | self.subtiles[(2,2,1)] = subtile221 59 | subtile222 = Tile_SU2(psi2_subpivot, theta2_subpivot, phi2_subpivot, subwidth) 60 | self.subtiles[(2,2,2)] = subtile222 61 | 62 | def find_closest_point(self, operator, distance): 63 | if (not self.is_split()): 64 | # Not split yet, just do a linear search from skc_basic_approx 65 | return find_basic_approx(self.points, operator, distance) 66 | else: 67 | indices = self.get_hspherical_indices(operator) 68 | subtile = self.subtiles[indices] 69 | assert(subtile != None) 70 | return subtile.find_closest_point(operator, distance) 71 | 72 | def get_hspherical_indices(self, operator): 73 | matrix = operator.matrix 74 | unitary = matrix_to_unitary4d(matrix) 75 | [psi, theta, phi] = unitary_to_hspherical(unitary) 76 | 77 | # Decide on psi pivot 78 | if (psi <= self.psi): 79 | psi_index = 1 80 | else: 81 | psi_index = 2 82 | 83 | # Decide on theta pivot 84 | if (theta <= self.theta): 85 | theta_index = 1 86 | else: 87 | theta_index = 2 88 | 89 | # Decide on phi pivot 90 | if (phi <= self.phi): 91 | phi_index = 1 92 | else: 93 | phi_index = 2 94 | return (psi_index, theta_index, phi_index) 95 | 96 | def insert_into_subtiles(self, operator, depth): 97 | (psi_index, theta_index, phi_index) = self.get_hspherical_indices(operator) 98 | 99 | subtile = self.subtiles[(psi_index, theta_index, phi_index)] 100 | 
assert(subtile != None) 101 | subtile.insert_helper(operator, depth) 102 | 103 | def insert(self, operator): 104 | self.insert_helper(operator, MAX_INSERT_DEPTH) 105 | 106 | def insert_helper(self, operator, depth): 107 | 108 | if (depth <= 0): 109 | print "********************************************" 110 | self.print_string(0) 111 | raise RuntimeError("Exceeded maximum insert depth here") 112 | 113 | # We always keep a cumulative count of this tile and all subtiles recursively here 114 | self.count += 1 115 | 116 | if (self.count < TILE_COUNT): 117 | # We are still a tile of constant size, even with one more addition 118 | #assert(len(self.points) < TILE_COUNT) 119 | self.points.append(operator) 120 | return 121 | elif (not self.is_split()): 122 | # Else if have not split yet, we need to subtile! 123 | # But don't forget to insert this new operator 124 | self.points.append(operator) 125 | self.subtile() 126 | #self.print_string(0) 127 | assert(self.is_split()) 128 | 129 | #print str(self.points) 130 | for point in self.points: 131 | self.insert_into_subtiles(point, depth-1) 132 | 133 | # Clear self.points, since these have now been distributed among our subtiles 134 | self.points = [] 135 | else: 136 | # Otherwise, delegate this new 137 | self.insert_into_subtiles(operator, depth-1) 138 | 139 | def print_string(self, depth): 140 | parent = "Tile (psi,theta,phi) = ("+str(self.psi)+","+str(self.theta)+","+ \ 141 | str(self.phi)+"), has " + \ 142 | str(self.count) + " points" 143 | print_indented(parent, depth) 144 | 145 | if (self.is_split()): 146 | self.print_subtiles(depth+1) 147 | else: 148 | points_string = " Unsplit. 
Points are = " 149 | for point in self.points: 150 | points_string += str(point) 151 | print_indented(points_string, depth) 152 | 153 | def print_subtiles(self, depth): 154 | for index, subtile in self.subtiles.items(): 155 | subtile.print_string(depth) -------------------------------------------------------------------------------- /skc/trig.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy 3 | 4 | from skc.utils import * 5 | 6 | ############################################################################## 7 | # Recover an angle given the cosine and sine values, taken e.g. from a 8 | # complex value. 9 | # This is tricky because acos usually goes from [0,pi] while 10 | # asin usually goes from [-pi/2,+pi/2], whereas angles in general go from 11 | # [0,2pi]. Using the signs of the cosine and sine value, we can figure out 12 | # the correct original angle 13 | def recover_angle(cos_phi, sin_phi): 14 | # Recover two versions of the angle from the cosine and sine 15 | phi_acos = math.acos(cos_phi) 16 | phi_asin = math.asin(sin_phi) 17 | 18 | sign_cos = numpy.sign(cos_phi) 19 | sign_sin = numpy.sign(sin_phi) 20 | 21 | # phi \in [0, pi/2] 22 | if ((sign_cos > 0) and (sign_sin > 0)): 23 | assert_approx_equals(phi_acos, phi_asin) 24 | assert(phi_acos > 0) 25 | assert(phi_acos < PI_HALF) 26 | return phi_acos 27 | # phi \in [pi/2, pi] 28 | elif ((sign_cos < 0) and (sign_sin > 0)): 29 | # sine is symmetric about pi/2 30 | # phi_asin will be in [0,pi/2], so reflect it into [pi/2,pi] 31 | phi_asin = PI - phi_asin 32 | assert_approx_equals(phi_acos, phi_asin) 33 | assert(phi_acos > PI_HALF) 34 | assert(phi_acos < PI) 35 | return phi_acos 36 | # phi \in [pi,3pi/2] 37 | elif ((sign_cos < 0) and (sign_sin < 0)): 38 | # cosine is symmetric about pi 39 | # phi_acos will be in [pi/2,pi], so reflect it into [pi,3pi/2] 40 | phi_acos = TWO_PI - phi_acos 41 | # phi_asin will be in [-pi/2,0], so reflect it into 
[pi,3pi/2] 42 | phi_asin = PI - phi_asin 43 | assert_approx_equals(phi_acos, phi_asin) 44 | assert(phi_acos > PI) 45 | assert(phi_acos < THREE_PI_HALF) 46 | return phi_acos 47 | # phi \in [3pi/2,2pi] 48 | elif ((sign_cos > 0) and (sign_sin < 0)): 49 | # phi_acos will be in [0,pi/2], so reflect it into [3pi/2,2pi] 50 | phi_acos = TWO_PI - phi_acos 51 | # phi_asin will be in [-pi/2,0] so shift it into [3pi/2,2pi] 52 | phi_asin = phi_asin + TWO_PI 53 | assert_approx_equals(phi_acos, phi_asin) 54 | assert(phi_acos > THREE_PI_HALF) 55 | assert(phi_acos < TWO_PI) 56 | return phi_acos 57 | else: 58 | # We shouldn't even be here, Mr. Frodo 59 | assert(false) -------------------------------------------------------------------------------- /skc/utils.py: -------------------------------------------------------------------------------- 1 | # Utilities for Solovay-Kitaev compiler 2 | 3 | import math 4 | import numpy 5 | import random 6 | import scipy.linalg 7 | 8 | TOLERANCE = 1e-15 9 | TOLERANCE2 = 1e-14 10 | TOLERANCE3 = 1e-13 11 | TOLERANCE4 = 1e-12 12 | TOLERANCE5 = 1e-11 13 | TOLERANCE6 = 1e-10 14 | TOLERANCE7 = 1e-9 15 | TOLERANCE8 = 1e-8 16 | TOLERANCE9 = 1e-7 17 | TOLERANCE10 = 1e-6 18 | 19 | TOLERANCE_GREATER_THAN = 1e4 20 | 21 | PI = math.pi 22 | PI_HALF = PI / 2 23 | THREE_PI_HALF = 1.5*PI 24 | TWO_PI = 2*PI 25 | 26 | ############################################################################## 27 | def normalize(array): 28 | norm = scipy.linalg.norm(array) 29 | array = numpy.array(array) / norm 30 | assert_approx_equals(scipy.linalg.norm(array), 1.0) 31 | return array 32 | 33 | ############################################################################## 34 | def matrixify(array): 35 | return numpy.matrix(array, dtype=numpy.complex) 36 | 37 | ############################################################################## 38 | def trace_norm(M): 39 | trace = numpy.trace(M * M.H) 40 | return math.sqrt(trace) 41 | 42 | 
##############################################################################
def operator_norm(M):
    """Largest absolute eigenvalue of M."""
    eig_vals = scipy.linalg.eigvals(M)
    eig_vals = [numpy.abs(x) for x in eig_vals]
    return numpy.max(eig_vals)


##############################################################################
def trace_distance(matrix_A, matrix_B):
    """Distance between two matrices: the 2-norm of the eigenvalues of
    (A - B)(A - B)^dagger."""
    matrix_diff = matrix_A - matrix_B
    matrix_diff_dag = numpy.transpose(numpy.conjugate(matrix_diff))
    product = matrix_diff * matrix_diff_dag
    trace_vals = scipy.linalg.eigvals(product)
    return scipy.linalg.norm(trace_vals)


##############################################################################
def fowler_distance(matrix_A, matrix_B):
    """Phase-invariant distance sqrt((d - |tr(A^dagger B)|) / d) between
    two same-shaped d x d matrices."""
    d = matrix_A.shape[0]
    assert(matrix_A.shape == matrix_B.shape)
    matrix_adjoint = numpy.transpose(numpy.conjugate(matrix_A))
    prod = matrix_adjoint * matrix_B
    trace = numpy.trace(prod)
    frac = (1.0 * (d - numpy.abs(trace))) / d
    # Because frac can be negative due to floating point error, take the
    # absolute value before taking square root, since we expect real numbers
    return math.sqrt(numpy.abs(frac))


##############################################################################
def list_as_string(list_A):
    """Join a list of strings with '-' separators; an empty list gives "".

    Uses str.join (linear time) rather than the original repeated '+='
    concatenation, which is quadratic.
    """
    return "-".join(list_A)


##############################################################################
def assert_and_print(bool_condition, arg_to_stringify, msg_prefix=""):
    """Assert bool_condition, printing a diagnostic line first on failure."""
    if (not bool_condition):
        print("[ASSERTION FAILED] " + msg_prefix + ": " + str(arg_to_stringify))
    assert(bool_condition)


##############################################################################
def assert_approx_not_equals_tolerance(value1, value2, tolerance, message=""):
    """Assert that value1 and value2 differ by at least tolerance."""
    diff = abs(value1 - value2)
    assert_and_print(diff >= tolerance, diff, message)
##############################################################################
def assert_in_range(value, lower, upper, message=""):
    """Assert that lower < value < upper (strict bounds)."""
    assert_and_print(lower < value, value, "Lower bound= " + str(lower))
    assert_and_print(value < upper, value, "Upper bound= " + str(upper))


##############################################################################
def assert_approx_equals_tolerance(value1, value2, tolerance, message=""):
    """Assert that value1 and value2 differ by less than tolerance."""
    diff = abs(value1 - value2)
    assert_and_print(diff < tolerance, diff, message)


##############################################################################
def assert_approx_equals(value1, value2, message=""):
    """Assert near-equality using the default tolerance TOLERANCE2."""
    assert_approx_equals_tolerance(value1, value2, TOLERANCE2, message)


##############################################################################
def assert_approx_not_equals(value1, value2, message=""):
    """Assert non-equality using the default tolerance TOLERANCE2."""
    assert_approx_not_equals_tolerance(value1, value2, TOLERANCE2,
                                       message=message)


##############################################################################
def approx_equals_tolerance(value1, value2, tolerance):
    """Return True if value1 and value2 differ by less than tolerance."""
    return (abs(value1 - value2) < tolerance)


##############################################################################
def approx_equals(value1, value2):
    """Return True if the values differ by less than the default TOLERANCE."""
    return (abs(value1 - value2) < TOLERANCE)


##############################################################################
# Indented printing based on depth
def print_indented(message, depth):
    # Two spaces of indentation per depth level
    print((" " * (depth * 2)) + message)


##############################################################################
# Chain the tensor product of multiple operators
def tensor_chain(op_vector):
    """Fold a list of operators into a single Kronecker product.

    The last operator seeds the product, which then absorbs the remaining
    operators in list order (matching the original pop-based behavior):
    kron(...kron(kron(op[-1], op[0]), op[1])..., op[-2]).

    Unlike the original, this no longer pops an element off the caller's
    list, which silently mutated shared state.

    Raises RuntimeError for an empty list.
    """
    if (len(op_vector) == 0):
        raise RuntimeError("Cannot chain empty list of operators")
    remaining = list(op_vector)  # local copy so the argument is untouched
    product = remaining.pop()
    for op in remaining:
        product = numpy.kron(product, op)
    return product


##############################################################################
def vector_distance(vector_A, vector_B):
    """Euclidean distance between two vectors."""
    vector_diff = vector_A - vector_B
    return scipy.linalg.norm(vector_diff)


##############################################################################
def matrix_direct_sum(matrix_A, matrix_B):
    """Direct sum: A in the upper-left block, B in the lower-right block."""
    direct_sum = numpy.zeros(numpy.add(matrix_A.shape, matrix_B.shape))
    direct_sum = matrixify(direct_sum)
    direct_sum[:matrix_A.shape[0], :matrix_A.shape[1]] = matrix_A
    direct_sum[matrix_A.shape[0]:, matrix_A.shape[1]:] = matrix_B
    return direct_sum


##############################################################################
def assert_vectors_approx_equal(vector1, vector2, tolerance=None):
    """Assert two equal-length vectors agree elementwise within tolerance
    (default TOLERANCE3, resolved at call time rather than bound at
    definition time; explicit callers are unaffected).
    """
    if tolerance is None:
        tolerance = TOLERANCE3
    vector_len1 = len(vector1)
    vector_len2 = len(vector2)
    assert(vector_len1 == vector_len2)
    msg = "Vectors not-equal:\n" + str(vector1) + "\n" + str(vector2)
    for i in range(0, vector_len1):
        assert_approx_equals_tolerance(vector1[i], vector2[i], tolerance, msg)


##############################################################################
def assert_matrices_approx_equal(matrix1, matrix2, distance=None,
                                 tolerance=None):
    """Assert dist(matrix1, matrix2) is approximately zero.

    distance defaults to trace_distance and tolerance to TOLERANCE3,
    both resolved lazily at call time.
    """
    if distance is None:
        distance = trace_distance
    if tolerance is None:
        tolerance = TOLERANCE3
    dist = distance(matrix1, matrix2)
    msg = "Matrices not-equal:\n" + str(matrix1) + "\n" + str(matrix2)
    assert_approx_equals_tolerance(dist, 0, tolerance=tolerance, message=msg)


##############################################################################
def assert_matrices_approx_not_equal(matrix1, matrix2, distance=None,
                                     tolerance=None, message=""):
    """Assert dist(matrix1, matrix2) is NOT approximately zero.

    distance defaults to trace_distance and tolerance to TOLERANCE3,
    both resolved lazily at call time.
    """
    if distance is None:
        distance = trace_distance
    if tolerance is None:
        tolerance = TOLERANCE3
    dist = distance(matrix1, matrix2)
    msg = "Matrices equal:\n" + str(matrix1) + "\n" + str(matrix2)
    assert_approx_not_equals_tolerance(dist, 0, tolerance=tolerance,
                                       message=message)
##############################################################################
def assert_matrix_hermitian(matrix):
    """Assert that matrix equals its own conjugate transpose."""
    adjoint = numpy.transpose(numpy.conjugate(matrix))
    assert_matrices_approx_equal(matrix, adjoint, trace_distance)


##############################################################################
def assert_matrix_unitary(matrix, tolerance=None):
    """Assert that matrix * matrix^dagger is the identity, within
    tolerance (default TOLERANCE3, resolved at call time)."""
    if tolerance is None:
        tolerance = TOLERANCE3
    d = matrix.shape[0]
    # Build the complex identity directly; matrixify's numpy.complex
    # dtype alias was removed in NumPy 1.24
    identity = numpy.matrix(numpy.eye(d), dtype=complex)
    adjoint = numpy.transpose(numpy.conjugate(matrix))
    product = matrix * adjoint
    assert_matrices_approx_equal(product, identity, trace_distance, tolerance)


##############################################################################
def assert_matrix_nonempty(matrix):
    """Assert that matrix is not numerically all-zero, via tr(M M^dagger)."""
    abs_trace = numpy.abs(numpy.trace(matrix * matrix.H))
    if (abs_trace < TOLERANCE3):
        print("abs_trace= " + str(abs_trace))
        print("Matrix is empty \n" + str(matrix))
    assert(abs_trace > TOLERANCE3)


##############################################################################
def matrix_exp_diag(matrix):
    """Exponential of a diagonal matrix: exponentiate each diagonal entry.

    Off-diagonal entries of the input are ignored; the result is built
    from the identity, so its off-diagonal entries are zero. (The local
    accumulator no longer shadows the sibling function matrix_exp.)
    """
    d = matrix.shape[0]
    result = numpy.matrix(numpy.eye(d), dtype=complex)
    for i in range(d):
        result[(i, i)] = numpy.exp(matrix[(i, i)])
    return result


##############################################################################
# Taylor series approximation
def matrix_exp(matrix, steps):
    """Approximate the matrix exponential by its truncated Taylor series:
    I + M + M^2/2! + ... + M^steps/steps!.

    The accumulator is named 'series' instead of the original 'sum',
    which shadowed the builtin.
    """
    d = matrix.shape[0]
    identity = numpy.matrix(numpy.eye(d), dtype=complex)
    series = identity
    term = identity
    denom = 1
    for i in range(steps):
        term = term * matrix
        denom = denom * (i + 1)
        series = series + (term / denom)
    return series
##############################################################################
def n_from_eps(eps, eps_0, c_approx):
    """Solovay-Kitaev recursion depth needed to reach accuracy eps,
    starting from a basic-approximation accuracy eps_0 with approximation
    constant c_approx:

        ceil( ln( ln(eps*c^2) / ln(eps_0*c^2) ) / ln(3/2) )
    """
    c_sq = c_approx ** 2
    inner_ratio = numpy.log(eps * c_sq) / numpy.log(eps_0 * c_sq)
    levels = numpy.log(inner_ratio) / numpy.log(3.0 / 2)
    return numpy.ceil(levels)
##############################################################################
# Given two axes in R^3, return the matrix which is SU(2) operator corresponding
# to a rotation of axis1 to axis2
def get_rotation_matrix(axis_1, axis_2, basis, angle_sign=-1):
    # Work in canonical (sorted) component order so the cross product is
    # taken over plain R^3 component vectors
    vec_1 = basis.sort_canonical_order(axis_1)
    vec_2 = basis.sort_canonical_order(axis_2)
    # The rotation axis is perpendicular to both input axes
    cross_12 = numpy.cross(vec_1, vec_2)
    print("axis_1= " + str(vec_1))
    print("axis_2= " + str(vec_2))
    print("axis_T= " + str(cross_12))
    # Renormalize to a unit vector
    cross_12 = numpy.array(cross_12) / scipy.linalg.norm(cross_12)
    print("axis_T= " + str(cross_12))

    rotation_axis = basis.unsort_canonical_order(cross_12)

    # Angle between the two (unit) input axes
    theta = math.acos(numpy.inner(vec_1, vec_2))
    assert(theta > 0.0)

    # Halve for the double covering of SO(3) by SU(2); the sign convention
    # is controlled by angle_sign (default -1)
    half_angle = angle_sign * (theta / 2.0)
    print("angle= " + str(half_angle))
    return axis_to_unitary(rotation_axis, half_angle, basis)
" 36 | # Renormalize components 37 | #for key,value in components2.items(): 38 | # components2[key] = value / K 39 | # print "("+str(key)+")= " + str(components2[key]) 40 | 41 | # Assert that the components are now normalized 42 | norm = scipy.linalg.norm(components.values()) 43 | norm2= scipy.linalg.norm(components2.values()) 44 | print "norm= " + str(norm) 45 | print "norm2= " + str(norm2) 46 | assert_approx_equals(norm2, 1) 47 | 48 | for key in components2.keys(): 49 | print str(key) 50 | print " actual= " + str(components[key]) 51 | print " comput= " + str(components2[key]) 52 | ratio = abs(components[key]) / abs(components2[key]) 53 | #print " ratio= " + str(ratio) 54 | assert_approx_equals_tolerance(ratio, 1, TOLERANCE5) 55 | 56 | # Check that we are truly normalized 57 | assert_approx_equals(scipy.linalg.norm(components2.values()), 1) 58 | print "norm3= " + str(scipy.linalg.norm(components2.values())) 59 | 60 | # Recompose unitary from recovered hermitian and angle 61 | H2 = matrix_from_components(components2, B) 62 | # Scale Hermitian by angle (but only for SU(4)?) 
63 | if (d==4): 64 | H2 = H2 / angle 65 | assert_matrix_hermitian(H2) 66 | print "H2= " + str(H2) 67 | dist = fowler_distance(H2, matrix_H) 68 | print "dist(H2,H)= " + str(dist) 69 | assert_approx_equals_tolerance(dist, 0, TOLERANCE10) 70 | dist = trace_distance(H2, matrix_H) 71 | assert_approx_equals_tolerance(dist, 0, TOLERANCE5) 72 | 73 | U2 = exp_hermitian_to_unitary(matrix_H=matrix_H, angle=angle, basis=B) 74 | print "U2= " + str(U2) 75 | dist = fowler_distance(matrix_U, U2) 76 | print "dist(U2,U)= " + str(dist) 77 | assert_approx_equals_tolerance(dist, 0, TOLERANCE10) 78 | 79 | test_decomposing_unitary(d=2) 80 | test_decomposing_unitary(d=4) 81 | #test_decomposing_unitary(d=8) 82 | -------------------------------------------------------------------------------- /tests/test_aram_diagonal_factor.py: -------------------------------------------------------------------------------- 1 | from skc_diagonalize import * 2 | from skc_basis import * 3 | from skc_compose import * 4 | from skc_dawson_factor import * 5 | from skc_group_factor import * 6 | 7 | import numpy 8 | 9 | d=4 10 | H4 = get_hermitian_basis(d=d) 11 | H2 = get_hermitian_basis(d=2) 12 | X_AXIS = cart3d_to_h2(x=1, y=0, z=0) 13 | 14 | basis = H4 15 | axis = pick_random_axis(H4) 16 | 17 | (matrix_U, components, angle) = get_random_unitary(basis) 18 | print "matrix_U= " + str(matrix_U) 19 | 20 | (matrix_V, matrix_W) = aram_diagonal_factor(matrix_U, H4) 21 | 22 | matrix_U2 = get_group_commutator(matrix_V, matrix_W) 23 | 24 | print "matrix_U2= " + str(matrix_U2) -------------------------------------------------------------------------------- /tests/test_canonical_order.py: -------------------------------------------------------------------------------- 1 | # Test whether we can recover the angle of rotation from a random unitary in SU(d) 2 | 3 | from skc_diagonalize import * 4 | from skc_basis import * 5 | from skc_utils import * 6 | from skc_compose import * 7 | from skc_decompose import * 8 | 9 | import 
numpy 10 | import math 11 | 12 | B = get_hermitian_basis(d=4) 13 | 14 | (matrix_U, components, angle) = get_random_unitary(B) 15 | 16 | for k,v in components.items(): 17 | print str(k) + " => " + str(v) 18 | 19 | sorted_components = B.sort_canonical_order(components) 20 | 21 | for x in sorted_components: 22 | print str(x) 23 | 24 | (matrix_U2, components2, angle2) = get_random_unitary(B) 25 | 26 | sorted_components2 = B.sort_canonical_order(components2) 27 | 28 | # Verify that order of components in both vectors correspond to canonical order 29 | for key in components.keys(): 30 | value = components[key] 31 | value2 = components2[key] 32 | 33 | index = sorted_components.index(value) 34 | index2 = sorted_components2.index(value2) 35 | 36 | assert(index == index2) 37 | 38 | # Roundtrip by creating a dictionary from a vector in canonical order 39 | component_dict = B.unsort_canonical_order(sorted_components) 40 | component_dict2 = B.unsort_canonical_order(sorted_components2) 41 | 42 | # Check that our dictionary is the same as before 43 | for key,value in component_dict.items(): 44 | value2 = components[key] 45 | assert_approx_equals(value, value2) 46 | 47 | for key,value in component_dict2.items(): 48 | value2 = components2[key] 49 | assert_approx_equals(value, value2) -------------------------------------------------------------------------------- /tests/test_compose.py: -------------------------------------------------------------------------------- 1 | # Testing that we can compose a unitary by exponentiating a hermitian 2 | 3 | from skc.operator import * 4 | from skc.utils import * 5 | from skc.basis import * 6 | from skc.compose import * 7 | from skc.decompose import * 8 | 9 | import random 10 | import unittest 11 | 12 | # Maximum dimension (2**D) to test 13 | D = 4 14 | 15 | ############################################################################## 16 | def test_pauli_unitary(): 17 | # Pauli matrices are unitary 18 | assert_matrix_unitary(SX.matrix) 19 | 
assert_matrix_unitary(SY.matrix) 20 | assert_matrix_unitary(SZ.matrix) 21 | assert_matrix_unitary(I2.matrix) 22 | 23 | ############################################################################## 24 | def test_pauli_hermitian(): 25 | # Test for Hermiticity just for good measure 26 | assert_matrix_hermitian(SX.matrix) 27 | assert_matrix_hermitian(SY.matrix) 28 | assert_matrix_hermitian(SZ.matrix) 29 | assert_matrix_hermitian(I2.matrix) 30 | 31 | ############################################################################## 32 | def create_test_case(d): 33 | 34 | class TestComposeCase(unittest.TestCase): 35 | 36 | def setUp(self): 37 | self.basis = get_hermitian_basis(d=2) 38 | 39 | ###################################################################### 40 | def test_compose(self): 41 | # Get a random Hermitian 42 | (H, components) = get_random_hermitian(self.basis) 43 | 44 | #U = exp_hermitian_to_unitary(H, math.pi/2, self.basis) 45 | #print "U(pi/2)= " + str(U) 46 | #assert_matrix_unitary(U) 47 | angle = random.random() * math.pi 48 | U = exp_hermitian_to_unitary(H, angle, self.basis) 49 | #print "U("+str(angle)+")= " + str(U) 50 | assert_matrix_unitary(U) 51 | 52 | ###################################################################### 53 | # Test whether reversing the sign of the angle for the same axis 54 | # results in the adjoint of a unitary 55 | def test_unitary_reverse_angle(self): 56 | (H, components) = get_random_hermitian(self.basis) 57 | angle = random.random() * math.pi 58 | U = exp_hermitian_to_unitary(H, angle, self.basis) 59 | Udag = exp_hermitian_to_unitary(H, -angle, self.basis) 60 | U_Udag = U * Udag 61 | assert_matrices_approx_equal(U_Udag, self.basis.identity.matrix, 62 | trace_distance) 63 | 64 | ###################################################################### 65 | # Test whether reversing the sign of the hermitian for the same angle 66 | # results in the adjoint of a unitary 67 | def test_unitary_reverse_hermitian(self): 68 | (H, 
components) = get_random_hermitian(self.basis) 69 | angle = random.random() * math.pi 70 | U = exp_hermitian_to_unitary(H, angle, self.basis) 71 | Udag = exp_hermitian_to_unitary(-H, angle, self.basis) 72 | U_Udag = U * Udag 73 | assert_matrices_approx_equal(U_Udag, self.basis.identity.matrix, 74 | trace_distance) 75 | 76 | ###################################################################### 77 | # Test that reversing the sign of the Hermitian 78 | # gives you a non-equal matrix 79 | def test_unitary_reverse_hermitian(self): 80 | (H, components) = get_random_hermitian(self.basis) 81 | angle = random.random() * math.pi 82 | U = exp_hermitian_to_unitary(H, angle, self.basis) 83 | Udag = exp_hermitian_to_unitary(-H, angle, self.basis) 84 | dist = trace_distance(U, Udag) 85 | assert_approx_not_equals(dist, 0) 86 | 87 | ###################################################################### 88 | def test_axis_roundtrip(self): 89 | (matrix_U, components, angle) = get_random_unitary(self.basis) 90 | #print "components= " + str(components) 91 | (components2, K, matrix_H) = unitary_to_axis(matrix_U, self.basis) 92 | #print "components2= " + str(components2) 93 | msg = "test_axis_roundtrip(d="+str(self.basis.d)+") K=" + \ 94 | str(K) + " angle=" + str(angle) 95 | assert_approx_equals_tolerance(K/2.0, abs(angle), TOLERANCE10, 96 | msg) 97 | matrix_U2 = axis_to_unitary(components2, K/2.0, self.basis) 98 | fowler_dist = fowler_distance(matrix_U, matrix_U2) 99 | assert_approx_equals_tolerance(fowler_dist, 0, TOLERANCE10) 100 | trace_dist = trace_distance(matrix_U, matrix_U2) 101 | assert_approx_equals(trace_dist, 0) 102 | 103 | # Check that we recovered the correct components (with sign) 104 | for k,v in components2.items(): 105 | msg = "components not equal: " + \ 106 | "k=" + str(k) + " v=" + \ 107 | str(v) + " v2=" + str(components[k]) 108 | # We just want to check that magnitude and sign is conserved 109 | assert_approx_equals(v, numpy.sign(angle)*components[k], 
message=msg) 110 | 111 | ###################################################################### 112 | def test_axis_recover_sign(self): 113 | (matrix_U, components, angle) = get_random_unitary(self.basis) 114 | #print "components= " + str(components) 115 | #print "angle= " + str(angle) 116 | (components2, K2, matrix_H2) = unitary_to_axis(matrix_U, self.basis) 117 | #print "components2= " + str(components2) 118 | 119 | # Reverse the sign of a random components 120 | random_key = random.choice(components2.keys()) 121 | components2[random_key] *= -1 122 | #print "components2= " + str(components2) 123 | 124 | matrix_U2 = axis_to_unitary(components2, K2/2.0, self.basis) 125 | (components3, K3, matrix_H3) = unitary_to_axis(matrix_U2, self.basis) 126 | #print "components3= " + str(components3) 127 | 128 | msg = "angles not equal: K3=" + \ 129 | str(K3) + " angle=" + str(angle) 130 | assert_approx_equals_tolerance(K3/2.0, abs(angle), TOLERANCE10, 131 | msg) 132 | 133 | angle_sign = numpy.sign(angle) 134 | for k,v in components3.items(): 135 | msg = "components not equal: " + \ 136 | "k=" + str(k) + " v=" + str(v) + \ 137 | " v2=" + str(components[k]) 138 | if (k == random_key): 139 | assert_approx_equals(v, -angle_sign*components[k], message=msg) 140 | else: 141 | assert_approx_equals(v, angle_sign*components[k], message=msg) 142 | 143 | trace_dist = trace_distance(matrix_U, matrix_U2) 144 | assert_approx_not_equals(trace_dist, 0) 145 | 146 | TestComposeCase.__name__ = "TestComposeCaseD"+str(d) 147 | 148 | return TestComposeCase 149 | 150 | ############################################################################## 151 | def get_suite(): 152 | suite = unittest.TestSuite() 153 | loader = unittest.TestLoader() 154 | 155 | # Create one test case for every SU(d) 156 | for i in range(1,D+1): 157 | d = 2**i 158 | test_case = create_test_case(d) 159 | suite1 = loader.loadTestsFromTestCase(test_case) 160 | suite.addTest(suite1) 161 | 162 | # Add single function tests from above 
163 | suite.addTest(unittest.FunctionTestCase(test_pauli_unitary)) 164 | suite.addTest(unittest.FunctionTestCase(test_pauli_hermitian)) 165 | return suite 166 | 167 | ############################################################################## 168 | if (__name__ == '__main__'): 169 | suite = get_suite() 170 | unittest.TextTestRunner(verbosity=3).run(suite) -------------------------------------------------------------------------------- /tests/test_dawson_factor.py: -------------------------------------------------------------------------------- 1 | from skc_dawson_factor import * 2 | from skc_decompose import * 3 | from skc_operator import * 4 | from skc_utils import * 5 | from skc_basis import * 6 | from skc_group_factor import * 7 | 8 | import math 9 | 10 | B2 = get_hermitian_basis(d=2) 11 | #(matrix_U, components, angle) = get_random_unitary(B2) 12 | 13 | axis_U = cart3d_to_h2(x=1, y=1, z=1) 14 | angle_U = math.pi / 12 15 | 16 | matrix_U = axis_to_unitary(axis_U, angle_U/2.0, B2) 17 | 18 | print "U= " + str(matrix_U) 19 | 20 | X_AXIS = cart3d_to_h2(x=1, y=0, z=0) 21 | ############################################################################# 22 | # Test balanced group commutator factoring 23 | [matrix_V, matrix_W] = dawson_group_factor(matrix_U, B2, X_AXIS) 24 | 25 | V = matrix_V 26 | W = matrix_W 27 | V_dag = numpy.transpose(numpy.conjugate(V)) 28 | W_dag = numpy.transpose(numpy.conjugate(W)) 29 | 30 | delta = get_group_commutator(matrix_V, matrix_W) #V * W * V_dag * W_dag 31 | 32 | print "Delta= " + str(delta) 33 | 34 | distance = trace_distance(delta, matrix_U) 35 | print "Trace Distance(Delta, U): " + str(distance) 36 | assert_approx_equals(distance, 0) -------------------------------------------------------------------------------- /tests/test_dawson_x_group_factor.py: -------------------------------------------------------------------------------- 1 | from skc_dawson_factor import * 2 | from skc_decompose import * 3 | from skc_operator import * 4 | 
from skc_utils import * 5 | from skc_basis import * 6 | 7 | B2 = get_hermitian_basis(d=2) 8 | #(matrix_U, components, angle) = get_random_unitary(B2) 9 | 10 | axis_U = cart3d_to_h2(x=1, y=1, z=1) 11 | angle_U = math.pi / 12 12 | 13 | matrix_U = axis_to_unitary(axis_U, angle_U/2.0, B2) 14 | 15 | print "U= " + str(matrix_U) 16 | 17 | [matrix_A, matrix_B] = dawson_x_group_factor(matrix_U, B2) 18 | 19 | print "A=" 20 | print str(matrix_A) 21 | 22 | matrix_A_true = numpy.matrix([ 23 | [ 0.96674550+0.24723584j, -0.01620469-0.06336385j], 24 | [ 0.01620469-0.06336385j, 0.96674550-0.24723584j]]) 25 | assert_matrices_approx_equal(matrix_A, matrix_A_true, trace_distance) 26 | 27 | print "B=" 28 | print str(matrix_B) 29 | 30 | matrix_B_true = numpy.matrix([ 31 | [ 0.96674550+0.j, -0.24776633+0.06336385j], 32 | [ 0.24776633+0.06336385j, 0.96674550+0.j ]]) 33 | assert_matrices_approx_equal(matrix_B, matrix_B_true, trace_distance) -------------------------------------------------------------------------------- /tests/test_diagonalize.py: -------------------------------------------------------------------------------- 1 | # Test file to see if we can recover hermitian H from unitary U 2 | # By constructing diagonal form explicitly from eigenvalues 3 | import scipy.linalg 4 | 5 | from skc.utils import * 6 | from skc.basis import * 7 | from skc.operator import * 8 | from skc.compose import * 9 | import unittest 10 | 11 | # Maximum dimension (2**D) to test 12 | D=4 13 | 14 | ############################################################################## 15 | def test_diagonalize(d): 16 | B = get_hermitian_basis(d) # SU(2) basis 17 | 18 | (matrix_U, components) = get_random_hermitian(B) 19 | 20 | #print "U= " + str(matrix_U) 21 | 22 | (eig_vals, eig_vecs) = scipy.linalg.eig(matrix_U) 23 | 24 | #print "eig_vals= " + str(eig_vals) 25 | #print "eig_vecs= " + str(eig_vecs) 26 | 27 | # Create rows of the matrix from elements of the eigenvectors, to 28 | # fake creating a matrix from column 
vectors 29 | eig_length = len(eig_vecs) 30 | assert(len(eig_vals) == eig_length) 31 | 32 | # Verify eigenvalues and eigenvectors, via Av = \lambdav (eigenvalue eqn) 33 | for i in range(eig_length): 34 | col_vec = numpy.matrix(eig_vecs[:,i]).transpose() 35 | scaled_vec1 = matrix_U * col_vec 36 | #print "scaled_vec1= " + str(scaled_vec1) 37 | scaled_vec2 = col_vec * eig_vals[i] 38 | #print "scaled_vec2= " + str(scaled_vec2) 39 | dist = vector_distance(scaled_vec1, scaled_vec2) 40 | assert_approx_equals(dist, 0) 41 | 42 | # Create the diagonalization matrix V 43 | matrix_V = numpy.matrix(eig_vecs) #numpy.matrix(rows) 44 | 45 | #print "V= " + str(matrix_V) 46 | 47 | # Get adjoint 48 | matrix_V_dag = numpy.transpose(numpy.conjugate(matrix_V)) 49 | 50 | # Eigenvector matrix should be unitary if we are to have 51 | # V be its own inverse 52 | assert_matrix_unitary(matrix_V) 53 | 54 | # Conjugate with U to see what we get 55 | #matrix_W = matrix_V * matrix_U * matrix_V_dag 56 | matrix_W = matrix_V.I * matrix_U * matrix_V 57 | 58 | #print "W= " + str(matrix_W) 59 | 60 | # Construct the diagonalized matrix that we want 61 | matrix_diag = numpy.matrix(numpy.eye(d), dtype=numpy.complex) 62 | for i in range(0,eig_length): 63 | matrix_diag[(i,i)] = eig_vals[i] 64 | 65 | #print "diag= " + str(matrix_diag) 66 | 67 | dist = trace_distance(matrix_diag, matrix_W) 68 | 69 | #print "dist(diag,W)= " + str(dist) 70 | assert_approx_equals(dist, 0) 71 | 72 | # Verify that off-diagonal elements are close to zero 73 | for i in range(eig_length): 74 | for j in range(eig_length): 75 | if (i != j): 76 | assert_approx_equals(matrix_W[(i,j)], 0) 77 | 78 | for i in range(0,eig_length): 79 | diff = abs(matrix_W[(i,i)] - eig_vals[i]) 80 | assert_approx_equals(diff, 0) 81 | #print "eig_val("+str(i)+") diff= " + str(diff) 82 | 83 | ############################################################################## 84 | # Class for testing matrix diagonalization for various SU(d) 85 | class 
TestDiagonalize(unittest.TestCase): 86 | 87 | def test_diagonalize(self): 88 | for i in range(1,D+1): 89 | d = 2**i 90 | test_diagonalize(d=d) 91 | 92 | ############################################################################## 93 | def get_suite(): 94 | suite = unittest.TestSuite() 95 | loader = unittest.TestLoader() 96 | suite1 = loader.loadTestsFromTestCase(TestDiagonalize) 97 | suite.addTest(suite1) 98 | return suite 99 | 100 | if (__name__ == '__main__'): 101 | suite = get_suite() 102 | unittest.TextTestRunner(verbosity=3).run(suite) -------------------------------------------------------------------------------- /tests/test_find_basis.py: -------------------------------------------------------------------------------- 1 | # Test file to try and find an orthonormal,traceless basis for SU(D) 2 | 3 | import numpy 4 | import unittest 5 | import types 6 | 7 | from skc.utils import * 8 | from skc.operator import * 9 | from skc.basis import * 10 | 11 | # Maximum dimension (2**D) to test 12 | D=3 13 | 14 | def test_unitary_identity(basis_dict): 15 | 16 | # Extract the dimension of the matrices, based on (0,0) identity element 17 | element00 = basis_dict[(0,0)] 18 | d = element00.matrix.shape[0] 19 | 20 | #def test_orthogonality(basis): 21 | # I think this property only holds for Pauli matrices and SU(2) 22 | #print "TESTING SELF-PRODUCT TRACE" 23 | # Assert that all basis elements have a trace self-product of d 24 | #for gate in basis_dict.values(): 25 | # print "Testing " + str(gate) +"\n" + str(gate.matrix) 26 | # abs_trace = numpy.abs(numpy.trace(gate.matrix*gate.matrix)) 27 | # assert_approx_equals(abs_trace, d) 28 | 29 | def create_basis_test_case(basis): 30 | class TestBasis(unittest.TestCase): 31 | 32 | def setUp(self): 33 | self.basis = basis 34 | 35 | # Test that the basis identity matrix is equal to standard identity 36 | def test_identity_matrix(self): 37 | identity_element = self.basis.identity 38 | d = self.basis.d 39 | # identity element should be 
equal to eye(d) 40 | identity = numpy.matrix(numpy.eye(d), dtype=numpy.complex) 41 | id_distance = trace_distance(identity, identity_element.matrix) 42 | assert_approx_equals(id_distance, 0) 43 | 44 | # Test that each basis element is self-traceless 45 | def test_self_traceless(self): 46 | identity_element = self.basis.identity 47 | for gate in self.basis.basis_dict.values(): 48 | if (gate == identity_element): 49 | # Skip the identity, it is not traceless 50 | continue 51 | #print "Testing \n" + str(gate) 52 | assert_approx_equals(numpy.trace(gate.matrix), 0) 53 | 54 | # Test that every basis element is orthogonal to every other 55 | def test_orthogonal(self): 56 | for gate in self.basis.basis_dict.values(): 57 | # Remove the gate from the second list copy to avoid self-multiplication 58 | basis2 = list(self.basis.basis_dict.values()) 59 | #print "Removing \n" + str(gate) 60 | #print "From \n" + str(basis2) 61 | basis2.remove(gate) 62 | for gate2 in basis2: 63 | #print "Testing \n" + str(gate) + "\n " + str(gate.matrix) 64 | #print " vs. 
\n" + str(gate2) + "\n " + str(gate2.matrix) 65 | inner_product = hs_inner_product(gate.matrix, gate2.matrix) 66 | assert_approx_equals(inner_product, 0) 67 | return TestBasis 68 | 69 | ############################################################################## 70 | def get_suite(): 71 | suite = unittest.TestSuite() 72 | 73 | loader = unittest.TestLoader() 74 | 75 | # Add hermitian basis tests up to D 76 | for i in range(1,D+1): 77 | d = 2**i 78 | 79 | # Add the Hermitian basis test case for SU(d) 80 | basis_H = get_hermitian_basis(d=d) 81 | test_case_H = create_basis_test_case(basis_H) 82 | suite_H = loader.loadTestsFromTestCase(test_case_H) 83 | suite.addTest(suite_H) 84 | 85 | # Add the unitary basis test case for SU(d) 86 | basis_U = get_unitary_basis(d=d) 87 | test_case_U = create_basis_test_case(basis_U) 88 | suite_U = loader.loadTestsFromTestCase(test_case_U) 89 | suite.addTest(suite_U) 90 | 91 | return suite 92 | 93 | ############################################################################## 94 | if (__name__ == '__main__'): 95 | suite = get_suite() 96 | unittest.TextTestRunner(verbosity=3).run(suite) 97 | -------------------------------------------------------------------------------- /tests/test_hypersphere.py: -------------------------------------------------------------------------------- 1 | from skc.basis import * 2 | from skc.decompose import * 3 | from skc.hypersphere import * 4 | 5 | import math 6 | import unittest 7 | 8 | ############################################################################## 9 | def test_hsphere_coords_roundtrip(basis): 10 | 11 | (matrix_U, components, angle) = get_random_unitary(basis) 12 | 13 | axis = basis.sort_canonical_order(components) 14 | print "axis= " + str(axis) 15 | theta = math.pi / 12 16 | print "U=" + str(matrix_U) 17 | hsphere_coords = unitary_to_hspherical(matrix_U, basis) 18 | 19 | print str(hsphere_coords) 20 | 21 | matrix_U2 = hspherical_to_unitary(hsphere_coords, basis) 22 | print "U2=" + 
str(matrix_U2) 23 | 24 | assert_matrices_approx_equal(matrix_U, matrix_U2, trace_distance) 25 | 26 | ############################################################################## 27 | def test_hsphere_coords_random(basis): 28 | 29 | (matrix_U, components, angle) = get_random_unitary(basis) 30 | theta = math.pi / 12 31 | print "U=" + str(matrix_U) 32 | hsphere_coords = unitary_to_hspherical(matrix_U, basis) 33 | 34 | print str(hsphere_coords) 35 | 36 | ############################################################################## 37 | # Force negative angle to test that edge case 38 | def test_hsphere_coords_random_negative(basis): 39 | 40 | (matrix_U, components, angle) = \ 41 | get_random_unitary(basis, angle_lower = -PI_HALF, angle_upper = 0) 42 | theta = math.pi / 12 43 | print "U=" + str(matrix_U) 44 | hsphere_coords = unitary_to_hspherical(matrix_U, basis) 45 | 46 | print str(hsphere_coords) 47 | 48 | ############################################################################## 49 | def test_hsphere_coords_random_axis(basis): 50 | theta = math.pi / 12 51 | 52 | axis = pick_random_axis(basis) 53 | test_hsphere_coords_axis(theta, axis, basis) 54 | 55 | ############################################################################## 56 | def test_hsphere_coords_axis(angle, axis, basis): 57 | matrix_U = axis_to_unitary(axis, angle/2.0, basis) 58 | print "U=" + str(matrix_U) 59 | hsphere_coords = unitary_to_hspherical(matrix_U, basis) 60 | 61 | print str(hsphere_coords) 62 | 63 | matrix_U2 = hspherical_to_unitary(hsphere_coords, basis) 64 | print "U2=" + str(matrix_U2) 65 | 66 | assert_matrices_approx_equal(matrix_U, matrix_U2, trace_distance) 67 | 68 | axis2 = cart3d_to_h2(x=1, y=1, z=1) 69 | H2 = get_hermitian_basis(d=2) 70 | H4 = get_hermitian_basis(d=4) 71 | 72 | #test_hsphere_coords_axis(angle=math.pi/12, axis=axis2, basis=H2) 73 | #test_hsphere_coords_random_axis(basis=H2) 74 | #test_hsphere_coords_random(basis=H2) 75 | 
#test_hsphere_coords_random_negative(basis=H2) 76 | #test_hsphere_coords_roundtrip(basis=H2) 77 | #test_hsphere_coords_random_axis(basis=H4) 78 | #test_hsphere_coords_random(basis=H4) 79 | 80 | ############################################################################## 81 | # Class for testing matrix logarithms for various SU(d) 82 | class TestHSphereLastCoord(unittest.TestCase): 83 | 84 | # Degenerate case where phi_1 = phi_2 = 0, then phi_3 should = 0 too 85 | def test_fix_last_hsphere_coord_degen_1(self): 86 | c_n1 = 0 87 | c_n = c_n1 88 | product = 0 89 | phi_n1 = fix_last_hsphere_coord(product=product, c_n=c_n, c_n1=c_n1) 90 | assert_approx_equals(phi_n1, 0) 91 | 92 | def test_fix_last_hsphere_coord_case_1(self): 93 | c_n1 = math.sin(math.pi/24)*(1.0/math.sqrt(3)) 94 | c_n = c_n1 95 | phi_1 = math.pi / 24 96 | phi_2 = math.acos(c_n / math.sin(phi_1)) 97 | product = math.sin(phi_1) * math.sin(phi_2) 98 | phi_n1 = fix_last_hsphere_coord(product=product, c_n=c_n, c_n1=c_n1) 99 | assert_approx_equals_tolerance(phi_n1, 0.785398163397, 1e-12) 100 | 101 | # These hard-coded examples are taken from an SU(2) rotation of math.pi / 12 102 | # about an axis(1,1,1) normalized 103 | # This should return a phi_ni in the range [0, PI_HALF] 104 | def test_fix_last_hsphere_coord_case_1(self): 105 | c_n1 = math.sin(math.pi/24)*(1.0/math.sqrt(3)) 106 | c_n = c_n1 107 | phi_1 = math.pi / 24 108 | phi_2 = math.acos(c_n / math.sin(phi_1)) 109 | product = math.sin(phi_1) * math.sin(phi_2) 110 | phi_n1 = fix_last_hsphere_coord(product=product, c_n=c_n, c_n1=c_n1) 111 | assert_approx_equals_tolerance(phi_n1, 0.785398163397, 1e-12) 112 | 113 | # This should return a phi_ni in the range [PI_HALF, PI] 114 | # by just flipping the sign of the phi_1 115 | def test_fix_last_hsphere_coord_case_2(self): 116 | c_n1 = -math.sin(math.pi/24)*(1.0/math.sqrt(3)) 117 | c_n = -c_n1 118 | phi_1 = math.pi / 24 119 | phi_2 = math.acos(c_n / math.sin(phi_1)) 120 | product = math.sin(phi_1) * 
math.sin(phi_2) 121 | phi_n1 = fix_last_hsphere_coord(product=product, c_n=c_n, c_n1=c_n1) 122 | correct_phi_n1 = 0.785398163397 + PI_HALF 123 | #correct_phi_n1 = THREE_PI_HALF - 0.785398163397 124 | assert_approx_equals_tolerance(phi_n1, correct_phi_n1, 1e-12) 125 | 126 | # This should return a phi_ni in the range [PI, THREE_PI_HALF] 127 | # by just flipping the sign of the phi_1 128 | def test_fix_last_hsphere_coord_case_3(self): 129 | c_n1 = math.sin(math.pi/24)*(1.0/math.sqrt(3)) 130 | c_n = c_n1 131 | phi_1 = -math.pi / 24 132 | phi_2 = math.acos(c_n / math.sin(phi_1)) 133 | product = math.sin(phi_1) * math.sin(phi_2) 134 | phi_n1 = fix_last_hsphere_coord(product=product, c_n=c_n, c_n1=c_n1) 135 | assert_approx_equals_tolerance(phi_n1, 0.785398163397 + PI, 1e-12) 136 | 137 | # This should return a phi_ni in the range [THREE_PI_HALF, TWO_PI] 138 | # by just flipping the sign of the phi_1 139 | def test_fix_last_hsphere_coord_case_4(self): 140 | c_n1 = -math.sin(math.pi/24)*(1.0/math.sqrt(3)) 141 | c_n = -c_n1 142 | phi_1 = math.pi / 24 143 | phi_2 = math.acos(c_n / math.sin(phi_1)) 144 | product = -math.sin(phi_1) * math.sin(phi_2) 145 | phi_n1 = fix_last_hsphere_coord(product=product, c_n=c_n, c_n1=c_n1) 146 | #correct_phi_n1 = 0.785398163397 + PI 147 | correct_phi_n1 = TWO_PI - 0.785398163397 148 | assert_approx_equals_tolerance(phi_n1, correct_phi_n1, 1e-12) 149 | 150 | ############################################################################## 151 | def get_suite(): 152 | suite = unittest.TestSuite() 153 | loader = unittest.TestLoader() 154 | suite1 = loader.loadTestsFromTestCase(TestHSphereLastCoord) 155 | suite.addTest(suite1) 156 | return suite 157 | 158 | if (__name__ == '__main__'): 159 | suite = get_suite() 160 | unittest.TextTestRunner(verbosity=3).run(suite) -------------------------------------------------------------------------------- /tests/test_matrix_exp.py: -------------------------------------------------------------------------------- 1 
| # Testing our own matrix exponentiate, since numpy's doesn't seem to work 2 | 3 | from skc_utils import * 4 | from skc_operator import * 5 | from skc_diagonalize import * 6 | from skc_basis import * 7 | 8 | import numpy 9 | 10 | # Verify that numpy.exp is just element-wise exponentiation, not that useful 11 | RX = numpy.exp(-1j*(math.pi/2)*SX.matrix) 12 | 13 | print "RX= " + str(RX) 14 | 15 | # Pauli X is hermitian (and unitary, but we don't care about that part now) 16 | assert_matrix_hermitian(SX.matrix) 17 | 18 | RX2 = matrix_exp(-1j*(math.pi/2)*SX.matrix, 50) 19 | 20 | print "RX2= " + str(RX2) 21 | 22 | # We can't assert unitarity here, b/c our matrix_exp misformats the matrix 23 | # somehow so that the elements aren't complex, or something. 24 | # Just visually inspect the print str above. 25 | #assert_matrix_unitary(RX2) 26 | 27 | # Verify that exponentiating a Hermitian (via its diagonalized form) 28 | # gives a unitary 29 | B2 = get_hermitian_basis(d=2) 30 | 31 | (matrix_V, matrix_W) = diagonalize(SX.matrix, B2) 32 | 33 | RX3 = matrix_exp_diag(-1j*(math.pi/2)*matrix_W) 34 | 35 | print "RX3= " + str(RX3) 36 | 37 | # Success! 
Hopefully 38 | assert_matrix_unitary(RX3) 39 | 40 | # Now translate it back to its non-diagonal form 41 | RX4 = matrix_V * RX3 * matrix_V.I 42 | 43 | print "RX4= " + str(RX4) 44 | 45 | # Should still be unitary 46 | assert_matrix_unitary(RX3) 47 | -------------------------------------------------------------------------------- /tests/test_matrix_ln.py: -------------------------------------------------------------------------------- 1 | from skc.diagonalize import * 2 | from skc.basis import * 3 | from skc.utils import * 4 | from skc.compose import * 5 | from skc.decompose import * 6 | 7 | import numpy 8 | import math 9 | import unittest 10 | 11 | # Maximum dimension (2**D) to test 12 | D=4 13 | 14 | ############################################################################## 15 | def test_decomposing_unitary(d): 16 | #print "*******************************************************************" 17 | #print "TESTING DECOMPOSITION OF UNITARY IN SU("+str(d)+")" 18 | B = get_hermitian_basis(d) 19 | 20 | (matrix_U, components, angle) = get_random_unitary(B) 21 | 22 | #print "U= " + str(matrix_U) 23 | 24 | (matrix_V, matrix_W) = diagonalize(matrix_U, B) 25 | 26 | #print "V= " + str(matrix_V) 27 | #print "W= " + str(matrix_W) 28 | 29 | matrix_ln = get_matrix_logarithm(matrix_W) 30 | 31 | #print "matrix_ln= " + str(matrix_ln) 32 | 33 | # Reconjugate to transform into iH 34 | matrix_iH = matrix_V * matrix_ln * matrix_V.I 35 | 36 | # Factor out -i (since we used -i in exp_hermitian_to_unitary) 37 | matrix_H = (-1.0/1j) * matrix_iH 38 | 39 | #print "matrix_H= " + str(matrix_H) 40 | 41 | # Compare the calculated components with our original 42 | (components2, K) = get_basis_components(matrix_H, B) 43 | 44 | #print "K= " + str(K) 45 | #print "angle= " + str(angle) 46 | assert_approx_equals_tolerance(numpy.abs(K), numpy.abs(2*angle), TOLERANCE4) 47 | 48 | #print "Renormalizing... 
" 49 | # Renormalize components 50 | #for key,value in components2.items(): 51 | # components2[key] = value / K 52 | # print "("+str(key)+")= " + str(components2[key]) 53 | 54 | # Assert that the components are now normalized 55 | norm = scipy.linalg.norm(components.values()) 56 | norm2= scipy.linalg.norm(components2.values()) 57 | #print "norm= " + str(norm) 58 | #print "norm2= " + str(norm2) 59 | assert_approx_equals(norm2, 1) 60 | 61 | for key in components2.keys(): 62 | #print str(key) 63 | #print " actual= " + str(components[key]) 64 | #print " comput= " + str(components2[key]) 65 | ratio = abs(components[key]) / abs(components2[key]) 66 | #print " ratio= " + str(ratio) 67 | assert_approx_equals_tolerance(ratio, 1, TOLERANCE6) 68 | 69 | ############################################################################## 70 | # Class for testing matrix logarithms for various SU(d) 71 | class TestMatrixLogarithm(unittest.TestCase): 72 | 73 | def test_decomposing_unitary(self): 74 | for i in range(1,D+1): 75 | d = 2**i 76 | test_decomposing_unitary(d=d) 77 | 78 | ############################################################################## 79 | def get_suite(): 80 | suite = unittest.TestSuite() 81 | loader = unittest.TestLoader() 82 | suite1 = loader.loadTestsFromTestCase(TestMatrixLogarithm) 83 | suite.addTest(suite1) 84 | return suite 85 | 86 | if (__name__ == '__main__'): 87 | suite = get_suite() 88 | unittest.TextTestRunner(verbosity=3).run(suite) -------------------------------------------------------------------------------- /tests/test_operator.py: -------------------------------------------------------------------------------- 1 | from skc.operator import * 2 | 3 | import math 4 | import unittest 5 | 6 | class TestOperator(unittest.TestCase): 7 | 8 | # Test that hash value implies equality 9 | def test_hash(self): 10 | hash1 = I2.__hash__() 11 | hash2 = H.__hash__() 12 | hash3 = T.__hash__() 13 | msg = "hash(I2)= " + str(hash1) + " hash(H)= " + str(hash2) 14 | 
self.assertNotEquals(hash1, hash2, msg) 15 | msg = "hash(H)= " + str(hash2) + " hash(T)= " + str(hash3) 16 | self.assertNotEquals(hash2, hash3, msg) 17 | msg = "hash(I2)= " + str(hash1) + " hash(T)= " + str(hash3) 18 | self.assertNotEquals(hash1, hash3, msg) 19 | 20 | def test_ancestors_as_string(self): 21 | op = Operator("", None) 22 | op.ancestors = ['A', 'B', 'C'] 23 | string = op.ancestors_as_string() 24 | self.assertEquals(string, "A-B-C", "Ancestor string incorrect " + string) 25 | 26 | ############################################################################## 27 | def get_suite(): 28 | suite = unittest.TestSuite() 29 | loader = unittest.TestLoader() 30 | suite1 = loader.loadTestsFromTestCase(TestOperator) 31 | suite.addTest(suite1) 32 | return suite 33 | 34 | ############################################################################## 35 | if (__name__ == '__main__'): 36 | suite = get_suite() 37 | unittest.TextTestRunner(verbosity=3).run(suite) 38 | -------------------------------------------------------------------------------- /tests/test_operator_norm.py: -------------------------------------------------------------------------------- 1 | # Test file to print out distances between some standard gates 2 | 3 | #from skc_unitary_decompose import * 4 | from skc_operator import * 5 | from skc_utils import * 6 | import scipy.linalg 7 | 8 | gates = [I2, SX, SY, SZ, H, T] 9 | gates2 = list(gates) 10 | 11 | # These norms should all be 1 12 | print "scipy.linalg.norm is not the operator norm :[" 13 | print "|I2| = " + str(scipy.linalg.norm(I2.matrix)) 14 | print "|SX| = " + str(scipy.linalg.norm(SX.matrix)) 15 | print "|SY| = " + str(scipy.linalg.norm(SY.matrix)) 16 | print "|SZ| = " + str(scipy.linalg.norm(SZ.matrix)) 17 | print "|H| = " + str(scipy.linalg.norm(H.matrix)) 18 | print "|T| = " + str(scipy.linalg.norm(T.matrix)) 19 | 20 | print "trace norm is better?" 
21 | print "|I2| = " + str(trace_norm(I2.matrix)) 22 | print "|SX| = " + str(trace_norm(SX.matrix)) 23 | print "|SY| = " + str(trace_norm(SY.matrix)) 24 | print "|SZ| = " + str(trace_norm(SZ.matrix)) 25 | print "|H| = " + str(trace_norm(H.matrix)) 26 | print "|T| = " + str(trace_norm(T.matrix)) 27 | 28 | print "sup norm is better?" 29 | print "|I2| = " + str(operator_norm(I2.matrix)) 30 | print "|SX| = " + str(operator_norm(SX.matrix)) 31 | print "|SY| = " + str(operator_norm(SY.matrix)) 32 | print "|SZ| = " + str(operator_norm(SZ.matrix)) 33 | print "|H| = " + str(operator_norm(H.matrix)) 34 | print "|T| = " + str(operator_norm(T.matrix)) 35 | 36 | for gate1 in gates: 37 | gates2.remove(gate1) 38 | dist_text = "Dist("+gate1.name+",...)" 39 | for gate2 in gates2: 40 | dist_text += "\t" + str(fowler_distance(gate1.matrix, gate2.matrix)) 41 | print dist_text 42 | 43 | #print "Distance(I2,SX) = " + str(I2.distance(SX)) 44 | #print "Distance(I2,SY) = " + str(I2.distance(SY)) 45 | #print "Distance(I2,SZ) = " + str(I2.distance(SZ)) 46 | #print "Distance(I2,H) = " + str(I2.distance(SX)) 47 | #print "Distance(I2,SX) = " + str(I2.distance(SX)) -------------------------------------------------------------------------------- /tests/test_permutations.py: -------------------------------------------------------------------------------- 1 | import scipy 2 | 3 | # The combinatorics function n choose k 4 | def n_choose_k(n, k): 5 | n_fact = scipy.factorial(n) 6 | k_fact = scipy.factorial(k) 7 | n_minus_k_fact = scipy.factorial(n-k) 8 | return n_fact / (k_fact * n_minus_k_fact) 9 | 10 | def swap(array, i, j): 11 | temp = array[i] 12 | array[i] = array[j] 13 | array[j] = temp 14 | 15 | # Return a list of tuples of all distinct pairs in the set {1,2,...,n} 16 | def generate_all_pairs(n): 17 | pairs = [] 18 | for i in range(1,n+1): 19 | for j in range(i+1,n+1): 20 | pairs.append((i,j)) 21 | # As a sanity check, make sure we have all the pairs we were expecting 22 | pair_count = 
n_choose_k(n,2) 23 | assert(len(pairs) == pair_count) 24 | return pairs 25 | 26 | def generate_permutations(i): 27 | original = range(1,i+1) 28 | 29 | print "pairs(3)= " + str(generate_all_pairs(3)) 30 | print "pairs(4)= " + str(generate_all_pairs(4)) 31 | print "pairs(16)= " + str(generate_all_pairs(16)) 32 | 33 | -------------------------------------------------------------------------------- /tests/test_random.py: -------------------------------------------------------------------------------- 1 | # Test generating random matrices 2 | 3 | import numpy 4 | from skc_utils import * 5 | from skc_operator import * 6 | from skc_basis import * 7 | 8 | # Get SU(2) basis (I plus Pauli matrices) 9 | B2 = get_basis(d=2) 10 | 11 | random_U = get_random_unitary(B2) 12 | print str(random_U) -------------------------------------------------------------------------------- /tests/test_recompose.py: -------------------------------------------------------------------------------- 1 | from skc.basis import * 2 | from skc.decompose import * 3 | from skc.compose import * 4 | from skc.utils import * 5 | 6 | import unittest 7 | 8 | # Maximum dimension (2**D) to test 9 | D = 4 10 | 11 | ############################################################################## 12 | # Test for recovering a sign change in the hermitian from a single 13 | # component 14 | def test_recover_sign(d): 15 | B = get_hermitian_basis(d=d) 16 | (H, components) = get_random_hermitian(B) 17 | 18 | #print "H= " + str(H) 19 | 20 | (components2, norm) = get_basis_components(H, B) 21 | random_key = random.choice(components2.keys()) 22 | components2[random_key] *= -1 23 | 24 | H3 = matrix_from_components(components2, B) 25 | 26 | (components3, norm) = get_basis_components(H, B) 27 | 28 | for k,v in components3.items(): 29 | if (k != random_key): 30 | assert_approx_equals(v, components2[k]) 31 | else: 32 | assert_approx_equals(-v, components2[k]) 33 | 34 | 
##############################################################################
# Round-trip test: decompose a random Hermitian into basis components,
# rebuild a matrix from those components, decompose the rebuilt matrix,
# and check that both decompositions agree component-by-component.
#
# d: dimension of SU(d) to test (a power of 2).
def test_redecompose(d):
    B = get_hermitian_basis(d=d)
    (H, components) = get_random_hermitian(B)

    #print "H= " + str(H)

    (components2, norm) = get_basis_components(H, B)

    H3 = matrix_from_components(components2, B)

    # BUG FIX: decompose the *recomposed* matrix H3, not H a second time.
    # The original called get_basis_components(H, B) again here, which made
    # the loop below compare a decomposition with itself -- a vacuous test
    # that could never fail (and left H3 entirely unused).
    (components3, norm) = get_basis_components(H3, B)

    # Each component recovered from H3 should match the component H was
    # rebuilt from, within floating-point tolerance.
    for k, v in components3.items():
        assert_approx_equals(v, components2[k])

##############################################################################
# Recompose test: decompose a random Hermitian into basis components and
# rebuild it; the rebuilt matrix should be at trace distance 0 from the
# original.
#
# d: dimension of SU(d) to test (a power of 2).
def test_recompose(d):
    B = get_hermitian_basis(d=d)
    (H, components) = get_random_hermitian(B)

    #print "H= " + str(H)

    (components2, norm) = get_basis_components(H, B)

    H3 = matrix_from_components(components2, B)

    #print "H3= " + str(H3)

    dist = trace_distance(H, H3)
    assert_approx_equals(dist, 0)

##############################################################################
# Class for testing recompose for various SU(d), from d=2 up to d=2**D.
class TestRecompose(unittest.TestCase):

    def test_recover_sign(self):
        for i in range(1, D+1):
            d = 2**i
            test_recover_sign(d=d)

    def test_recompose(self):
        for i in range(1, D+1):
            d = 2**i
            test_recompose(d=d)

    def test_redecompose(self):
        for i in range(1, D+1):
            d = 2**i
            test_redecompose(d=d)

##############################################################################
# Return a unittest suite containing all the recompose tests, for use by
# test_runner.py.
def get_suite():
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    suite1 = loader.loadTestsFromTestCase(TestRecompose)
    suite.addTest(suite1)
    return suite

if (__name__ == '__main__'):
    suite = get_suite()
    unittest.TextTestRunner(verbosity=3).run(suite)
-------------------------------------------------------------------------------- /tests/test_rotate.py: -------------------------------------------------------------------------------- 1 | from skc.utils import * 2 | from skc.rotate import * 3 | from skc.operator import * 4 | 5 | import math 6 | import unittest 7 | import random 8 | 9 | class TestRotate(unittest.TestCase): 10 | 11 | def test_rotate_z(self): 12 | matrix_Z = rotate_Z(PI) 13 | #print "matrix_Z= " + str(matrix_Z) 14 | assert_matrices_approx_equal(matrix_Z, SZ.matrix, distance=fowler_distance) 15 | 16 | def test_rotate_y(self): 17 | matrix_Y = rotate_Y(PI) 18 | #print "matrix_Y= " + str(matrix_Y) 19 | assert_matrices_approx_equal(matrix_Y, SY.matrix, distance=fowler_distance) 20 | 21 | def test_rotate_x(self): 22 | matrix_X = rotate_X(PI) 23 | #print "matrix_X= " + str(matrix_X) 24 | assert_matrices_approx_equal(matrix_X, SX.matrix, distance=fowler_distance) 25 | 26 | ############################################################################## 27 | def get_suite(): 28 | suite = unittest.TestSuite() 29 | loader = unittest.TestLoader() 30 | suite1 = loader.loadTestsFromTestCase(TestRotate) 31 | suite.addTest(suite1) 32 | return suite 33 | 34 | ############################################################################## 35 | if (__name__ == '__main__'): 36 | suite = get_suite() 37 | unittest.TextTestRunner(verbosity=3).run(suite) 38 | -------------------------------------------------------------------------------- /tests/test_runner.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import test_recompose 4 | import test_find_basis 5 | import test_utils 6 | import test_matrix_ln 7 | import test_diagonalize 8 | import test_compose 9 | import test_similarity_matrix 10 | import test_simplify 11 | import test_operator 12 | import test_hypersphere 13 | import test_trig 14 | import test_rotate 15 | 16 | loader = unittest.TestLoader() 17 | 18 | 
suite = unittest.TestSuite() 19 | suite.addTest(test_recompose.get_suite()) 20 | suite.addTest(test_find_basis.get_suite()) 21 | suite.addTest(test_utils.get_suite()) 22 | suite.addTest(test_trig.get_suite()) 23 | suite.addTest(test_rotate.get_suite()) 24 | suite.addTest(test_matrix_ln.get_suite()) 25 | suite.addTest(test_diagonalize.get_suite()) 26 | suite.addTest(test_compose.get_suite()) 27 | suite.addTest(test_similarity_matrix.get_suite()) 28 | suite.addTest(test_simplify.get_suite()) 29 | suite.addTest(test_operator.get_suite()) 30 | suite.addTest(test_hypersphere.get_suite()) 31 | unittest.TextTestRunner(verbosity=2).run(suite) 32 | -------------------------------------------------------------------------------- /tests/test_similarity_matrix.py: -------------------------------------------------------------------------------- 1 | from skc.dawson.factor import * 2 | from skc.basis import * 3 | from skc.operator import * 4 | from skc.utils import * 5 | 6 | import math 7 | import unittest 8 | 9 | # Maximum dimension (2**D) to test 10 | D = 1 11 | 12 | ############################################################################## 13 | def test_similarity(matrix_U1, matrix_U2, basis): 14 | 15 | #print "U1= " + str(matrix_U1) 16 | 17 | #print "U2= " + str(matrix_U2) 18 | 19 | # Test finding similarity matrix 20 | matrix_S = find_similarity_matrix(matrix_U1, matrix_U2, basis) 21 | 22 | #print "S= " 23 | #print str(matrix_S) 24 | 25 | (axis_S, K, matrix_H) = unitary_to_axis(matrix_S, basis) 26 | angle_S = K/2.0 27 | 28 | #print "axis_S: " + str(axis_S) 29 | #print "angle_S: " + str(angle_S) 30 | 31 | # S * U2 * S^\dagger 32 | matrix_U = matrix_S * matrix_U2 * matrix_S.H 33 | 34 | # Now let's conjugate this bitch 35 | #print "Conjugated U=" 36 | #print str(matrix_U) 37 | 38 | distance = trace_distance(matrix_U1, matrix_U) 39 | #print "Distance from U= " + str(distance) 40 | assert_approx_equals(distance, 0) 41 | 42 | # Test that swapping U1 and U2 gives adjoint S 43 | 
matrix_S2 = find_similarity_matrix(matrix_U2, matrix_U1, basis) 44 | dist2 = trace_distance(matrix_S, matrix_S2.H) 45 | #print "Distance(S,S2_dag)= " + str(dist2) 46 | assert_approx_equals(dist2, 0) 47 | 48 | assert_matrix_unitary(matrix_S) 49 | 50 | #matrix_S_S_dag = matrix_S * matrix_S.H 51 | #assert_matrices_approx_equal(matrix_S_S_dag, basis.identity.matrix, trace_distance) 52 | return matrix_S 53 | 54 | ############################################################################# 55 | # Some definitions common to all the tests below 56 | B2 = get_hermitian_basis(d=2) 57 | x_axis = cart3d_to_h2(x=1,y=0,z=0) 58 | y_axis = cart3d_to_h2(x=0,y=1,z=0) 59 | z_axis = cart3d_to_h2(x=0,y=0,z=1) 60 | 61 | ############################################################################# 62 | # Start with something easy, which we've verified with Chris's C++ compiler 63 | # x-axis to z-axis 64 | def test_x_pi_z_pi(): 65 | theta = math.pi 66 | 67 | unitary_U1 = axis_to_unitary(x_axis, theta/2.0, B2) 68 | assert_matrices_approx_equal(SX.matrix, unitary_U1, fowler_distance) 69 | unitary_U2 = axis_to_unitary(z_axis, theta/2.0, B2) 70 | assert_matrices_approx_equal(SZ.matrix, unitary_U2, fowler_distance) 71 | 72 | #print "======================" 73 | #print "SIMILARITY(X pi, Z pi)" 74 | matrix_S = test_similarity(unitary_U1, unitary_U2, B2) 75 | matrix_S_true = numpy.matrix([ 76 | [ 0.70710678+0.j, -0.70710678+0.j], 77 | [ 0.70710678+0.j, 0.70710678+0.j]]) 78 | assert_matrices_approx_equal(matrix_S, matrix_S_true, trace_distance) 79 | 80 | ############################################################################# 81 | # Start with something easy, which we've verified with Chris's C++ compiler 82 | # x-axis to y-axis 83 | def test_x_pi_y_pi(): 84 | theta = math.pi 85 | 86 | unitary_U1 = axis_to_unitary(x_axis, theta/2.0, B2) 87 | assert_matrices_approx_equal(SX.matrix, unitary_U1, fowler_distance) 88 | unitary_U2 = axis_to_unitary(y_axis, theta/2.0, B2) 89 | 
assert_matrices_approx_equal(SY.matrix, unitary_U2, fowler_distance) 90 | 91 | #print "======================" 92 | #print "SIMILARITY(X pi, Y pi)" 93 | matrix_S = test_similarity(unitary_U1, unitary_U2, B2) 94 | matrix_S_true = numpy.matrix([ 95 | [ 0.70710678+0.70710678j, 0.00000000+0.j ], 96 | [ 0.00000000+0.j, 0.70710678-0.70710678j]]) 97 | assert_matrices_approx_equal(matrix_S, matrix_S_true, trace_distance) 98 | 99 | ############################################################################# 100 | # Test something more complicated 101 | def test_x_pi_12_y_pi_12(): 102 | theta = math.pi / 12 103 | 104 | # First rotation is about axis (1,0,0) 105 | unitary_U = axis_to_unitary(x_axis, theta, B2) 106 | 107 | # Second rotation is about axis(0,1,0) of same angle 108 | unitary_U2 = axis_to_unitary(y_axis, theta, B2) 109 | 110 | #print "=====" 111 | #print "SIMILARITY(X pi/12, Y pi/12)" 112 | matrix_S = test_similarity(unitary_U, unitary_U2, B2) 113 | matrix_S_true = numpy.matrix([ 114 | [ 0.70710678+0.70710678j, 0.00000000+0.j ], 115 | [ 0.00000000+0.j, 0.70710678-0.70710678j]]) 116 | assert_matrices_approx_equal(matrix_S, matrix_S_true, trace_distance) 117 | 118 | ############################################################################## 119 | def create_test_case(d): 120 | basis = get_hermitian_basis(d=d) 121 | 122 | # Class for testing similarity matrices for various SU(d) 123 | class TestSimilarity(unittest.TestCase): 124 | 125 | def setUp(self): 126 | self.basis = basis 127 | 128 | def test_similarity(self): 129 | # Choose a unitary of some random axis and angle 130 | (matrix_U1, components1, angle) = get_random_unitary(basis) 131 | 132 | # Compose another unitary of the same angle around a 133 | # different random axis 134 | (H, components) = get_random_hermitian(basis) 135 | 136 | matrix_U2 = exp_hermitian_to_unitary(H, angle, basis) 137 | assert_matrix_unitary(matrix_U2) 138 | 139 | test_similarity(matrix_U1, matrix_U2, basis) 140 | 141 | return 
TestSimilarity 142 | 143 | ############################################################################## 144 | def get_suite(): 145 | suite = unittest.TestSuite() 146 | 147 | loader = unittest.TestLoader() 148 | 149 | for i in range(1,D+1): 150 | d = 2**i 151 | test_case = create_test_case(d) 152 | suite1 = loader.loadTestsFromTestCase(test_case) 153 | suite.addTest(suite1) 154 | 155 | suite.addTest(unittest.FunctionTestCase(test_x_pi_z_pi)) 156 | suite.addTest(unittest.FunctionTestCase(test_x_pi_y_pi)) 157 | suite.addTest(unittest.FunctionTestCase(test_x_pi_12_y_pi_12)) 158 | return suite 159 | 160 | ############################################################################## 161 | if (__name__ == '__main__'): 162 | suite = get_suite() 163 | unittest.TextTestRunner(verbosity=3).run(suite) 164 | -------------------------------------------------------------------------------- /tests/test_simplify.py: -------------------------------------------------------------------------------- 1 | from skc.simplify import * 2 | 3 | import math 4 | import unittest 5 | 6 | ############################################################################## 7 | class TestSimplifyEngine(unittest.TestCase): 8 | 9 | def setUp(self): 10 | rules = [AdjointRule(), DoubleIdentityRule('Q'), IdentityRule(), 11 | GeneralRule(['X','Y','Z']), DoubleAdjointRule()] 12 | self.engine = SimplifyEngine(rules) 13 | 14 | def test_min_arg_count(self): 15 | self.assertEquals(self.engine.max_arg_count, 3) 16 | 17 | def test_simplest(self): 18 | # This should simplify to I 19 | sequence = ['I', 'I'] 20 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 21 | self.assertEqual(new_sequence, ['I']) 22 | self.assertEqual(simplify_length, 1) 23 | 24 | def test_repeated_identity(self): 25 | # This should simplify to I 26 | sequence = ['I', 'I', 'I', 'I', 'I'] 27 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 28 | self.assertEqual(new_sequence, ['I']) 29 | 
self.assertEqual(simplify_length, 4) 30 | 31 | def test_general_identity_not_enough(self): 32 | # This should simplify QIQ to nothing 33 | sequence = ['D', 'A', 'B'] 34 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 35 | self.assertEqual(new_sequence, ['D', 'A', 'B']) 36 | self.assertEqual(simplify_length, 0) 37 | 38 | def test_general_identity(self): 39 | # This should simplify QIQ to nothing 40 | sequence = ['D', 'X', 'Y', 'Z', 'X', 'Y', 'Z'] 41 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 42 | self.assertEqual(new_sequence, ['D']) 43 | self.assertEqual(simplify_length, 6) 44 | 45 | def test_general_identity_beginning(self): 46 | # This does not simplify XYZ because it works from the end 47 | sequence = ['X', 'Y', 'Z', 'D'] 48 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 49 | self.assertEqual(new_sequence, ['X', 'Y', 'Z', 'D']) 50 | self.assertEqual(simplify_length, 0) 51 | 52 | def test_general_identity_ending(self): 53 | # This should simplify QIQ to nothing 54 | sequence = ['D','X', 'Y', 'Z'] 55 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 56 | self.assertEqual(new_sequence, ['D']) 57 | self.assertEqual(simplify_length, 3) 58 | 59 | def test_identity_middle(self): 60 | # This should simplify QIQ to nothing 61 | sequence = ['X', 'Q', 'I', 'Q', 'Z'] 62 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 63 | self.assertEqual(new_sequence, ['X', 'Q', 'I', 'Q', 'Z']) 64 | self.assertEqual(simplify_length, 0) 65 | 66 | def test_identity(self): 67 | # This should simplify QIQ to nothing 68 | sequence = ['X', 'Q', 'I', 'Q'] 69 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 70 | self.assertEqual(new_sequence, ['X']) 71 | self.assertEqual(simplify_length, 3) 72 | 73 | def test_double_q_middle(self): 74 | # This should simplify 3 Q's down to just Q 75 | sequence = ['X', 'Q', 'Q', 'Q', 'Z'] 76 | (simplify_length, new_sequence) = 
self.engine.simplify(sequence) 77 | self.assertEqual(new_sequence, ['X', 'Q', 'Q', 'Q', 'Z']) 78 | self.assertEqual(simplify_length, 0) 79 | 80 | def test_double_q(self): 81 | # This should simplify 3 Q's down to just Q 82 | sequence = ['X', 'Q', 'Q', 'Q'] 83 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 84 | self.assertEqual(new_sequence, ['X', 'Q']) 85 | self.assertEqual(simplify_length, 2) 86 | 87 | def test_none_obtains(self): 88 | # This should simplify to I 89 | sequence = ['X', 'Q', 'Y', 'Q', 'Z'] 90 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 91 | self.assertEqual(new_sequence, ['X', 'Q', 'Y', 'Q', 'Z']) 92 | self.assertEqual(simplify_length, 0) 93 | 94 | def test_double_adjoint(self): 95 | sequence = ['X', 'Ydd'] 96 | (simplify_length, new_sequence) = self.engine.simplify(sequence) 97 | self.assertEqual(new_sequence, ['X', 'Y']) 98 | self.assertEqual(simplify_length, 0) 99 | 100 | ############################################################################## 101 | class TestDoubleIdentityRule(unittest.TestCase): 102 | 103 | def setUp(self): 104 | self.rule = DoubleIdentityRule('I') 105 | 106 | def test_simplest(self): 107 | # This should simplify to I 108 | sequence = ['I', 'I'] 109 | (obtains, sequence) = self.rule.simplify(sequence) 110 | self.assertEqual(obtains, True) 111 | self.assertEqual(sequence, ['I']) 112 | 113 | def test_simplest_not(self): 114 | # This should simplify to I 115 | sequence = ['X', 'Z'] 116 | (obtains, sequence) = self.rule.simplify(sequence) 117 | self.assertEqual(obtains, False) 118 | self.assertEqual(sequence, ['X', 'Z']) 119 | 120 | ############################################################################## 121 | class TestIdentityRule(unittest.TestCase): 122 | 123 | def setUp(self): 124 | self.rule = IdentityRule() 125 | 126 | def test_simplest(self): 127 | # This should simplify to I 128 | sequence = ['I', 'I'] 129 | (obtains, sequence) = self.rule.simplify(sequence) 130 | 
self.assertEqual(obtains, True) 131 | self.assertEqual(sequence, ['I']) 132 | 133 | def test_simplest2(self): 134 | # This should simplify to Q 135 | sequence = ['Q', 'I'] 136 | (obtains, sequence) = self.rule.simplify(sequence) 137 | self.assertEqual(obtains, True) 138 | self.assertEqual(sequence, ['Q']) 139 | 140 | def test_simplest3(self): 141 | # This should simplify to Q 142 | sequence = ['I', 'Q'] 143 | (obtains, sequence) = self.rule.simplify(sequence) 144 | self.assertEqual(obtains, True) 145 | self.assertEqual(sequence, ['Q']) 146 | 147 | def test_simplest_not(self): 148 | # This should not simplify 149 | sequence = ['X', 'Z'] 150 | (obtains,sequence) = self.rule.simplify(sequence) 151 | self.assertEqual(obtains, False) 152 | self.assertEqual(sequence, ['X', 'Z']) 153 | 154 | ############################################################################## 155 | class TestGeneralRule(unittest.TestCase): 156 | 157 | def setUp(self): 158 | self.rule = GeneralRule(sequence=['A','B','C'], new_sym='I4') 159 | 160 | def test_simplest(self): 161 | msg = "Incorrect number of args: " + str(self.rule.arg_count) 162 | self.assertEqual(self.rule.arg_count, 3, msg) 163 | # This should simplify to I 164 | sequence = ['A', 'B', 'C'] 165 | (obtains, sequence) = self.rule.simplify(sequence) 166 | self.assertEqual(obtains, True) 167 | self.assertEqual(sequence, ['I4']) 168 | 169 | def test_simplest2(self): 170 | # This should simplify to Q 171 | sequence = ['A', 'B', 'C'] 172 | (obtains, sequence) = self.rule.simplify(sequence) 173 | self.assertEqual(obtains, True) 174 | self.assertEqual(sequence, ['I4']) 175 | 176 | def test_simplest_not(self): 177 | # This should not simplify 178 | sequence = ['A', 'I', 'Z'] 179 | (obtains, sequence) = self.rule.simplify(sequence) 180 | self.assertEqual(obtains, False) 181 | self.assertEqual(sequence, ['A', 'I', 'Z']) 182 | 183 | ############################################################################## 184 | class 
TestAdjointRule(unittest.TestCase): 185 | 186 | def setUp(self): 187 | self.rule = AdjointRule() 188 | 189 | # Test that adjoint rule obtains for simplest case 190 | def test_simplest(self): 191 | # This should simplify to I 192 | sequence = ['Q', 'Qd'] 193 | (obtains, sequence) = self.rule.simplify(sequence) 194 | self.assertEqual(obtains, True) 195 | self.assertEqual(sequence, ['I']) 196 | 197 | def test_simplest_reverse(self): 198 | # This should also simplify to I 199 | sequence = ['Qd', 'Q'] 200 | (obtains, sequence) = self.rule.simplify(sequence) 201 | self.assertEqual(obtains, True) 202 | self.assertEqual(sequence, ['I']) 203 | 204 | def test_different_prefixes(self): 205 | sequence = ['X', 'Zd'] 206 | # This should not simplify 207 | (obtains, sequence) = self.rule.simplify(sequence) 208 | self.assertEqual(obtains, False) 209 | self.assertEqual(sequence, ['X', 'Zd']) 210 | 211 | def test_equal_length(self): 212 | # This should not simplify 213 | sequence = ['Ydd', 'Xdd'] 214 | (obtains, sequence) = self.rule.simplify(sequence) 215 | self.assertEqual(obtains, False) 216 | self.assertEqual(sequence, ['Ydd', 'Xdd']) 217 | 218 | def test_repeated_d(self): 219 | sequence = ['Qddd', 'Qdd'] 220 | # This should simplify to I 221 | (obtains, sequence) = self.rule.simplify(sequence) 222 | self.assertEqual(obtains, True) 223 | self.assertEqual(sequence, ['I']) 224 | 225 | ############################################################################## 226 | def get_suite(): 227 | suite = unittest.TestSuite() 228 | loader = unittest.TestLoader() 229 | suite1 = loader.loadTestsFromTestCase(TestAdjointRule) 230 | suite.addTest(suite1) 231 | suite2 = loader.loadTestsFromTestCase(TestIdentityRule) 232 | suite.addTest(suite2) 233 | suite3 = loader.loadTestsFromTestCase(TestDoubleIdentityRule) 234 | suite.addTest(suite3) 235 | suite4 = loader.loadTestsFromTestCase(TestGeneralRule) 236 | suite.addTest(suite4) 237 | suite5 = loader.loadTestsFromTestCase(TestSimplifyEngine) 238 | 
suite.addTest(suite5) 239 | return suite 240 | 241 | ############################################################################## 242 | if (__name__ == '__main__'): 243 | suite = get_suite() 244 | unittest.TextTestRunner(verbosity=3).run(suite) 245 | -------------------------------------------------------------------------------- /tests/test_skc.py: -------------------------------------------------------------------------------- 1 | from skc_operator import * 2 | from skc_dawson import * 3 | from skc_utils import * 4 | from skc_compose import * 5 | from skc_basis import * 6 | import math 7 | 8 | d = 4 9 | basis = get_hermitian_basis(d=d) 10 | theta = math.pi / 4 # 45 degrees 11 | 12 | axis_U = cart3d_to_h2(x=1, y=1, z=1) 13 | angle_U = math.pi / 12 14 | 15 | #matrix_U = axis_to_unitary(axis_U, theta/2.0, basis) 16 | (matrix_U, components, angle) = get_random_unitary(basis) 17 | 18 | print "U= " + str(matrix_U) 19 | 20 | load_basic_approxes("basic_approxes_su4.pickle") 21 | set_basis(basis) 22 | 23 | random_axis = pick_random_axis(basis) 24 | set_axis(random_axis) 25 | 26 | Uop = Operator(name="U", matrix=matrix_U) 27 | 28 | Un = solovay_kitaev(Uop, 3) 29 | print "Approximated U: " + str(Un) -------------------------------------------------------------------------------- /tests/test_skc_dawson.py: -------------------------------------------------------------------------------- 1 | from skc_operator import * 2 | from skc_dawson import * 3 | from skc_utils import * 4 | from skc_compose import * 5 | from skc_basis import * 6 | from skc_group_factor import * 7 | import math 8 | 9 | d = 4 10 | basis = get_hermitian_basis(d=d) 11 | theta = math.pi / 4 # 45 degrees 12 | 13 | axis_U = cart3d_to_h2(x=1, y=1, z=1) 14 | angle_U = math.pi / 12 15 | 16 | #matrix_U = axis_to_unitary(axis_U, theta/2.0, basis) 17 | (matrix_U, components, angle) = get_random_unitary(basis) 18 | 19 | print "U= " + str(matrix_U) 20 | 21 | load_basic_approxes("basic_approxes_su4.pickle") 22 | 
set_basis(basis) 23 | 24 | set_factor_method(aram_diagonal_factor) 25 | 26 | Uop = Operator(name="U", matrix=matrix_U) 27 | 28 | Un = solovay_kitaev(Uop, 2, 'U', '') 29 | print "Approximated U: " + str(Un) 30 | 31 | print "Un= " + str(Un.matrix) 32 | 33 | print "dist(U,Un)= " + str(distance(Un.matrix, Uop.matrix)) -------------------------------------------------------------------------------- /tests/test_tiles.py: -------------------------------------------------------------------------------- 1 | from skc_tile import * 2 | import math 3 | 4 | from skc_basic_approx import * 5 | from skc_operator import * 6 | from skc_utils import * 7 | import numpy 8 | #import cPickle 9 | import time 10 | import random 11 | 12 | PI = math.pi 13 | PI_HALF = math.pi / 2 14 | 15 | ur_tile = Tile_SU2(psi = PI_HALF, theta = PI_HALF, phi = PI, width = PI) 16 | 17 | # Global phase factor funniness 18 | H = H.scale(-1j) 19 | T = T.scale(numpy.exp(-1j*math.pi / 8)) 20 | T_inv = T_inv.scale(numpy.exp(1j*math.pi / 8)) 21 | 22 | iset = [H, T, T_inv] 23 | 24 | # Do it! 
25 | basic_approxes = [] 26 | 27 | begin_time = time.time() 28 | 29 | # Collect all basic approxes for sequences of length 1 up to length l_0 30 | for i in range(7): 31 | i_approxes = gen_basic_approx(iset, i+1) 32 | for j in i_approxes: 33 | ur_tile.insert(j) 34 | basic_approxes.extend(i_approxes) 35 | #ur_tile.print_string(0) 36 | print "Number of basic approximations so far: " + str(len(basic_approxes)) 37 | 38 | gen_time = time.time() - begin_time 39 | print "Generation time: " + str(gen_time) 40 | 41 | # Compose a random unitary 42 | angle = random.random() * math.pi / 2 43 | nx = random.random() 44 | ny = random.random() 45 | nz = random.random() 46 | 47 | axis = Cart3DCoords(1,0,0) 48 | axis.x = nx 49 | axis.y = ny 50 | axis.z = nz 51 | axis.normalize() 52 | 53 | unitary = axis.to_unitary_rotation(angle) 54 | matrix = unitary.to_matrix() 55 | op = Operator("U", matrix) 56 | 57 | begin_time = time.time() 58 | 59 | (closest_approx, min_dist) = find_basic_approx(basic_approxes, op, trace_distance) 60 | 61 | find_time = time.time() - begin_time 62 | print "List search time: " + str(find_time) 63 | 64 | begin_time = time.time() 65 | 66 | (closest_approx, min_dist) = ur_tile.find_closest_point(op, trace_distance) 67 | 68 | find_time = time.time() - begin_time 69 | print "Tree search time: " + str(find_time) -------------------------------------------------------------------------------- /tests/test_trig.py: -------------------------------------------------------------------------------- 1 | from skc.utils import * 2 | from skc.trig import * 3 | 4 | import math 5 | import unittest 6 | import random 7 | 8 | class TestTrig(unittest.TestCase): 9 | 10 | # Test case 1 for recovering angle between 0 and pi/2 11 | def test_recover_angle_1(self): 12 | angle = (random.random() * PI_HALF) 13 | cos_phi = math.cos(angle) 14 | sin_phi = math.sin(angle) 15 | angle2 = recover_angle(cos_phi, sin_phi) 16 | assert_approx_equals(angle, angle2) 17 | 18 | # Test case 2 for recovering 
angle between pi/2 and pi 19 | def test_recover_angle_2(self): 20 | angle = (random.random() * PI_HALF) + PI_HALF 21 | cos_phi = math.cos(angle) 22 | sin_phi = math.sin(angle) 23 | angle2 = recover_angle(cos_phi, sin_phi) 24 | assert_approx_equals(angle, angle2) 25 | 26 | # Test case 3 for recovering angle between pi and 3pi/2 27 | def test_recover_angle_3(self): 28 | angle = (random.random() * PI_HALF) + PI 29 | cos_phi = math.cos(angle) 30 | sin_phi = math.sin(angle) 31 | angle2 = recover_angle(cos_phi, sin_phi) 32 | assert_approx_equals(angle, angle2) 33 | 34 | # Test case 4 for recovering angle between 3pi/2 and 2pi 35 | def test_recover_angle_4(self): 36 | angle = (random.random() * PI_HALF) + THREE_PI_HALF 37 | cos_phi = math.cos(angle) 38 | sin_phi = math.sin(angle) 39 | angle2 = recover_angle(cos_phi, sin_phi) 40 | assert_approx_equals(angle, angle2) 41 | 42 | ############################################################################## 43 | def get_suite(): 44 | suite = unittest.TestSuite() 45 | loader = unittest.TestLoader() 46 | suite1 = loader.loadTestsFromTestCase(TestTrig) 47 | suite.addTest(suite1) 48 | return suite 49 | 50 | ############################################################################## 51 | if (__name__ == '__main__'): 52 | suite = get_suite() 53 | unittest.TextTestRunner(verbosity=3).run(suite) 54 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from skc.operator import * 2 | from skc.utils import * 3 | 4 | import math 5 | import unittest 6 | 7 | class TestUtils(unittest.TestCase): 8 | 9 | # Test that fowler_distance is independent of a global phase 10 | def test_fowler_distance(self): 11 | dist = fowler_distance(H.matrix, H.matrix) 12 | assert_approx_equals_tolerance(dist, 0, TOLERANCE9) 13 | 14 | # Shift by a global phase 15 | H2 = numpy.exp(1.0j*math.pi / 2) * H.matrix 16 | 17 | dist = 
fowler_distance(H2, H.matrix) 18 | assert_approx_equals_tolerance(dist, 0, TOLERANCE9) 19 | 20 | def test_n_from_epsilon(self): 21 | c_approx = 4*math.sqrt(2) 22 | eps_0 = 1.0 / 33.0 23 | n = n_from_eps(eps=0.0001, c_approx=c_approx, eps_0=eps_0) 24 | msg = "level of recursion was: " + str(n) + " but should be 13" 25 | self.assertEqual(n, 13, msg) 26 | 27 | def test_direct_sum(self): 28 | A = matrix_direct_sum(numpy.matrix(1), numpy.matrix(2)) 29 | B = matrixify([[1,0],[0,2]]) 30 | assert_matrices_approx_equal(A, B, trace_distance) 31 | 32 | ############################################################################## 33 | def get_suite(): 34 | suite = unittest.TestSuite() 35 | loader = unittest.TestLoader() 36 | suite1 = loader.loadTestsFromTestCase(TestUtils) 37 | suite.addTest(suite1) 38 | return suite 39 | 40 | ############################################################################## 41 | if (__name__ == '__main__'): 42 | suite = get_suite() 43 | unittest.TextTestRunner(verbosity=3).run(suite) 44 | --------------------------------------------------------------------------------