├── LICENSE ├── README.rst ├── data_example_1 ├── A.npy └── b.npy ├── data_example_2 ├── SnP500_correlation.npy └── SnP500_returns.npy ├── data_example_3 └── A_movies_small.npy └── quantum_inspired.py /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Quantum-inspired algorithms in practice 2 | ############################################## 3 | 4 | Quantum-inspired algorithms to solve systems of linear equations 5 | and to implement recommendation systems 6 | 7 | The repository contains all source code used to generate the results 8 | presented in `"Quantum-inspired algorithms in practice" <https://arxiv.org/abs/1905.10415>`_. 9 | 10 | Contents 11 | ======== 12 | 13 | * ``quantum_inspired.py``: a Python module containing all functions composing the 14 | quantum-inspired algorithm. It contains three driver subroutines 15 | to use the implemented algorithms for the following applications: 16 | i) solving a system of linear equations Ax = b, 17 | ii) portfolio optimization, 18 | iii) recommendation systems. 19 | 20 | Usage and examples 21 | ================== 22 | 23 | Below we describe how to use the module to tackle these applications. 24 | The first step is to import the module. All input data required to 25 | run these examples are included in the repository. 26 | 27 | 1. Solving a system of linear equations Ax = b. 28 | 29 | .. code-block:: python 30 | 31 | import quantum_inspired as qi 32 | import numpy as np 33 | 34 | # load a low-rank random matrix A of dimension 500 x 250 35 | A = np.load('A.npy') 36 | # load the vector b (500 x 1) defining the linear system Ax = b 37 | b = np.load('b.npy') 38 | # rank of matrix A 39 | rank = 3 40 | # Input parameters for the quantum-inspired algorithm 41 | r = 200 42 | c = 200 43 | Nsamples = 50 44 | NcompX = 50 45 | sampled_comp, x = qi.linear_eqs(A, b, r, c, rank, Nsamples, NcompX) 46 | 47 | Args: 48 | 49 | * ``A``: in general, a rectangular matrix 50 | * ``b``: right-hand-side vector b 51 | * ``r``: number of sampled rows from matrix A 52 | * ``c``: number of sampled columns from matrix A 53 | * ``rank``: rank of matrix A 54 | * ``Nsamples``: number of stochastic samples performed to estimate the coefficients ``lambda_l`` 55 | * ``NcompX``: number of entries to be sampled from the solution vector ``x_tilde`` 56 | 57 | Returns: 58 | Tuple containing arrays with the ``NcompX`` sampled entries and the corresponding components of 59 | the solution vector ``x_tilde``. 60 | 61 | 62 | 2. Portfolio optimization.
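In this example, the linear system has a block structure: the first row and first column of ``A`` contain the vector of historical returns, the remaining block contains the asset correlation matrix, and ``b`` stacks the expected return ``mu`` on top of a zero vector. Written out explicitly (this is simply a restatement of the construction carried out in the snippet below; no new quantities are introduced), the system being solved is

.. math::

    \begin{pmatrix} 0 & \mathbf{r}^T \\ \mathbf{r} & \Sigma \end{pmatrix}
    \begin{pmatrix} \nu \\ \boldsymbol{\omega} \end{pmatrix}
    =
    \begin{pmatrix} \mu \\ \mathbf{0} \end{pmatrix},

where :math:`\mathbf{r}` is the vector of historical returns, :math:`\Sigma` is the correlation matrix, :math:`\mu` is the expected return, and :math:`\boldsymbol{\omega}` is the portfolio allocation vector recovered as part of the solution.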
63 | 64 | .. code-block:: python 65 | 66 | import quantum_inspired as qi 67 | import numpy as np 68 | 69 | # Read the correlation matrix 70 | corr_mat = np.load('SnP500_correlation.npy') 71 | # Read the vector of historical returns 72 | hist_returns = np.load('SnP500_returns.npy') 73 | 74 | mu = np.mean(hist_returns[:]) 75 | 76 | n_assets = len(hist_returns[:]) 77 | m_rows = n_assets + 1 78 | n_cols = n_assets + 1 79 | 80 | A = np.zeros((m_rows, n_cols)) 81 | # Building the matrix A 82 | # In this case, matrix A is a square block matrix of dimension 475 x 475 83 | # composed of the vector of historical returns r and the correlation matrix \Sigma 84 | # of the 474 assets included in the S&P 500 stock index. 85 | A[0, 0] = 0 86 | A[0, 1:n_cols] = hist_returns[:] 87 | A[1:m_rows, 0] = hist_returns[:] 88 | 89 | A[1:m_rows, 1:n_cols] = corr_mat[:, :] 90 | 91 | # b is the vector [\mu, \vec 0] with \mu being the expected return 92 | b = np.zeros(m_rows) 93 | b[0] = mu 94 | 95 | # This defines a portfolio optimization problem Ax = b 96 | # where x = [\nu, \vec{\omega}] and \vec{\omega} is the 97 | # portfolio allocation vector 98 | 99 | # rank of the low-rank approximation of matrix A 100 | rank = 5 101 | # Input parameters for the quantum-inspired algorithm 102 | r = 340 103 | c = 340 104 | Nsamples = 10 105 | NcompX = 10 106 | 107 | # Notice that this function receives "mu" instead of the whole vector "b", 108 | # as the general coefficient lambda_l reduces to the inner product of the 109 | # first row of A with v^l, rescaled by "mu". This allows us to significantly 110 | # reduce the number of stochastic samples needed to estimate "lambdas[0:rank]". 111 | sampled_comp, x = qi.linear_eqs_portopt(A, mu, r, c, rank, Nsamples, NcompX) 112 | 113 | Args: 114 | 115 | * ``A``: in general, a rectangular matrix 116 | * ``mu``: expected return 117 | * ``r``: number of sampled rows from matrix A 118 | * ``c``: number of sampled columns from matrix A 119 | * ``rank``: rank of matrix A 120 | * ``Nsamples``: number of stochastic samples performed to estimate the coefficients ``lambda_l`` 121 | * ``NcompX``: number of entries to be sampled from the solution vector ``x_tilde`` 122 | 123 | Returns: 124 | Tuple containing arrays with the ``NcompX`` sampled entries and the corresponding components of 125 | the solution vector ``x_tilde``. 126 | 127 | 3. Recommendation system. 128 | 129 | ..
code-block:: python 130 | 131 | import quantum_inspired as qi 132 | import numpy as np 133 | 134 | # load a preference matrix A of dimension m x n encoding the ratings 135 | # provided by m = 611 users for n = 9724 movies 136 | A = np.load('A_movies_small.npy') 137 | 138 | # In this example we want to reconstruct the full row of matrix A corresponding 139 | # to a specific user (416 in this case) and use the highest components of the 140 | # reconstructed row vector to recommend new movies 141 | user = 416 142 | 143 | # rank of the low-rank approximation of matrix A 144 | rank = 10 145 | # Input parameters for the quantum-inspired algorithm 146 | r = 450 147 | c = 4500 148 | Nsamples = 10 149 | NcompX = 10 150 | sampled_comp, x = qi.recomm_syst(A, user, r, c, rank, Nsamples, NcompX) 151 | 152 | Args: 153 | 154 | * ``A``: preference matrix 155 | * ``user``: row index of a specific user in the preference matrix A 156 | * ``r``: number of sampled rows from matrix A 157 | * ``c``: number of sampled columns from matrix A 158 | * ``rank``: rank of matrix A 159 | * ``Nsamples``: number of stochastic samples performed to estimate the coefficients ``lambda_l`` 160 | * ``NcompX``: number of entries to be sampled from the row vector ``A[user, :]`` 161 | 162 | Returns: 163 | Tuple containing arrays with the ``NcompX`` sampled entries and the corresponding elements of 164 | the row vector ``A[user, :]``. 165 | 166 | Requirements 167 | ============ 168 | 169 | Python with NumPy 170 | 171 | Authors 172 | ======= 173 | 174 | Juan Miguel Arrazola, Alain Delgado, Bhaskar Roy Bardhan, Seth Lloyd 175 | 176 | If you are doing any research using this source code, please cite the following paper: 177 | 178 | Juan Miguel Arrazola, Alain Delgado, Bhaskar Roy Bardhan, Seth Lloyd. 179 | Quantum-inspired algorithms in practice. arXiv, 2019. `arXiv:1905.10415 <https://arxiv.org/abs/1905.10415>`_ 180 | 181 | License 182 | ======= 183 | 184 | This source code is free and open source, released under the Apache License, Version 2.0.
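As a small usage illustration (a sketch only, not part of the module): the tuple returned by ``qi.recomm_syst`` in example 3 can be post-processed to rank the sampled movies by their reconstructed preference values. The snippet below assumes the variables ``A``, ``user``, ``sampled_comp`` and ``x`` from example 3 are in scope, and it assumes that movies the user has not yet rated are stored as zeros in the preference matrix.

.. code-block:: python

    # rank the sampled columns (movies) by their reconstructed preference value
    order = np.argsort(x)[::-1]
    for idx in order:
        movie = sampled_comp[idx]
        # assumption: unrated movies are stored as zeros in A
        if A[user, movie] == 0:
            print("candidate movie index:", movie, "predicted score:", x[idx])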
185 | -------------------------------------------------------------------------------- /data_example_1/A.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XanaduAI/quantum-inspired-algorithms/39c6f70124733cb975a4163cf0bcc8cd05234c73/data_example_1/A.npy -------------------------------------------------------------------------------- /data_example_1/b.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XanaduAI/quantum-inspired-algorithms/39c6f70124733cb975a4163cf0bcc8cd05234c73/data_example_1/b.npy -------------------------------------------------------------------------------- /data_example_2/SnP500_correlation.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XanaduAI/quantum-inspired-algorithms/39c6f70124733cb975a4163cf0bcc8cd05234c73/data_example_2/SnP500_correlation.npy -------------------------------------------------------------------------------- /data_example_2/SnP500_returns.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XanaduAI/quantum-inspired-algorithms/39c6f70124733cb975a4163cf0bcc8cd05234c73/data_example_2/SnP500_returns.npy -------------------------------------------------------------------------------- /data_example_3/A_movies_small.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XanaduAI/quantum-inspired-algorithms/39c6f70124733cb975a4163cf0bcc8cd05234c73/data_example_3/A_movies_small.npy -------------------------------------------------------------------------------- /quantum_inspired.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Xanadu Quantum Technologies Inc. 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import numpy as np 16 | from numpy import linalg as la 17 | import time 18 | import os 19 | 20 | 21 | def ls_probs(m, n, A): 22 | 23 | r"""Function generating the length-squared (LS) probability distributions for sampling matrix A. 24 | 25 | Args: 26 | m (int): number of rows of matrix A 27 | n (int): row n of columns of matrix A 28 | A (array[complex]): most general case is a rectangular complex matrix 29 | 30 | Returns: 31 | tuple: Tuple containing the row-norms, LS probability distributions for rows and columns, 32 | and Frobenius norm. 
33 | """ 34 | 35 | # populates array with the row-norms squared of matrix A 36 | row_norms = np.zeros(m) 37 | for i in range(m): 38 | row_norms[i] = np.abs(la.norm(A[i, :]))**2 39 | 40 | # Frobenius norm of A 41 | A_Frobenius = np.sqrt(np.sum(row_norms)) 42 | 43 | LS_prob_rows = np.zeros(m) 44 | 45 | # normalized length-square row probability distribution 46 | for i in range(m): 47 | LS_prob_rows[i] = row_norms[i] / A_Frobenius**2 48 | 49 | LS_prob_columns = np.zeros((m, n)) 50 | 51 | # populates array with length-square column probability distributions 52 | # LS_prob_columns[i]: LS probability distribution for selecting columns from row A[i] 53 | for i in range(m): 54 | LS_prob_columns[i, :] = [np.abs(k)**2 / row_norms[i] for k in A[i, :]] 55 | 56 | return row_norms, LS_prob_rows, LS_prob_columns, A_Frobenius 57 | 58 | 59 | def sample_C(A, m, n, r, c, row_norms, LS_prob_rows, LS_prob_columns, A_Frobenius): 60 | 61 | r"""Function used to generate matrix C by performing LS sampling of rows and columns of matrix A. 62 | 63 | Args: 64 | A (array[complex]): rectangular, in general, complex matrix 65 | m (int): number of rows of matrix A 66 | n (int): number of columns of matrix A 67 | r (int): number of sampled rows 68 | c (int): number of sampled columns 69 | row_norms (array[float]): norm of the rows of matrix A 70 | LS_prob_rows (array[float]): row LS probability distribution of matrix A 71 | LS_prob_columns (array[float]): column LS probability distribution of matrix A 72 | A_Frobenius (float): Frobenius norm of matrix A 73 | 74 | Returns: 75 | tuple: Tuple containing the singular values (sigma), left- (w) and right-singular vectors (vh) of matrix C, 76 | the sampled rows (rows), the column LS prob. distribution (LS_prob_columns_R) of matrix R and split running 77 | times for the FKV algorithm. 78 | """ 79 | 80 | tic = time.time() 81 | # sample row indices from row length_square distribution 82 | rows = np.random.choice(m, r, replace=True, p=LS_prob_rows) 83 | 84 | columns = np.zeros(c, dtype=int) 85 | # sample column indices 86 | for j in range(c): 87 | # sample row index uniformly at random 88 | i = np.random.choice(rows, replace=True) 89 | # sample column from length-square distribution of row A[i] 90 | columns[j] = np.random.choice(n, 1, p=LS_prob_columns[i]) 91 | 92 | toc = time.time() 93 | rt_sampling_C = toc - tic 94 | 95 | # building the lenght-squared distribution to sample columns from matrix R 96 | R_row = np.zeros(n) 97 | LS_prob_columns_R = np.zeros((r, n)) 98 | 99 | for s in range(r): 100 | R_row[:] = A[rows[s], :] * A_Frobenius / (np.sqrt(r) * np.sqrt(row_norms[rows[s]])) 101 | R_row_norm = np.abs(la.norm(R_row[:]))**2 102 | LS_prob_columns_R[s, :] = [np.abs(k)**2 / R_row_norm for k in R_row[:]] 103 | 104 | tic = time.time() 105 | # creates empty array for R and C matrices. 
We treat R as r x c here, since we only need columns later 106 | R_C = np.zeros((r, c)) 107 | C = np.zeros((r, c)) 108 | 109 | # populates array for matrix R with the submatrix of A defined by sampled rows/columns 110 | for s in range(r): 111 | for t in range(c): 112 | R_C[s, t] = A[rows[s], columns[t]] 113 | 114 | # renormalize each row of R 115 | R_C[s,:] = R_C[s,:] * A_Frobenius / (np.sqrt(r) * np.sqrt(row_norms[rows[s]])) 116 | 117 | # creates empty array of column norms 118 | column_norms = np.zeros(c) 119 | 120 | # computes column Euclidean norms 121 | for t in range(c): 122 | for s in range(r): 123 | column_norms[t] += np.abs(R_C[s, t])**2 124 | 125 | # renormalize columns of C 126 | for t in range(c): 127 | C[:, t] = R_C[:, t] * (A_Frobenius / np.sqrt(column_norms[t])) / np.sqrt(c) 128 | 129 | toc = time.time() 130 | rt_building_C = toc - tic 131 | 132 | tic = time.time() 133 | # Computing the SVD of sampled C matrix 134 | w, sigma, vh = la.svd(C, full_matrices=False) 135 | 136 | toc = time.time() 137 | rt_svd_C = toc - tic 138 | 139 | return w, rows, sigma, vh, LS_prob_columns_R, rt_sampling_C, rt_building_C, rt_svd_C 140 | 141 | 142 | def sample_me_lsyst(A, b, m, n, samples, rank, r, w, rows, sigma, row_norms, LS_prob_rows, LS_prob_columns, A_Frobenius): 143 | 144 | r""" Function to estimate the coefficients :math: '\lambda_l = \langle v^l \vert A^\dagger \vert b \rangle' 145 | 146 | Args: 147 | A (array[complex]): rectangular, in general, complex matrix 148 | m (int): number of rows of matrix A 149 | n (int): number of columns of matrix A 150 | samples (int): number of stochastic samples performed to estimate :math: '\lambda_l' 151 | rank (int): rank of matrix A 152 | r (int): number of sampled rows from matrix A 153 | w (array[complex]): left-singular vectors of matrix C 154 | rows (array[int]): indices of the r sampled rows of matrix A 155 | sigma (array[float]): singular values of matrix C 156 | row_norms (array[float]): row norms of matrix A 157 | LS_prob_rows (array[float]): LS row probability distribution of matrix A 158 | LS_prob_columns (array[float]): LS column probability distribution of matrix A 159 | A_Frobenius (float): Frobenius norm of matrix A 160 | 161 | Returns: 162 | array[float]: Array containing the coefficients :math: '\lambda_l = \langle v^l \vert A^\dagger \vert b \rangle' 163 | """ 164 | 165 | # Number of independent estimates. 
We take the median of these as the final estimate 166 | reps = 10 167 | 168 | # creates empty array of matrix elements for l=1,2,.., k and many repetitions of the estimates 169 | matrix_elements = np.zeros((reps, rank)) 170 | 171 | for i in range(reps): 172 | 173 | # calculate matrix element for l=1,2,.., k 174 | for l in range(rank): 175 | 176 | # create empty array of sampled matrix elements 177 | X = np.zeros(samples) 178 | 179 | # sample matrix elements 180 | for k in range(samples): 181 | 182 | # sample row index from length-square distribution 183 | sample_i = np.random.choice(m, 1, replace=True, p=LS_prob_rows)[0] 184 | # sample column index from length-square distribution from previously sampled row 185 | sample_j = np.random.choice(n, 1, p=LS_prob_columns[sample_i])[0] 186 | 187 | # j-th entry of right singular vector of matrix R 188 | v_j = 0 189 | 190 | # calculates v_j 191 | for s in range(r): 192 | v_j += A[rows[s], sample_j] * w[s, l] / (np.sqrt(row_norms[rows[s]])) 193 | # print(v_j) 194 | v_j = v_j * A_Frobenius / (np.sqrt(r) * sigma[l]) 195 | 196 | # computes sampled matrix element 197 | X[k] = ((A_Frobenius ** 2 * b[sample_i]) / (A[sample_i, sample_j])) * v_j 198 | 199 | # assigns estimates for each l and repetition 200 | matrix_elements[i, l] = np.mean(X) 201 | 202 | # creates empty array of matrix elements 203 | lambdas = np.zeros(rank) 204 | 205 | # take median of all repeated estimates 206 | for l in range(rank): 207 | lambdas[l] = np.median(matrix_elements[:, l]) 208 | 209 | return lambdas 210 | 211 | 212 | def sample_me_rsys(A, user, n, samples, rank, r, w, rows, sigma, row_norms, LS_prob_columns, A_Frobenius): 213 | 214 | r""" Function to estimate the coefficients :math: '\lambda_l = \langle A_\mathrm{user}^\mathrm{T}, v^l \rangle' 215 | 216 | Args: 217 | A (array[complex]): rectangular, in general, complex matrix 218 | user (int): labels the row index of a specific user in the preference matrix A 219 | n (int): number of columns of matrix A 220 | samples (int): number of stochastic samples performed to estimate :math: '\lambda_l' 221 | rank (int): rank of matrix A 222 | r (int): number of sampled rows from matrix A 223 | w (array[complex]): left-singular vectors of matrix C 224 | rows (array[int]): indices of the r sampled rows of matrix A 225 | sigma (array[float]): singular values of matrix C 226 | row_norms (array[float]): row norms of matrix A 227 | LS_prob_columns (array[float]): LS column probability distribution of matrix A 228 | A_Frobenius (float): Frobenius norm of matrix A 229 | 230 | Returns: 231 | array[float]: Array containing the coefficients :math: '\lambda_l = \langle A_\mathrm{user}^\mathrm{T}, v^l \rangle' 232 | """ 233 | 234 | # Number of independent estimates. 
We take the median of these as the final estimate 235 | reps = 10 236 | 237 | # creates empty array of the coefficients lambda for l=1,2,.., k and many repetitions of the estimates 238 | coefficients = np.zeros((reps, rank)) 239 | 240 | for i in range(reps): 241 | 242 | # calculate matrix element for l=1,2,..,k 243 | for l in range(rank): 244 | 245 | # create empty array of sampled matrix elements 246 | X = np.zeros(samples) 247 | 248 | # sample matrix elements 249 | for k in range(samples): 250 | 251 | # sample column index from length-square distribution from previously sampled row 252 | sample_j = np.random.choice(n, 1, p=LS_prob_columns[user])[0] 253 | # j-th entry of right singular vector of matrix R 254 | v_j = 0 255 | 256 | # calculates v_j 257 | for s in range(r): 258 | v_j += A[rows[s], sample_j] * w[s, l] / (np.sqrt(row_norms[rows[s]])) 259 | # print(v_j) 260 | v_j = v_j * A_Frobenius / (np.sqrt(r) * sigma[l]) 261 | 262 | # computes sampled matrix element 263 | X[k] = (row_norms[user]*v_j) / (A[user, sample_j]) 264 | 265 | # assigns estimates for each l and repetition 266 | coefficients[i, l] = np.mean(X) 267 | 268 | # creates empty array of coefficients 269 | lambdas = np.zeros(rank) 270 | 271 | # take median of all repeated estimates 272 | for l in range(rank): 273 | lambdas[l] = np.median(coefficients[:, l]) 274 | 275 | return lambdas 276 | 277 | 278 | def sample_from_x(A, r, n, rows, row_norms, LS_prob_columns_R, A_Frobenius, w_vector, w_norm): 279 | 280 | r""" Function to perform LS sampling of the solution vector :math: '\bm{x}' 281 | 282 | Args: 283 | A (array[complex]): rectangular, in general, complex matrix 284 | r (int): number of sampled rows from matrix A 285 | n (int): number of columns of matrix A 286 | rows (array[int]): indices of the r sampled rows of matrix A 287 | row_norms (array[float]): row norms of matrix A 288 | LS_prob_columns_R (array[float]): LS column prob. 
distribution of matrix R 289 | A_Frobenius (float): Frobenius norm of matrix A 290 | w_vector (array[float]): See paper for different definitions 291 | w_norm (float): norm of vector :math: '\omega' 292 | 293 | Returns: 294 | tuple: Tuple with the index of the sampled component and the number of rejected samples 295 | """ 296 | 297 | keep_going = True 298 | out_j = 0 299 | counter = 0 300 | while keep_going: 301 | 302 | counter += 1 303 | # sample row index uniformly at random 304 | i_sample = np.random.choice(r) 305 | 306 | # sample column index from length-square distribution of corresponding row 307 | j_sample = np.random.choice(n, 1, p=LS_prob_columns_R[i_sample])[0] 308 | 309 | # column j_sample of matrix R 310 | R_j = np.zeros(r) 311 | 312 | # compute entries of R_j 313 | for s in range(r): 314 | R_j[s] = A[rows[s], j_sample] / np.sqrt(row_norms[rows[s]]) 315 | R_j = (A_Frobenius/np.sqrt(r)) * R_j 316 | 317 | # norm of column vector R_j 318 | R_j_norm = la.norm(R_j) 319 | # inner product of R_j and w 320 | Rw_dot = np.dot(R_j, w_vector) 321 | 322 | # probability to select j_sample as output 323 | prob = (Rw_dot / (w_norm * R_j_norm))**2 324 | 325 | # determine if we output j_sample given above probability 326 | coin = np.random.binomial(1, prob) 327 | if coin == 1: 328 | out_j = j_sample 329 | # if we get heads from coin, then stop while loop 330 | keep_going = False 331 | 332 | return int(out_j), counter 333 | 334 | 335 | def vl_vector(l, A, r, w, rows, sigma, row_norms, A_Frobenius): 336 | 337 | r""" Function to reconstruct right-singular vector of matrix A 338 | 339 | Args: 340 | l (int): singular vector index 341 | A (array[complex]): rectangular, in general, complex matrix 342 | r (int): number of sampled rows from matrix A 343 | w (array[complex]): left-singular vectors of matrix C 344 | rows (array[int]): indices of the r sampled rows of matrix A 345 | row_norms (array[float]): row norms of matrix A 346 | A_Frobenius (float): Frobenius norm of matrix A 347 | 348 | Returns: 349 | array[float]: reconstructed right-singular vector 350 | """ 351 | 352 | n = len(A[1, :]) 353 | v_approx = np.zeros(n) 354 | # building approximated v^l vector 355 | factor = A_Frobenius / ( np.sqrt(r) * sigma[l] ) 356 | for s in range(r): 357 | v_approx[:] += ( A[rows[s], :] / np.sqrt(row_norms[rows[s]]) ) * w[s, l] 358 | v_approx[:] = v_approx[:] * factor 359 | 360 | return v_approx 361 | 362 | 363 | def uvl_vector(l, A, r, w, rows, sigma, row_norms, A_Frobenius): 364 | 365 | r""" Function to reconstruct right-singular vector of matrix A 366 | 367 | Args: 368 | l (int): singular vector index 369 | A (array[complex]): rectangular, in general, complex matrix 370 | r (int): number of sampled rows from matrix A 371 | w (array[complex]): left-singular vectors of matrix C 372 | rows (array[int]): indices of the r sampled rows of matrix A 373 | row_norms (array[float]): row norms of matrix A 374 | A_Frobenius (float): Frobenius norm of matrix A 375 | 376 | Returns: 377 | tuple: Tuple with arrays containing approximated singular vectors :math: '\bm{u}^l, \bm{v}^l' 378 | """ 379 | 380 | m, n = A.shape 381 | u_approx = np.zeros(m) 382 | v_approx = np.zeros(n) 383 | # building approximated v^l vector 384 | factor = A_Frobenius / ( np.sqrt(r) * sigma[l] ) 385 | for s in range(r): 386 | v_approx[:] += ( A[rows[s], :] / np.sqrt(row_norms[rows[s]]) ) * w[s, l] 387 | v_approx[:] = v_approx[:] * factor 388 | 389 | u_approx = (A @ v_approx) / sigma[l] 390 | 391 | return u_approx, v_approx 392 | 393 | 394 | # SUBPROGRAM 
TO COMPUTE APPROXIMATED SOLUTIONS \TILDE X 395 | def approx_solution(A, rank, r, w, rows, sigma, row_norms, A_Frobenius, lambdas, comp): 396 | 397 | r""" Function to compute the approximated value for a specific entry of the solution vector 398 | :math: '\widetilde{x}_\mathrm{comp}' for the system of linear equations :math: 'A \bm{x} = b' 399 | 400 | Args: 401 | A (array[complex]): rectangular, in general, complex matrix 402 | rank (int): rank of matrix A 403 | r (int): number of sampled rows from matrix A 404 | w (array[complex]): left-singular vectors of matrix C 405 | sigma (array[float]): singular values of matrix C 406 | row_norms (array[float]): row norms of matrix A 407 | A_Frobenius (float): Frobenius norm of matrix A 408 | lambdas (array[float]): coefficients :math: '\lambda_l = \langle v^l \vert A^\dagger \vert b \rangle' 409 | comp (int): entry of the solution vector to be evaluated 410 | 411 | Returns: 412 | float: component of the solution vector :math: '\widetilde{x}_\mathrm{comp}' 413 | """ 414 | 415 | approx_value = 0 416 | for l in range(rank): 417 | 418 | # building the component "comp" of vector v^l 419 | v_comp = 0 420 | for s in range(r): 421 | v_comp += A[rows[s], comp] * w[s, l] / np.sqrt( row_norms[ rows[s] ] ) 422 | v_comp = v_comp * A_Frobenius / (np.sqrt(r) * sigma[l]) 423 | 424 | # computing the approximated value for x (\tilde x) 425 | approx_value += v_comp * lambdas[l] / sigma[l]**2 426 | 427 | return approx_value 428 | 429 | 430 | def approx_solution_rsys(A, rank, r, w, rows, sigma, row_norms, A_Frobenius, lambdas, comp): 431 | 432 | r""" Function to compute the matrix element :math: 'A_{\mathrm{user}, \mathrm{comp}}' of the preference matrix A 433 | 434 | Args: 435 | A (array[complex]): rectangular, in general, complex matrix 436 | rank (int): rank of matrix A 437 | r (int): number of sampled rows from matrix A 438 | sigma (array[float]): singular values of matrix C 439 | row_norms (array[float]): row norms of matrix A 440 | A_Frobenius (float): Frobenius norm of matrix A 441 | lambdas (array[float]): coefficients :math: '\lambda_l = \langle A_\mathrm{user}^\mathrm{T}, v^l \rangle' 442 | comp (int): entry of the solution vector to be evaluated 443 | 444 | Returns: 445 | float: the element :math: 'A_{\mathrm{user}, \mathrm{comp}}' 446 | """ 447 | 448 | approx_value = 0 449 | for l in range(rank): 450 | 451 | # building the component "comp" of vector v^l 452 | v_comp = 0 453 | for s in range(r): 454 | v_comp += A[rows[s], comp] * w[s, l] / np.sqrt( row_norms[ rows[s] ] ) 455 | v_comp = v_comp * A_Frobenius / (np.sqrt(r) * sigma[l]) 456 | 457 | # computing the approximated value for x (\tilde x) 458 | approx_value += v_comp * lambdas[l] 459 | 460 | return approx_value 461 | 462 | 463 | def print_output(r, c, rank, sigma, ul_approx, vl_approx, Nsamples, lambdas, NcompX, sampled_comp, x_tilde, 464 | rt_ls_prob, rt_sampling_C, rt_building_C, rt_svd_C, rt_sampling_me, rt_sampling_sol): 465 | 466 | r""" Function printing out numerical results and running times 467 | 468 | Args: 469 | r (int): number of sampled rows from matrix A 470 | c (int): number of sampled columns from matrix A 471 | rank (int): rank of matrix A 472 | sigma (array[float]): singular values of matrix C 473 | vl_approx (array[float]): reconstructed right-singular vectors of matrix A 474 | Nsamples (int): number of stochastic samples performed to estimate :math: '\lambda_l' 475 | lambdas (array[float]): coefficients :math: '\lambda_l' 476 | NcompX (int): number of entries to be sampled from the 
solution vector 477 | sampled_comp (array[int]): indices with sampled entries 478 | x_tilde (array[float]): stores NcompX values components of the vector solution 479 | rt_ls_prob (float): running time to compute LS prob. distributions 480 | rt_sampling_C (float): running time to sample 'r' rows and 'c' columns of matrix A 481 | rt_building_C (float): running time to build submatrix C 482 | rt_svd_C (float): running time to perform SVD of submatrix C 483 | rt_sampling_me (float): running time to sample all coeffients :math:'lambda_l' 484 | rt_sampling_sol (float): running time to sample entries from the vector solution 485 | """ 486 | 487 | filename = "timing_C_{}_x_{}_Nsamples_{}_rank_{}_NcompX_{}.out".format(r, c, Nsamples, rank, NcompX) 488 | 489 | with open(filename, 'w') as f: 490 | f.write("# r\t c\trt_ls_prob\trt_sampling_C\trt_building_C\trt_svd_C\trt_sampling_me\trt_sampling_sol \n") 491 | f.write(" {:4d} \t {:4d} \t {:6.4f} \t {:6.4f} \t {:6.4f} \t {:6.4f} \t {:6.4f} \t {:6.4f} \n".format( 492 | r, c, rt_ls_prob, rt_sampling_C, rt_building_C, rt_svd_C, rt_sampling_me, rt_sampling_sol)) 493 | 494 | # approximated singular values and right-vectors and coefficients lambda_l 495 | filename = "sigma_l_C_{}_x_{}_rank_{}.out".format(r, c, rank) 496 | 497 | with open(filename, 'w') as f: 498 | f.write("# l\t sigma_l \n") 499 | for l in range(rank): 500 | f.write("{:4d} \t {:20.10f} \n".format(l + 1, sigma[l])) 501 | np.save("v_l_" + str(l), vl_approx[:, l]) 502 | np.save("u_l_" + str(l), ul_approx[:, l]) 503 | 504 | # approximated coefficients lambda_l 505 | filename = "lambda_l_C_{}_x_{}_rank_{}_Nsamples_{}.out".format(r, c, rank, Nsamples) 506 | 507 | with open(filename, 'w') as f: 508 | f.write("# l\t lambda_l \n") 509 | for l in range(rank): 510 | f.write("{:4d} \t {:20.10f} \n".format(l + 1, lambdas[l])) 511 | 512 | # sampled components of the approximate vector solution 513 | 514 | filename = "x_vector_C_{}_x_{}_rank_{}_Nsamples_{}.out".format(r, c, rank, Nsamples) 515 | 516 | with open(filename, 'w') as f: 517 | f.write("# i \t comp[i] \t x[comp[i]] \n") 518 | for t in range(NcompX): 519 | f.write("{:4d} \t {:4d} \t {:12.8f} \n".format(t, sampled_comp[t] + 1, x_tilde[t])) 520 | 521 | return 522 | 523 | 524 | def linear_eqs(A, b, r, c, rank, Nsamples, NcompX): 525 | 526 | r""" Function to solve the the linear system of equations :math:'A \bm{x} = b' using the quantum-inspired 527 | algorithm 528 | 529 | Args: 530 | A (array[complex]): rectangular, in general, complex matrix 531 | b (array[float]): right-hand-side vector b 532 | r (int): number of sampled rows from matrix A 533 | c (int): number of sampled columns from matrix A 534 | rank (int): rank of matrix A 535 | Nsamples (int): number of stochastic samples performed to estimate :math: '\lambda_l' 536 | NcompX (int): number of entries to be sampled from the solution vector :math:'\bm{x}' 537 | 538 | Returns: 539 | tuple: Tuple containing arrays with the sampled entries and corresponding components of 540 | the solution vector :math: '\bm{x}' 541 | """ 542 | 543 | m_rows, n_cols = np.shape(A) 544 | 545 | # 1- Generating LS probability distributions to sample from matrix A 546 | tic = time.time() 547 | 548 | LS = ls_probs(m_rows, n_cols, A) 549 | 550 | toc = time.time() 551 | 552 | rt_ls_prob = toc - tic 553 | 554 | # 2- Building matrix C by sampling "r" rows and "c" columns from matrix A and computing SVD of matrix C 555 | svd_C = sample_C(A, m_rows, n_cols, r, c, *LS[0:4]) 556 | w = svd_C[0] 557 | sigma = svd_C[2] 558 | 559 | # 
Reconstruction of the right-singular vectors of matrix A 560 | ul_approx = np.zeros((m_rows, rank)) 561 | vl_approx = np.zeros((n_cols, rank)) 562 | for l in range(rank): 563 | ul_approx[:, l], vl_approx[:, l] = uvl_vector(l, A, r, w, svd_C[1], sigma, LS[0], LS[3]) 564 | 565 | # 3- Sampling of the matrix elements lambdas[0:rank] = 566 | tic = time.time() 567 | lambdas = sample_me_lsyst(A, b, m_rows, n_cols, Nsamples, rank, r, *svd_C[0:3], *LS[0:4]) 568 | toc = time.time() 569 | rt_sampling_me = toc - tic 570 | 571 | # 4- Sampling the vector solution 572 | tic = time.time() 573 | 574 | # computes vector w = sum_l lambda_l/sigma_l^3 * w_l 575 | w_vector = np.zeros(r) 576 | for l in range(rank): 577 | w_vector[:] += (lambdas[l] / sigma[l] ** 3) * w[:, l] 578 | 579 | w_norm = la.norm(w_vector) 580 | 581 | # create array to stored the sampled components 582 | sampled_comp = np.zeros(NcompX, dtype=np.uint32) 583 | n_of_rejected_samples = np.zeros(NcompX, dtype=np.uint32) 584 | x_tilde = np.zeros(NcompX) 585 | 586 | for t in range(NcompX): 587 | sampled_comp[t], n_of_rejected_samples[t] = \ 588 | sample_from_x(A, r, n_cols, svd_C[1], LS[0], svd_C[4], LS[3], w_vector, w_norm) 589 | 590 | toc = time.time() 591 | rt_sampling_sol = toc - tic 592 | 593 | for t in range(NcompX): 594 | x_tilde[t] = approx_solution(A, rank, r, w, svd_C[1], svd_C[2], 595 | LS[0], LS[3], lambdas, sampled_comp[t]) 596 | 597 | RT = [rt_ls_prob, *svd_C[5:8], rt_sampling_me, rt_sampling_sol] 598 | 599 | # 5- Printing output of the algorithm 600 | 601 | FKV = [r, c, rank, sigma, ul_approx, vl_approx] 602 | MC = [Nsamples, lambdas] 603 | RS = [NcompX, sampled_comp, x_tilde] 604 | RT = [rt_ls_prob, *svd_C[5:8], rt_sampling_me, rt_sampling_sol] 605 | 606 | print_output(*FKV, *MC, *RS, *RT) 607 | 608 | return sampled_comp, x_tilde 609 | 610 | 611 | def recomm_syst(A, user, r, c, rank, Nsamples, NcompX): 612 | 613 | r""" Function to compute missing entries of preference matrix row :math: 'A_{\mathrm{user}.}' for the user "user" 614 | 615 | Args: 616 | A (array[complex]): rectangular, in general, complex matrix 617 | user (int): labels the row index of a specific user in the preference matrix A 618 | r (int): number of sampled rows from matrix A 619 | c (int): number of sampled columns from matrix A 620 | rank (int): rank of matrix A 621 | Nsamples (int): number of stochastic samples performed to estimate :math: '\lambda_l' 622 | NcompX (int): number of entries to be sampled from the preference matrix row :math:'A_{\mathrm{user}.}' 623 | 624 | Returns: 625 | tuple: Tuple containing arrays with the sampled entries and corresponding elements of 626 | the preference matrix row :math:'A_{\mathrm{user}.}' 627 | """ 628 | 629 | m_rows, n_cols = np.shape(A) 630 | 631 | # 1- Generating LS probability distributions to sample from matrix A 632 | tic = time.time() 633 | 634 | LS = ls_probs(m_rows, n_cols, A) 635 | 636 | toc = time.time() 637 | 638 | rt_ls_prob = toc - tic 639 | 640 | # 2- Building matrix C by sampling "r" rows and "c" columns from matrix A and computing SVD of matrix C 641 | svd_C = sample_C(A, m_rows, n_cols, r, c, *LS[0:4]) 642 | w = svd_C[0] 643 | sigma = svd_C[2] 644 | 645 | # Reconstruction of the right-singular vectors of matrix A 646 | ul_approx = np.zeros((m_rows, rank)) 647 | vl_approx = np.zeros((n_cols, rank)) 648 | for l in range(rank): 649 | ul_approx[:, l], vl_approx[:, l] = uvl_vector(l, A, r, w, svd_C[1], sigma, LS[0], LS[3]) 650 | 651 | # 3- Sampling of the matrix elements lambdas[0:rank] = 652 | tic = 
time.time() 653 | 654 | lambdas = sample_me_rsys(A, user, n_cols, Nsamples, rank, r, *svd_C[0:3], LS[0], *LS[2:4] ) 655 | 656 | toc = time.time() 657 | rt_sampling_me = toc - tic 658 | 659 | # 4- Sampling the vector solution 660 | tic = time.time() 661 | 662 | # computes vector w = sum_l lambda_l/sigma_l * w_l 663 | w_vector = np.zeros(r) 664 | for l in range(rank): 665 | w_vector[:] += (lambdas[l] / sigma[l]) * w[:, l] 666 | 667 | w_norm = la.norm(w_vector) 668 | 669 | # create array to stored the sampled components 670 | sampled_comp = np.zeros(NcompX, dtype=np.uint32) 671 | n_of_rejected_samples = np.zeros(NcompX, dtype=np.uint32) 672 | x_tilde = np.zeros(NcompX) 673 | 674 | for t in range(NcompX): 675 | sampled_comp[t], n_of_rejected_samples[t] = \ 676 | sample_from_x(A, r, n_cols, svd_C[1], LS[0], svd_C[4], LS[3], w_vector, w_norm) 677 | 678 | toc = time.time() 679 | rt_sampling_sol = toc - tic 680 | 681 | for t in range(NcompX): 682 | x_tilde[t] = approx_solution_rsys(A, rank, r, w, svd_C[1], svd_C[2], 683 | LS[0], LS[3], lambdas, sampled_comp[t]) 684 | 685 | # 5- Printing out extensive information 686 | 687 | FKV = [r, c, rank, sigma, ul_approx, vl_approx] 688 | MC = [Nsamples, lambdas] 689 | RS = [NcompX, sampled_comp, x_tilde] 690 | RT = [rt_ls_prob, *svd_C[5:8], rt_sampling_me, rt_sampling_sol] 691 | 692 | print_output(*FKV, *MC, *RS, *RT) 693 | 694 | return sampled_comp, x_tilde 695 | 696 | 697 | def linear_eqs_portopt(A, mu, r, c, rank, Nsamples, NcompX): 698 | 699 | r""" Function to optimize the portfolio allocation vector for different assets for a given 700 | expected return 701 | 702 | Args: 703 | A (array[complex]): rectangular, in general, complex matrix 704 | mu (float): expected return 705 | r (int): number of sampled rows from matrix A 706 | c (int): number of sampled columns from matrix A 707 | rank (int): low-rank approximation of matrix A 708 | Nsamples (int): number of stochastic samples performed to estimate :math: '\lambda_l' 709 | NcompX (int): number of entries to be sampled from the portfolio allocation vector 710 | 711 | Returns: 712 | tuple: Tuple containing arrays with the sampled entries and corresponding components of 713 | the portfolio allocation vector 714 | """ 715 | 716 | m_rows, n_cols = np.shape(A) 717 | 718 | # 1- Generating LS probability distributions to sample from matrix A 719 | tic = time.time() 720 | 721 | LS = ls_probs(m_rows, n_cols, A) 722 | 723 | toc = time.time() 724 | 725 | rt_ls_prob = toc - tic 726 | 727 | # 2- Building matrix C by sampling "r" rows and "c" columns from matrix A and computing SVD of matrix C 728 | svd_C = sample_C(A, m_rows, n_cols, r, c, *LS[0:4]) 729 | w = svd_C[0] 730 | sigma = svd_C[2] 731 | 732 | # Reconstruction of the right-singular vectors of matrix A 733 | ul_approx = np.zeros((m_rows, rank)) 734 | vl_approx = np.zeros((n_cols, rank)) 735 | for l in range(rank): 736 | ul_approx[:, l], vl_approx[:, l] = uvl_vector(l, A, r, w, svd_C[1], sigma, LS[0], LS[3]) 737 | 738 | # 3- Sampling of the matrix elements lambdas[0:rank] = 739 | tic = time.time() 740 | lambdas = sample_me_rsys(A, 0, n_cols, Nsamples, rank, r, *svd_C[0:3], LS[0], *LS[2:4]) 741 | lambdas = mu*lambdas 742 | toc = time.time() 743 | rt_sampling_me = toc - tic 744 | 745 | # 4- Sampling the vector solution 746 | tic = time.time() 747 | 748 | # computes vector w = sum_l lambda_l/sigma_l^3 * w_l 749 | w_vector = np.zeros(r) 750 | for l in range(rank): 751 | w_vector[:] += (lambdas[l] / sigma[l] ** 3) * w[:, l] 752 | 753 | w_norm = la.norm(w_vector) 754 
| 755 | # create array to stored the sampled components 756 | sampled_comp = np.zeros(NcompX, dtype=np.uint32) 757 | n_of_rejected_samples = np.zeros(NcompX, dtype=np.uint32) 758 | x_tilde = np.zeros(NcompX) 759 | 760 | for t in range(NcompX): 761 | sampled_comp[t], n_of_rejected_samples[t] = \ 762 | sample_from_x(A, r, n_cols, svd_C[1], LS[0], svd_C[4], LS[3], w_vector, w_norm) 763 | 764 | toc = time.time() 765 | rt_sampling_sol = toc - tic 766 | 767 | for t in range(NcompX): 768 | x_tilde[t] = approx_solution(A, rank, r, w, svd_C[1], svd_C[2], 769 | LS[0], LS[3], lambdas, sampled_comp[t]) 770 | 771 | RT = [rt_ls_prob, *svd_C[5:8], rt_sampling_me, rt_sampling_sol] 772 | 773 | # 5- Printing out extensive information 774 | 775 | FKV = [r, c, rank, sigma, ul_approx, vl_approx] 776 | MC = [Nsamples, lambdas] 777 | RS = [NcompX, sampled_comp, x_tilde] 778 | RT = [rt_ls_prob, *svd_C[5:8], rt_sampling_me, rt_sampling_sol] 779 | 780 | print_output(*FKV, *MC, *RS, *RT) 781 | 782 | return sampled_comp, x_tilde 783 | 784 | 785 | def linear_eqs_fkv(A, b, r, c, rank): 786 | 787 | r""" Function to solve the the linear system of equations :math:'A \bm{x} = b' using FKV algorithm 788 | and a direct calculation of the coefficients :math: '\lambda_l' and solution vector :math: '\bm{x}' 789 | 790 | Args: 791 | A (array[complex]): rectangular, in general, complex matrix 792 | b (array[float]): right-hand-side vector b 793 | r (int): number of sampled rows from matrix A 794 | c (int): number of sampled columns from matrix A 795 | rank (int): rank of matrix A 796 | 797 | Returns: 798 | array[float]: array containing the components of the solution vector :math: '\bm{x}' 799 | """ 800 | 801 | m_rows, n_cols = np.shape(A) 802 | 803 | # 1- Generating LS probability distributions used to sample rows and columns indices of matrix A 804 | tic = time.time() 805 | 806 | LS = ls_probs(m_rows, n_cols, A) 807 | 808 | toc = time.time() 809 | 810 | rt_ls_prob = toc - tic 811 | 812 | # 2- Building matrix C by sampling "r" rows and "c" columns from matrix A and computing SVD of matrix C 813 | svd_C = sample_C(A, m_rows, n_cols, r, c, *LS[0:4]) 814 | w = svd_C[0] 815 | sigma = svd_C[2] 816 | rt_sampling_C = svd_C[5] 817 | rt_building_C = svd_C[6] 818 | rt_svd_C = svd_C[7] 819 | 820 | # Reconstruction of the right-singular vectors of matrix A 821 | ul_approx = np.zeros((m_rows, rank)) 822 | vl_approx = np.zeros((n_cols, rank)) 823 | for l in range(rank): 824 | ul_approx[:, l], vl_approx[:, l] = uvl_vector(l, A, r, w, svd_C[1], sigma, LS[0], LS[3]) 825 | 826 | # 3- Direct calculation of matrix elements lambdas[rank] = 827 | tic = time.time() 828 | lambdas = np.zeros(rank) 829 | for l in range(rank): 830 | lambdas[l] = np.transpose(vl_approx[:, l]) @ np.transpose(A) @ b 831 | toc = time.time() 832 | rt_dcalc_lambdas = toc - tic 833 | 834 | # 4- Direct calculation of the approximate vector solution \tilde X 835 | tic = time.time() 836 | x_tilde = np.zeros(n_cols) 837 | for l in range(rank): 838 | x_tilde[:] += (lambdas[l]/sigma[l]**2) * vl_approx[:, l] 839 | toc = time.time() 840 | rt_dcalc_x = toc - tic 841 | 842 | # 5- Printing out numerical results 843 | 844 | # timing information 845 | filename = "timing_C_{}_x_{}_rank_{}.out".format(r, c, rank) 846 | with open(filename,'w') as f: 847 | f.write("# r\t c \t rt_ls_prob \t rt_sampling_C \t rt_building_C \t rt_svd_C \t rt_dcalc_lambdas" 848 | "\t rt_dcalc_x \n") 849 | f.write(" {:4d} \t {:4d} \t {:6.4f} \t {:6.4f} \t {:6.4f} \t {:6.4f} \t {:6.4f} \t {:6.4f} \n" 850 | .format(r, c, 
rt_ls_prob, rt_sampling_C, rt_building_C, rt_svd_C, rt_dcalc_lambdas, rt_dcalc_x)) 851 | 852 | # Approximate singular values and right-singular vectors 853 | filename = "sigma_l_C_{}_x_{}_rank_{}.out".format(r, c, rank) 854 | with open(filename,'w') as f: 855 | f.write("# l \t sigma_l \n") 856 | for l in range(rank): 857 | f.write("{:4d} \t {:20.10f} \n" .format(l + 1, sigma[l])) 858 | np.save("v_l_" + str(l), vl_approx[:, l]) 859 | np.save("u_l_" + str(l), ul_approx[:, l]) 860 | 861 | # Coefficients lambda_l = <v^l | A^dagger | b> 862 | filename = "lambda_l_C_{}_x_{}_rank_{}.out".format(r, c, rank) 863 | with open(filename, 'w') as f: 864 | f.write("# l \t lambda_l \n") 865 | for l in range(rank): 866 | f.write("{:4d} \t {:20.10f} \n" .format(l + 1, lambdas[l])) 867 | 868 | # Approximate vector solution 869 | filename = "x_vector_C_{}_x_{}_rank_{}.out".format(r, c, rank) 870 | with open(filename, 'w') as f: 871 | f.write("# i \t X[i] \n") 872 | for ii in range(n_cols): 873 | f.write(" {:4d} \t {:20.10f} \n" .format(ii, x_tilde[ii])) 874 | 875 | return x_tilde 876 | --------------------------------------------------------------------------------
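As an illustrative check of the module (a sketch under assumptions, not part of the repository): since ``linear_eqs`` returns sampled entries of the approximate solution, those entries can be compared against a direct pseudoinverse solve of the same low-rank system from ``data_example_1``. The snippet assumes ``A.npy`` and ``b.npy`` are in the working directory, as in example 1 of the README.

.. code-block:: python

    import numpy as np
    import quantum_inspired as qi

    # data from data_example_1: low-rank 500 x 250 matrix A and vector b
    A = np.load('A.npy')
    b = np.load('b.npy')

    # quantum-inspired solver: sampled indices and approximate values of x
    sampled_comp, x_tilde = qi.linear_eqs(A, b, r=200, c=200, rank=3, Nsamples=50, NcompX=50)

    # direct reference solution via the Moore-Penrose pseudoinverse
    x_exact = np.linalg.pinv(A) @ np.ravel(b)

    # compare the sampled entries with the reference solution
    for idx, approx in zip(sampled_comp, x_tilde):
        print(idx, approx, x_exact[idx])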