├── .gitignore ├── LICENSE ├── NeurIPS_24 ├── algorithms.py ├── main.py └── script.py ├── README.md ├── compress_rtp ├── __init__.py ├── compress_rtp_optimization.py └── utils │ ├── __init__.py │ ├── get_low_dim_basis.py │ ├── get_sparse_only.py │ └── get_sparse_plus_low_rank.py ├── examples ├── fluence_wavelets.ipynb ├── fluence_wavelets.py ├── matrix_spare_plus_low_rank.py ├── matrix_sparse_only.ipynb ├── matrix_sparse_only.py └── matrix_sparse_plus_low_rank.ipynb ├── images ├── Algorithm_RMR.png ├── CompressRTPLogo.png ├── CompressRTPLogo2.PNG ├── FluenceCompress.PNG ├── LowDimRT.png ├── RMR_NeurIPS_Paper.pdf ├── RMR_performance.PNG ├── RMR_vs_Naive.PNG ├── RMR_vs_Native.png ├── RMR_vs_Others.png ├── SLR.PNG ├── SPlusL_Lung_Benefits.png ├── SPlusL_singular_values.png └── Wavelet_Benefits.png └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /NeurIPS_24/algorithms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy 3 | import math 4 | 5 | def Naive(matrix, threshold): 6 | copy_matrix = matrix.copy() 7 | copy_matrix[np.abs(matrix) <= threshold] = 0 8 | copy_matrix = scipy.sparse.csr_matrix(copy_matrix) 9 | return copy_matrix 10 | 11 | def Naive_nonzeros(matrix, threshold): 12 | return np.sum(np.abs(matrix) > threshold) 13 | 14 | def AHK06(matrix, threshold): 15 | copy_matrix = matrix.copy() 16 | n, d = matrix.shape 17 | probs = np.random.random((n, d)) 18 | copy_matrix[np.abs(matrix) < threshold] = 0 19 | copy_matrix[probs < (np.abs(matrix) / threshold) * (np.abs(matrix) < threshold)] = threshold 20 | 21 | copy_matrix = scipy.sparse.csr_matrix(copy_matrix) 22 | return copy_matrix 23 | 24 | def AHK06_nonzeros(matrix, threshold): 25 | n, d = matrix.shape 26 | indices = np.abs(matrix) < threshold 27 | return n * d - np.sum(indices) + np.sum(matrix[indices] / threshold) 28 | 29 | def compute_row_distribution(matrix, s, delta, row_norms): 30 | m, n = matrix.shape 31 | z = row_norms / np.sum(row_norms) 32 | alpha, beta = math.sqrt(np.log((m + n) / delta) / s), np.log((m + n) / delta) / (3 * s) 33 | zeta = 1 34 | rou = (alpha * z / (2 * zeta) + ((alpha * z / (2 * zeta)) ** 2 + beta * z / zeta) ** (1 / 2)) ** 2 35 | sum = np.sum(rou) 36 | while np.abs(sum - 1) > 1e-5: 37 | zeta *= sum 38 | rou = (alpha * z / (2 * zeta) + ((alpha * z / (2 * zeta)) ** 2 + beta * z / zeta) ** (1 / 2)) ** 2 39 | sum = np.sum(rou) 40 | return rou 41 | 42 | def AKL13(matrix, s): 43 | matrix = matrix.T 44 | s = int(s) 45 | n, d = matrix.shape 46 | row_norms = np.linalg.norm(matrix, axis=1, ord=1) 47 | rou = compute_row_distribution(matrix, s, 0.1, row_norms) 48 | nonzero_indices = matrix.nonzero() 49 | data = matrix[nonzero_indices] 50 | row_norms[row_norms == 0] = 1 51 | probs_matrix = rou.reshape((n, 1)) * matrix / row_norms.reshape((n, 1)) 52 | probs = probs_matrix[nonzero_indices] 53 | probs /= np.sum(probs) 54 | indices = np.arange(len(data)) 55 | selected = np.random.choice(indices, s, p=probs, replace=True) 56 | result = np.zeros((n, d)) 57 | np.add.at(result, (nonzero_indices[0][selected], nonzero_indices[1][selected]), data[selected] / (probs[selected] * s)) 58 | result = result.T 59 | matrix = matrix.T 60 | result = scipy.sparse.csr_matrix(result) 61 | return result 62 | 63 | def AKL13_nonzeros(matrix, s): 64 | matrix = matrix.T 65 | s = int(s) 66 | n = matrix.shape[0] 67 | row_norms = np.linalg.norm(matrix, axis=1, ord=1) 68 | rou = compute_row_distribution(matrix, s, 0.1, row_norms) 69 | nonzero_indices = matrix.nonzero() 70 | data = matrix[nonzero_indices] 71 | row_norms[row_norms == 0] = 1 72 | probs_matrix = rou.reshape((n, 1)) * matrix / row_norms.reshape((n, 1)) 73 | probs = probs_matrix[nonzero_indices] 74 | probs /= np.sum(probs) 75 | indices = np.arange(len(data)) 76 | selected = np.random.choice(indices, s, p=probs, replace=True) 77 | matrix = matrix.T 78 | return len(np.unique(selected)) 79 | 80 | def row_operation(copy_row, threshold): 81 | argzero = np.argwhere((np.abs(copy_row) <= threshold) * (copy_row != 0)) 82 | argzero = argzero.reshape(len(argzero),) 83 | argzero_copy = copy_row[argzero] 84 | copy_row[argzero] = 0 85 | sum = np.sum(argzero_copy) 86 | if sum != 0: 87 | k = math.ceil(sum / threshold) 88 | 89 | indices = np.random.choice(argzero, k, p=argzero_copy/sum, replace=True) 90 | np.add.at(copy_row, indices, sum / k) 91 | 92 | def RMR(matrix, threshold): 93 | copy_matrix = matrix.copy() 94 | np.apply_along_axis(row_operation, 1, copy_matrix, threshold) 95 | copy_matrix = scipy.sparse.csr_matrix(copy_matrix) 96 | return copy_matrix 97 | 98 | def RMR_nonzeros(matrix, threshold): 99 | n, d = matrix.shape 100 | sum = 0 101 | for i in range(n): 102 | argzero = np.argwhere(np.abs(matrix[i, :]) <= threshold) 103 | sum2 = np.sum(np.abs(matrix[i, argzero])) 104 | if sum2 != 0: 105 | k = math.ceil(sum2 / threshold) 106 | sum += d - len(argzero) + np.sum(1 - (1 - np.abs(matrix[i, argzero]) / sum2) ** k) 107 | else: 108 | sum += d - len(argzero) 109 | return sum 110 | 111 | def DZ11(matrix, threshold): 112 | copy_matrix = matrix.copy() 113 | n, d = matrix.shape 114 | norm_fro = np.linalg.norm(matrix, ord="fro") 115 | copy_matrix[np.abs(matrix) <= threshold / (n + d)] = 0 116 | s = int(14 * (n + d) * np.log(np.sqrt(2) / 2 * (n + d)) * (norm_fro / threshold) ** 2) 117 | nonzero_indices = copy_matrix.nonzero() 118 | data = copy_matrix[nonzero_indices] 119 | probs_matrix = copy_matrix * copy_matrix 120 | probs = probs_matrix[nonzero_indices] 121 | probs /= np.sum(probs) 122 | indices = np.arange(len(data)) 123 | selected = np.random.choice(indices, s, p=probs, replace=True) 124 | result = np.zeros((n, d)) 125 | np.add.at(result, (nonzero_indices[0][selected], nonzero_indices[1][selected]), data[selected] / (probs[selected] * s)) 126 | result = scipy.sparse.csr_matrix(result) 127 | return result 128 | 129 | def DZ11_nonzeros(matrix, threshold): 130 | copy_matrix = matrix.copy() 131 | n, d = matrix.shape 132 | norm_fro = np.linalg.norm(matrix, ord="fro") 133 | copy_matrix[np.abs(matrix) <= threshold / (n + d)] = 0 134 | s = int(14 * (n + d) * np.log(np.sqrt(2) / 2 * (n + d)) * (norm_fro / threshold) ** 2) 135 | nonzero_indices = copy_matrix.nonzero() 136 | data = copy_matrix[nonzero_indices] 137 | probs_matrix = copy_matrix * copy_matrix 138 | probs = probs_matrix[nonzero_indices] 139 | probs /= np.sum(probs) 140 | indices = np.arange(len(data)) 141 | selected = np.random.choice(indices, s, p=probs, replace=True) 142 | return len(np.unique(selected)) 143 | 144 | def BKKS21(matrix, s): 145 | n, d = matrix.shape 146 | probs = np.random.random((n, d)) 147 | row_norms = np.linalg.norm(matrix, axis=1, ord=1) 148 | col_norms = np.linalg.norm(matrix, axis=0, ord=1) 149 | p1 = np.abs(matrix) / np.sum(np.abs(matrix)) 150 | p2 = np.abs(matrix) * (row_norms / np.sum(row_norms ** 2)).reshape(-1, 1) 151 | p3 = np.abs(matrix) * (col_norms / np.sum(col_norms ** 2)).reshape(1, -1) 152 | p = np.minimum(1, s * np.maximum(p1, np.maximum(p2, p3))) 153 | probs[p == 0] = 1 154 | p[p == 0] = 1 155 | result = (matrix / p) * (probs < p) 156 | 157 | result = scipy.sparse.csr_matrix(result) 158 | return result 159 | 160 | def BKKS21_nonzeros(matrix, s): 161 | row_norms = np.linalg.norm(matrix, axis=1, ord=1) 162 | col_norms = np.linalg.norm(matrix, axis=0, ord=1) 163 | p1 = np.abs(matrix) / np.sum(np.abs(matrix)) 164 | p2 = np.abs(matrix) * (row_norms / np.sum(row_norms ** 2)).reshape(-1, 1) 165 | p3 = np.abs(matrix) * (col_norms / np.sum(col_norms ** 2)).reshape(1, -1) 166 | p = np.minimum(1, s * np.maximum(p1, np.maximum(p2, p3))) 167 | return np.sum(p) -------------------------------------------------------------------------------- /NeurIPS_24/main.py: -------------------------------------------------------------------------------- 1 | import portpy.photon as pp 2 | import algorithms 3 | import numpy as np 4 | import math 5 | import matplotlib.pyplot as plt 6 | 7 | def objective_function_value(x): 8 | obj_funcs = opt_params['objective_functions'] if 'objective_functions' in opt_params else [] 9 | obj = 0 10 | for i in range(len(obj_funcs)): 11 | if obj_funcs[i]['type'] == 'quadratic-overdose': 12 | if obj_funcs[i]['structure_name'] in opt.my_plan.structures.get_structures(): 13 | struct = obj_funcs[i]['structure_name'] 14 | if len(inf_matrix_full.get_opt_voxels_idx(struct)) == 0: # check if there are any opt voxels for the structure 15 | continue 16 | dose_gy = opt.get_num(obj_funcs[i]['dose_gy']) / clinical_criteria.get_num_of_fractions() 17 | dO = np.maximum(A[inf_matrix_full.get_opt_voxels_idx(struct), :] @ x - dose_gy, 0) 18 | obj += (1 / len(inf_matrix_full.get_opt_voxels_idx(struct))) * (obj_funcs[i]['weight'] * np.sum(dO ** 2)) 19 | elif obj_funcs[i]['type'] == 'quadratic-underdose': 20 | if obj_funcs[i]['structure_name'] in opt.my_plan.structures.get_structures(): 21 | struct = obj_funcs[i]['structure_name'] 22 | if len(inf_matrix_full.get_opt_voxels_idx(struct)) == 0: 23 | continue 24 | dose_gy = opt.get_num(obj_funcs[i]['dose_gy']) / clinical_criteria.get_num_of_fractions() 25 | dU = np.minimum(A[inf_matrix_full.get_opt_voxels_idx(struct), :] @ x - dose_gy, 0) 26 | obj += (1 / len(inf_matrix_full.get_opt_voxels_idx(struct))) * (obj_funcs[i]['weight'] * np.sum(dU ** 2)) 27 | elif obj_funcs[i]['type'] == 'quadratic': 28 | if obj_funcs[i]['structure_name'] in opt.my_plan.structures.get_structures(): 29 | struct = obj_funcs[i]['structure_name'] 30 | if len(inf_matrix_full.get_opt_voxels_idx(struct)) == 0: 31 | continue 32 | obj += (1 / len(inf_matrix_full.get_opt_voxels_idx(struct))) * (obj_funcs[i]['weight'] * np.sum((A[inf_matrix_full.get_opt_voxels_idx(struct), :] @ x) ** 2)) 33 | elif obj_funcs[i]['type'] == 'smoothness-quadratic': 34 | [Qx, Qy, num_rows, num_cols] = opt.get_smoothness_matrix(inf_matrix.beamlets_dict) 35 | smoothness_X_weight = 0.6 36 | smoothness_Y_weight = 0.4 37 | obj += obj_funcs[i]['weight'] * (smoothness_X_weight * (1 / num_cols) * np.sum((Qx @ x) ** 2) + 38 | smoothness_Y_weight * (1 / num_rows) * np.sum((Qy @ x) ** 2)) 39 | print("objective function value:", obj) 40 | 41 | def l2_norm(matrix): 42 | values, vectors = np.linalg.eig(np.transpose(matrix) @ matrix) 43 | return math.sqrt(np.max(np.abs(values))) 44 | 45 | if __name__ == '__main__': 46 | import argparse 47 | 48 | parser = argparse.ArgumentParser() 49 | 50 | parser.add_argument( 51 | '--method', type=str, choices=['Naive', 'AHK06', 'AKL13', 'DZ11', 'RMR'], help='The name of method.' 52 | ) 53 | parser.add_argument( 54 | '--patient', type=str, help='Patient\'s name' 55 | ) 56 | parser.add_argument( 57 | '--threshold', type=float, help='The threshold using for the input of algorithm.' 58 | ) 59 | parser.add_argument( 60 | '--solver', type=str, default='SCS', help='The name of solver for solving the optimization problem' 61 | ) 62 | 63 | args = parser.parse_args() 64 | # Use PortPy DataExplorer class to explore PortPy data 65 | data = pp.DataExplorer(data_dir='') 66 | # Pick a patient 67 | data.patient_id = args.patient 68 | # Load ct, structure set, beams for the above patient using CT, Structures, and Beams classes 69 | ct = pp.CT(data) 70 | structs = pp.Structures(data) 71 | beams = pp.Beams(data) 72 | # Pick a protocol 73 | protocol_name = 'Lung_2Gy_30Fx' 74 | # Load clinical criteria for a specified protocol 75 | clinical_criteria = pp.ClinicalCriteria(data, protocol_name=protocol_name) 76 | # Load hyper-parameter values for optimization problem for a specified protocol 77 | opt_params = data.load_config_opt_params(protocol_name=protocol_name) 78 | # Create optimization structures (i.e., Rinds) 79 | structs.create_opt_structures(opt_params=opt_params) 80 | # create plan_full object by specifying load_inf_matrix_full=True 81 | beams_full = pp.Beams(data, load_inf_matrix_full=True) 82 | # load influence matrix based upon beams and structure set 83 | inf_matrix_full = pp.InfluenceMatrix(ct=ct, structs=structs, beams=beams_full, is_full=True) 84 | plan_full = pp.Plan(ct, structs, beams, inf_matrix_full, clinical_criteria) 85 | # Load influence matrix 86 | inf_matrix = pp.InfluenceMatrix(ct=ct, structs=structs, beams=beams) 87 | 88 | opt_full = pp.Optimization(plan_full, opt_params=opt_params) 89 | opt_full.create_cvxpy_problem() 90 | 91 | A = inf_matrix_full.A 92 | print("number of non-zeros of the original matrix: ", len(A.nonzero()[0])) 93 | 94 | method = getattr(algorithms, args.method) 95 | S = method(A, args.threshold) 96 | print("number of non-zeros of the sparsed matrix: ", len(S.nonzero()[0])) 97 | print("relative L2 norm (%): ", l2_norm(A - S) / l2_norm(A) * 100) 98 | 99 | inf_matrix.A = S 100 | plan = pp.Plan(ct=ct, structs=structs, beams=beams, inf_matrix=inf_matrix, clinical_criteria=clinical_criteria) 101 | opt = pp.Optimization(plan, opt_params=opt_params) 102 | opt.create_cvxpy_problem() 103 | x = opt.solve(solver=args.solver, verbose=False) 104 | 105 | opt_full.vars['x'].value = x['optimal_intensity'] 106 | violation = 0 107 | for constraint in opt_full.constraints[2:]: 108 | violation += np.sum(constraint.violation()) 109 | print("feasibility violation:", violation) 110 | objective_function_value(x['optimal_intensity']) 111 | 112 | dose_1d = S @ (x['optimal_intensity'] * plan.get_num_of_fractions()) 113 | dose_full = A @ (x['optimal_intensity'] * plan.get_num_of_fractions()) 114 | print("relative dose discrepancy (%): ", (np.linalg.norm(dose_full - dose_1d) / np.linalg.norm(dose_full)) * 100) 115 | 116 | struct_names = ['PTV', 'ESOPHAGUS', 'HEART', 'CORD', 'LUNGS_NOT_GTV'] 117 | 118 | fig, ax = plt.subplots(figsize=(12, 8)) 119 | # Turn on norm flag for same normalization for sparse and full dose. 120 | ax = pp.Visualization.plot_dvh(plan, dose_1d=dose_1d , struct_names=struct_names, style='solid', ax=ax, norm_flag=True) 121 | ax = pp.Visualization.plot_dvh(plan_full, dose_1d=dose_full, struct_names=struct_names, style='dotted', ax=ax, norm_flag=True) 122 | plt.savefig(str(args.method) + "_" + str(args.threshold) + "_" + str(args.patient) + ".pdf") 123 | -------------------------------------------------------------------------------- /NeurIPS_24/script.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import math 4 | import pickle 5 | import numpy as np 6 | import algorithms 7 | import portpy.photon as pp 8 | 9 | 10 | def l2_norm(matrix): 11 | values = np.linalg.eig(np.transpose(matrix) @ matrix)[0] 12 | return math.sqrt(np.max(np.abs(values))) 13 | 14 | 15 | def relative_error(dose_sprs, dose_full): 16 | return (np.linalg.norm(dose_full - dose_sprs) / np.linalg.norm(dose_full)) * 100 17 | 18 | 19 | def search1(name, matrix, percentages, nnz): 20 | thresholds = [] 21 | prev_threshold = 0.1 22 | func = getattr(algorithms, name) 23 | nonzeros = func(matrix, prev_threshold) 24 | 25 | for percentage in percentages: 26 | tar_nonzeros = percentage * nnz / 100 27 | threshold = prev_threshold 28 | count = 0 29 | 30 | while np.abs(1 - nonzeros / tar_nonzeros) > 0.025 and count < 20: 31 | threshold *= nonzeros / tar_nonzeros 32 | nonzeros = func(matrix, threshold) 33 | count += 1 34 | 35 | prev_threshold = threshold 36 | thresholds.append(threshold) 37 | return thresholds 38 | 39 | 40 | def search2(name, matrix, percentages, nnz): 41 | thresholds = [] 42 | func = getattr(algorithms, name) 43 | 44 | for percentage in percentages: 45 | tar_nonzeros = percentage * nnz / 100 46 | threshold = tar_nonzeros 47 | nonzeros = func(matrix, threshold) 48 | count = 0 49 | 50 | while np.abs(1 - tar_nonzeros / nonzeros) > 0.025 and count < 20: 51 | threshold *= tar_nonzeros / nonzeros 52 | nonzeros = func(matrix, threshold) 53 | count += 1 54 | 55 | thresholds.append(threshold) 56 | return thresholds 57 | 58 | 59 | def search3(name, matrix, percentages, nnz): 60 | thresholds = [] 61 | prev_threshold = 250 62 | func = getattr(algorithms, name) 63 | tick = time.time() 64 | nonzeros = func(matrix, prev_threshold) 65 | runtime = time.time() - tick 66 | min_threshold = prev_threshold / math.sqrt(600 / runtime) 67 | prev_threshold = 500 68 | nonzeros = func(matrix, prev_threshold) 69 | 70 | for t, percentage in enumerate(percentages): 71 | tar_nonzeros = percentage * nnz / 100 72 | threshold = prev_threshold 73 | count = 0 74 | 75 | while np.abs(1 - nonzeros / tar_nonzeros) > 0.025 and count < 20: 76 | threshold *= nonzeros / tar_nonzeros 77 | 78 | if threshold < min_threshold and t == 1: 79 | threshold = min_threshold 80 | count = 20 81 | elif threshold < min_threshold and t == 0: 82 | return False 83 | else: 84 | nonzeros = func(matrix, threshold) 85 | count += 1 86 | 87 | prev_threshold = threshold 88 | thresholds.append(threshold) 89 | return thresholds 90 | 91 | 92 | def run_algorithm(i, alg, matrix, nnz, percentages, total_points, repetitions, 93 | ct, structs, beams, inf_matrix, clinical_criteria, opt_params, opt, 94 | inf_matrix_full, A, results_dict): 95 | print("Starting", alg) 96 | if alg == "AKL13": 97 | thresholds0 = search2(f"{alg}_nonzeros", matrix, percentages, nnz) 98 | elif alg == "DZ11": 99 | thresholds0 = search3(f"{alg}_nonzeros", matrix, percentages, nnz) 100 | else: 101 | thresholds0 = search1(f"{alg}_nonzeros", matrix, percentages, nnz) 102 | 103 | if thresholds0 == False: 104 | for key in results_dict[alg]: 105 | if key != "Thresholds": 106 | results_dict[alg][key].extend([0] * (total_points * repetitions)) 107 | else: 108 | results_dict[alg][key].extend([0] * 2) 109 | return 110 | 111 | results_dict[alg]["Thresholds"].extend(thresholds0) 112 | print(f"{alg} thresholds: {thresholds0}") 113 | all_thresholds = np.linspace(thresholds0[0], thresholds0[1], num=total_points) 114 | 115 | for j, threshold in enumerate(all_thresholds): 116 | for k in range(repetitions): 117 | print(f"Patient {i}, Algorithm {alg}, Point {j}, Repetition {k}") 118 | func = getattr(algorithms, alg) 119 | tick = time.time() 120 | S = func(matrix, threshold) 121 | tock = time.time() 122 | results_dict[alg]["Times"].append(tock - tick) 123 | print("Time:", tock - tick) 124 | 125 | S_nonzeros = len(S.nonzero()[0]) 126 | results_dict[alg]["Nonzeros"].append(S_nonzeros) 127 | print("Number of nonzeros of S:", S_nonzeros) 128 | 129 | AS_norm = l2_norm(matrix - S) 130 | results_dict[alg]["L2 norms"].append(AS_norm) 131 | print("L2 norm of A-S:", AS_norm) 132 | 133 | inf_matrix.A = S 134 | plan1 = pp.Plan(ct=ct, structs=structs, beams=beams, 135 | inf_matrix=inf_matrix, clinical_criteria=clinical_criteria) 136 | opt1 = pp.Optimization(plan1, opt_params=opt_params) 137 | opt1.create_cvxpy_problem() 138 | tick = time.time() 139 | x_S = opt1.solve(solver='MOSEK', verbose=False) 140 | tock = time.time() 141 | results_dict[alg]["Optimization times"].append(tock - tick) 142 | 143 | opt.vars['x'].value = x_S['optimal_intensity'] 144 | violation = 0 145 | for constraint in opt.constraints[2:]: 146 | violation += np.sum(constraint.violation()) 147 | results_dict[alg]["Feasibility violations"].append(violation) 148 | print("Feasibility violation:", violation) 149 | 150 | objective_function_value(x_S['optimal_intensity'], alg, opt, inf_matrix_full, 151 | A, results_dict, opt_params, clinical_criteria) 152 | 153 | dose_1d = S @ (x_S['optimal_intensity'] * plan1.get_num_of_fractions()) 154 | dose_full = matrix @ (x_S['optimal_intensity'] * plan1.get_num_of_fractions()) 155 | 156 | os.makedirs('Vectors', exist_ok=True) 157 | np.save(f"Vectors/{alg}_{i}_{j}_{k}_x", x_S['optimal_intensity']) 158 | np.save(f"Vectors/{alg}_{i}_{j}_{k}_Sx", dose_1d) 159 | np.save(f"Vectors/{alg}_{i}_{j}_{k}_Ax", dose_full) 160 | 161 | discrepancy = relative_error(dose_1d, dose_full) 162 | results_dict[alg]["Dose discrepancy"].append(discrepancy) 163 | print("Dose discrepancy:", discrepancy) 164 | 165 | def objective_function_value(x, name, opt, inf_matrix_full, A, results_dict, opt_params, 166 | clinical_criteria): 167 | obj_funcs = opt_params['objective_functions'] if 'objective_functions' in opt_params else [] 168 | obj = 0 169 | for i in range(len(obj_funcs)): 170 | if obj_funcs[i]['type'] == 'quadratic-overdose': 171 | if obj_funcs[i]['structure_name'] in opt.my_plan.structures.get_structures(): 172 | struct = obj_funcs[i]['structure_name'] 173 | if len(inf_matrix_full.get_opt_voxels_idx(struct)) == 0: # check if there are any opt voxels for the structure 174 | continue 175 | dose_gy = opt.get_num(obj_funcs[i]['dose_gy']) / clinical_criteria.get_num_of_fractions() 176 | dO = np.maximum(A[inf_matrix_full.get_opt_voxels_idx(struct), :] @ x - dose_gy, 0) 177 | obj += (1 / len(inf_matrix_full.get_opt_voxels_idx(struct))) * (obj_funcs[i]['weight'] * np.sum(dO ** 2)) 178 | elif obj_funcs[i]['type'] == 'quadratic-underdose': 179 | if obj_funcs[i]['structure_name'] in opt.my_plan.structures.get_structures(): 180 | struct = obj_funcs[i]['structure_name'] 181 | if len(inf_matrix_full.get_opt_voxels_idx(struct)) == 0: 182 | continue 183 | dose_gy = opt.get_num(obj_funcs[i]['dose_gy']) / clinical_criteria.get_num_of_fractions() 184 | dU = np.minimum(A[inf_matrix_full.get_opt_voxels_idx(struct), :] @ x - dose_gy, 0) 185 | obj += (1 / len(inf_matrix_full.get_opt_voxels_idx(struct))) * (obj_funcs[i]['weight'] * np.sum(dU ** 2)) 186 | elif obj_funcs[i]['type'] == 'quadratic': 187 | if obj_funcs[i]['structure_name'] in opt.my_plan.structures.get_structures(): 188 | struct = obj_funcs[i]['structure_name'] 189 | if len(inf_matrix_full.get_opt_voxels_idx(struct)) == 0: 190 | continue 191 | obj += (1 / len(inf_matrix_full.get_opt_voxels_idx(struct))) * (obj_funcs[i]['weight'] * np.sum((A[inf_matrix_full.get_opt_voxels_idx(struct), :] @ x) ** 2)) 192 | elif obj_funcs[i]['type'] == 'smoothness-quadratic': 193 | [Qx, Qy, num_rows, num_cols] = opt.get_smoothness_matrix(inf_matrix.beamlets_dict) 194 | smoothness_X_weight = 0.6 195 | smoothness_Y_weight = 0.4 196 | obj += obj_funcs[i]['weight'] * (smoothness_X_weight * (1 / num_cols) * np.sum((Qx @ x) ** 2) + 197 | smoothness_Y_weight * (1 / num_rows) * np.sum((Qy @ x) ** 2)) 198 | results_dict[name]["Objective values"].append(obj) 199 | print("Objective function value:", obj) 200 | 201 | 202 | if __name__ == "__main__": 203 | main_tick = time.time() 204 | total_points = 35 205 | list_of_algorithms = ["Naive", "AHK06", "DZ11", "AKL13", "BKKS21", "RMR"] 206 | total_repetitions = [1, 5, 5, 5, 5, 5] 207 | data_dir = 'data' 208 | data = pp.DataExplorer(data_dir=data_dir) 209 | 210 | results_dict = {"Index": [], "Nonzeros": [], "Shapes": [], "L2 norms": []} 211 | for name in list_of_algorithms: 212 | results_dict[name] = { 213 | "Nonzeros": [], "L2 norms": [], "Times": [], "Dose discrepancy": [], 214 | "Optimization times": [], "Objective values": [], "Feasibility violations": [], "Thresholds": [] 215 | } 216 | 217 | patients_list = np.arange(2, 51) 218 | np.random.shuffle(patients_list) 219 | 220 | for i in patients_list: 221 | results_dict["Index"].append(i) 222 | print("Starting Patient", i) 223 | data.patient_id = 'Lung_Patient_' + str(i) 224 | ct = pp.CT(data) 225 | structs = pp.Structures(data) 226 | beams = pp.Beams(data) 227 | protocol_name = 'Lung_2Gy_30Fx' 228 | clinical_criteria = pp.ClinicalCriteria(data, protocol_name=protocol_name) 229 | 230 | opt_params = data.load_config_opt_params(protocol_name=protocol_name) 231 | structs.create_opt_structures(opt_params=opt_params) 232 | beams_full = pp.Beams(data, load_inf_matrix_full=True) 233 | inf_matrix_full = pp.InfluenceMatrix(ct=ct, structs=structs, beams=beams_full, is_full=True) 234 | plan_full = pp.Plan(ct, structs, beams, inf_matrix_full, clinical_criteria) 235 | inf_matrix = pp.InfluenceMatrix(ct=ct, structs=structs, beams=beams) 236 | 237 | opt = pp.Optimization(plan_full, opt_params=opt_params) 238 | opt.create_cvxpy_problem() 239 | 240 | A = inf_matrix_full.A 241 | A_nonzeros = len(A.nonzero()[0]) 242 | results_dict["Nonzeros"].append(A_nonzeros) 243 | print("Number of nonzeros of A:", A_nonzeros) 244 | results_dict["Shapes"].append(A.shape) 245 | A_norm = l2_norm(A) 246 | results_dict["L2 norms"].append(A_norm) 247 | print("L2 norm of A:", A_norm) 248 | 249 | sparsification_percentages = np.array([1, 5]) 250 | for idx, alg in enumerate(list_of_algorithms): 251 | repetitions = total_repetitions[idx] 252 | run_algorithm( 253 | i, alg, A, A_nonzeros, sparsification_percentages, 254 | total_points, repetitions, ct, structs, beams, inf_matrix, 255 | clinical_criteria, opt_params, opt, inf_matrix_full, A, results_dict 256 | ) 257 | 258 | tock = time.time() 259 | results_dict["Total time"] = tock - main_tick 260 | print("Total time:", tock - main_tick) 261 | with open('output.pkl', 'wb') as file: 262 | pickle.dump(results_dict, file) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 |
3 |
65 |
66 |
67 | 68 | **Figure Explanation:** The figure above compares the proposed RMR algorithm with four existing sparsification algorithms in terms of feasibility and optimality gaps. These gaps were calculated by solving both the original and surrogate optimization problems for 10 lung cancer patients, whose data is publicly available on PortPy. The results demonstrate that the RMR algorithm outperforms the existing methods. 69 | 70 |
71 |
72 |
73 | 74 | **Figure Explanation:** The figure above illustrates the discrepancies in Dose Volume Histogram (DVH) plots between the actual dose ($A\mathbf{x}$, shown as a solid line) and the approximated dose ($𝑆\mathbf{x}$, shown as a dotted line), where 75 | $\mathbf{x}$ is the optimal solution of the surrogate optimization problem. A smaller gap between the dotted and solid lines indicates a more accurate dose approximation. **Left figure** demonstrates a significant dose discrepancy when the matrix 76 | $A$ is sparsified by simply zeroing out small elements—a technique commonly used in practice. **Right figure** shows a minimal dose discrepancy when the matrix $A$ is sparsified using the RMR algorithm. Importantly, in both cases, the sparsified matrix contained only 2% non-zero elements. 77 | 78 | 79 | **Implementation in PortPy:** 80 | 81 | If you are using PortPy for your radiotherapy research, you can apply RMR sparsification by simply adding the following lines of code. For more details, see [Sparse-Only Jupyter Notebook](https://github.com/PortPy-Project/CompressRTP/blob/main/examples/matrix_sparse_only.ipynb). 82 | 83 | ```python 84 | from compress_rtp.utils.get_sparse_only import get_sparse_only 85 | 86 | # Apply RMR sparsification to the matrix A 87 | S = get_sparse_only(A=A, threshold_perc=10, compression='rmr') 88 | 89 | # Replace the original matrix A with the sparsified matrix S 90 | inf_matrix.A = S 91 | ``` 92 | 93 | ## 1.2) Sparse-Plus-Low-Rank Matrix Compression 94 | The rows of matrix $𝐴$ correspond to the patient's voxels, and the similarity of doses delivered to neighboring voxels makes these rows highly correlated (the same argument applies to the columns). This high correlation means that matrix $𝐴$ 95 | is **low-rank** and therefore **compressible**. 96 | 97 |
98 |
99 |
100 | 101 | **Figure Explanation:** The low-rank nature of matrix $𝐴$ can be verified by observing the exponential decay of its singular values, as shown by the blue line in the **left figure**. If we decompose matrix 102 | $A$ into $A=S+L$, where $𝑆$ is a sparse matrix containing large-magnitude elements (e.g., elements greater than 1% of the maximum value of $𝐴$), and $𝐿$ includes smaller elements mainly representing scattering doses, then the singular values of the scattering matrix $𝐿$ reveal an even sharper exponential decay (depicted by the red line). This suggests the use of “sparse-plus-low-rank” compression, $𝐴≈𝑆+𝐻𝑊$, as schematically shown in the **right figure**. 103 | 104 | 105 | The matrix $S$ is sparse, $H$ is a “tall skinny matrix” with only a few columns, and $W$ is a “wide short matrix” with only a few rows. Therefore, $A≈S+HW$ provides a compressed representation of the data. This allows us to solve the following surrogate problem instead of the original problem 106 | 107 | $Minimize \text{ } f(S\mathbf{x}+H\mathbf{y},\mathbf{x})$ 108 | 109 | Subject to $g(S\mathbf{x}+H\mathbf{y},\mathbf{x})\leq 0, \mathbf{y}=W\mathbf{x}, \mathbf{x}\geq 0$ 110 | 111 | Decomposing a matrix into the sum of a sparse matrix and a low-rank matrix has found numerous applications in fields such as computer vision, medical imaging, and statistics. Historically, this structure has been employed as a form of prior knowledge to recover objects of interest that manifest themselves in either the sparse or low-rank components. However, the application presented here represents a novel departure from conventional uses of sparse-plus-low-rank decomposition. Unlike traditional settings where specific components (sparse or low-rank) hold intrinsic importance, our primary goal is not to isolate or interpret these structures. Instead, we leverage them for computationally efficient matrix representation. In this case, the structure serves purely as a tool for optimizing computational efficiency while maintaining data integrity. 112 | 113 | **Note:** Both sparse-only and sparse-plus-low-rank compression techniques serve the same purpose. We are currently investigating the strengths and weaknesses of each technique and their potential combination. Stay tuned for more results. 114 | 115 | **Implementation in PortPy:** 116 | 117 | In PortPy, you can apply the sparse-plus-low-rank compression using the following lines of code. Unlike the sparse-only compression using RMR, which did not require any changes other than replacing $A\mathbf{x}$ with $S\mathbf{x}$ in your optimization formulation and code, this compression requires adding a linear constraint $y=W\mathbf{x}$ and replacing $Ax$ with $S\mathbf{x}+H\mathbf{y}$. These changes can be easily implemented using CVXPy (see the [Sparse-Plus-Low-Rank Jupyter Notebook](https://github.com/PortPy-Project/CompressRTP/blob/main/examples/matrix_sparse_plus_low_rank.ipynb) for details). 118 | 119 | ```python 120 | from compress_rtp.utils.get_sparse_plus_low_rank import get_sparse_plus_low_rank 121 | 122 | S, H, W = get_sparse_plus_low_rank(A=A, threshold_perc=1, rank=5) 123 | ``` 124 | 125 | 126 | ## 2) Fluence Compression to Enforce Smoothness on $x$ 127 | 128 | The fluence smoothness required for efficient and accurate plan delivery is typically achieved by adding an additional "regularization" term to the objective function. This term measures local variations in adjacent beamlets to discourage fluctuating beamlet intensities. However, a significant limitation of this method is its focus on **local complexity** within each beam—it assesses variations between adjacent beamlets but overlooks the **global complexity** of the entire plan. Another challenge is that achieving an optimal balance between plan complexity and dosimetric quality requires careful fine-tuning of the importance weight associated with the smoothness term in the objective function. 129 | 130 | To address these challenges, we treat the intensity map of each beam as a **2D image** and represent it using wavelets corresponding to **low-frequency changes**. The compressed representation of fluence using low-frequency wavelets induces built-in local and global smoothness that can be achieved **without any hyperparameter fine-tuning**. This approach can be easily incorporated into the optimization by adding a set of linear constraints in the form of $𝑥=𝑊𝑦$, where $W$ is the matrix including low-frequency wavelets. 131 | 132 | 133 |
134 |
135 |
136 | 137 | **Figure Explanation:** As illustrated in the figure above, the treatment plan achieved using wavelet compression is not only more conformal to the tumor but also less complex. This is evidenced by a smaller duty cycle compared to the plan achieved by adding only a regularization term to the objective function. 138 | 139 | 140 | **Implementation in PortPy:** 141 | 142 | In **PortPy**, you can incorporate wavelet smoothness by adding the following lines of code. For detailed explanation, see [Wavelet Jupyter Notebook](https://github.com/PortPy-Project/CompressRTP/blob/main/examples/fluence_wavelets.ipynb). 143 | 144 | ```python 145 | from compress_rtp.utils.get_low_dim_basis import get_low_dim_basis 146 | 147 | # Generate the matrix W of low-frequency wavelets 148 | W = get_low_dim_basis(inf_matrix=inf_matrix, compression='wavelet') 149 | 150 | # Add the variable y 151 | y = cp.Variable(W.shape[1]) 152 | 153 | # Add the constraint Wy = x 154 | opt.constraints += [W @ y == opt.vars['x']] 155 | ``` 156 | 157 | # Team 158 | 159 | | Name | Institution | 160 | |------------------------------------------------------------------------------|----------------------------------------| 161 | | [Mojtaba Tefagh](https://www.ed.ac.uk/profile/mojtaba-tefagh) | University of Edinburgh, Scotland | 162 | | [Gourav Jhanwar](https://github.com/gourav3017) | Memorial Sloan Kettering Cancer Center | 163 | | [Masoud Zarepisheh](https://masoudzp.github.io/) | Memorial Sloan Kettering Cancer Center | 164 | 165 | 166 | ## License 167 | CompressRTP code is distributed under **Apache License 2.0 with Commons Clause**, and is available for non-commercial academic purposes. 168 | 169 | ## Reference 170 | 171 | ``` 172 | 173 | @article{Adeli2024Randomized, 174 | title={Randomized Sparse Matrix Compression for 175 | Large-Scale Constrained Optimization in Cancer 176 | Radiotherapy}, 177 | author={Adeli, Shima and Tefagh, Mojtaba and Jhanwar, Gourav and Zarepisheh, Masoud}, 178 | journal={accepted at NeurIPS}, 179 | year={2024} 180 | } 181 | 182 | @article{tefaghcompressed, 183 | title={Compressed radiotherapy treatment planning: A new paradigm for rapid and high-quality treatment planning optimization}, 184 | author={Tefagh, Mojtaba and Jhanwar, Gourav and Zarepisheh, Masoud}, 185 | journal={Medical Physics}, 186 | publisher={Wiley Online Library} 187 | } 188 | 189 | @article{tefagh2023built, 190 | title={Built-in wavelet-induced smoothness to reduce plan complexity in intensity modulated radiation therapy (IMRT)}, 191 | author={Tefagh, Mojtaba and Zarepisheh, Masoud}, 192 | journal={Physics in Medicine \& Biology}, 193 | volume={68}, 194 | number={6}, 195 | pages={065013}, 196 | year={2023}, 197 | publisher={IOP Publishing} 198 | } 199 | 200 | 201 | 202 | ``` 203 | 204 | -------------------------------------------------------------------------------- /compress_rtp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/compress_rtp/__init__.py -------------------------------------------------------------------------------- /compress_rtp/compress_rtp_optimization.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from portpy.photon import Optimization 4 | from typing import TYPE_CHECKING 5 | if TYPE_CHECKING: 6 | from portpy.photon.plan import Plan 7 | from portpy.photon.influence_matrix import InfluenceMatrix 8 | from portpy.photon.clinical_criteria import ClinicalCriteria 9 | import cvxpy as cp 10 | import numpy as np 11 | from copy import deepcopy 12 | 13 | 14 | class CompressRTPOptimization(Optimization): 15 | """ 16 | Class for Compressed RTP optimization. It is child class of PortPy.Photon Optimization class 17 | 18 | - **Attributes** :: 19 | :param my_plan: object of class Plan 20 | :param inf_matrix: object of class InfluenceMatrix 21 | :param clinical_criteria: object of class ClinicalCriteria 22 | :param opt_params: dictionary of vmat optimization parameters 23 | :param vars: dictionary of variables 24 | 25 | :Example: 26 | >>> opt = CompressRTPOptimization(my_plan=my_plan, inf_matrix=inf_matrix, clinical_criteria=clinical_criteria, opt_params=vmat_opt_params) 27 | >>> opt.create_cvxpy_problem_compressed(solver='MOSEK', verbose=True) 28 | 29 | - **Methods** :: 30 | :create_cvxpy_problem_compressed() 31 | Creates cvxpy problem for solving using compressed data 32 | """ 33 | def __init__(self, my_plan: Plan, inf_matrix: InfluenceMatrix = None, 34 | clinical_criteria: ClinicalCriteria = None, 35 | opt_params: dict = None, vars: dict = None): 36 | # Call the constructor of the base class (Optimization) using super() 37 | super().__init__(my_plan=my_plan, inf_matrix=inf_matrix, 38 | clinical_criteria=clinical_criteria, 39 | opt_params=opt_params, vars=vars) 40 | 41 | def create_cvxpy_problem_compressed(self, S=None, H=None, W=None): 42 | 43 | """ 44 | It runs optimization to create optimal plan based upon clinical criteria 45 | 46 | :param S: sparse influence matrix. Uses influence matrix in my_plan by default 47 | :param H: tall skinny matrix. It is obtained using SVD of L = A-S. UKV = svd(L, rank=k). H=U 48 | :param W: thin wide matrix. It is obtained using SVD of L = A-S. W=KV 49 | 50 | """ 51 | # unpack data 52 | my_plan = self.my_plan 53 | inf_matrix = self.inf_matrix 54 | opt_params = self.opt_params 55 | clinical_criteria = self.clinical_criteria 56 | x = self.vars['x'] 57 | obj = self.obj 58 | constraints = self.constraints 59 | 60 | # get opt params for optimization 61 | obj_funcs = opt_params['objective_functions'] if 'objective_functions' in opt_params else [] 62 | opt_params_constraints = opt_params['constraints'] if 'constraints' in opt_params else [] 63 | 64 | if S is None: 65 | S = inf_matrix.A 66 | num_fractions = clinical_criteria.get_num_of_fractions() 67 | st = inf_matrix 68 | if W is None and H is None: 69 | H = np.zeros((S.shape[0], 1)) 70 | W = np.zeros((1, S.shape[1])) 71 | 72 | # Construct optimization problem 73 | Wx = cp.Variable(H.shape[1]) # creating dummy variable for dose 74 | # Generating objective functions 75 | print('Objective Start') 76 | for i in range(len(obj_funcs)): 77 | if obj_funcs[i]['type'] == 'quadratic-overdose': 78 | if obj_funcs[i]['structure_name'] in my_plan.structures.get_structures(): 79 | struct = obj_funcs[i]['structure_name'] 80 | if len(st.get_opt_voxels_idx(struct)) == 0: # check if there are any opt voxels for the structure 81 | continue 82 | key = self.matching_keys(obj_funcs[i], 'dose') 83 | dose_gy = self.dose_to_gy(key, obj_funcs[i][key]) / num_fractions 84 | dO = cp.Variable(len(st.get_opt_voxels_idx(struct)), pos=True) 85 | obj += [(1 / len(st.get_opt_voxels_idx(struct))) * (obj_funcs[i]['weight'] * cp.sum_squares(dO))] 86 | constraints += [S[st.get_opt_voxels_idx(struct), :] @ x + H[st.get_opt_voxels_idx(struct), :] @ Wx <= dose_gy + dO] 87 | elif obj_funcs[i]['type'] == 'quadratic-underdose': 88 | if obj_funcs[i]['structure_name'] in my_plan.structures.get_structures(): 89 | struct = obj_funcs[i]['structure_name'] 90 | if len(st.get_opt_voxels_idx(struct)) == 0: 91 | continue 92 | key = self.matching_keys(obj_funcs[i], 'dose') 93 | dose_gy = self.dose_to_gy(key, obj_funcs[i][key]) / num_fractions 94 | dU = cp.Variable(len(st.get_opt_voxels_idx(struct)), pos=True) 95 | obj += [(1 / len(st.get_opt_voxels_idx(struct))) * (obj_funcs[i]['weight'] * cp.sum_squares(dU))] 96 | constraints += [S[st.get_opt_voxels_idx(struct), :] @ x + H[st.get_opt_voxels_idx(struct), :] @ Wx >= dose_gy - dU] 97 | elif obj_funcs[i]['type'] == 'quadratic': 98 | if obj_funcs[i]['structure_name'] in my_plan.structures.get_structures(): 99 | struct = obj_funcs[i]['structure_name'] 100 | if len(st.get_opt_voxels_idx(struct)) == 0: 101 | continue 102 | obj += [(1 / len(st.get_opt_voxels_idx(struct))) * ( 103 | obj_funcs[i]['weight'] * cp.sum_squares(S[st.get_opt_voxels_idx(struct), :] @ x + H[st.get_opt_voxels_idx(struct), :] @ Wx))] 104 | elif obj_funcs[i]['type'] == 'smoothness-quadratic': 105 | [Qx, Qy, num_rows, num_cols] = self.get_smoothness_matrix(inf_matrix.beamlets_dict) 106 | smoothness_X_weight = 0.6 107 | smoothness_Y_weight = 0.4 108 | obj += [obj_funcs[i]['weight'] * (smoothness_X_weight * (1 / num_cols) * cp.sum_squares(Qx @ x) + 109 | smoothness_Y_weight * (1 / num_rows) * cp.sum_squares(Qy @ x))] 110 | 111 | print('Objective done') 112 | 113 | print('Constraints Start') 114 | 115 | constraint_def = deepcopy( 116 | clinical_criteria.get_criteria()) # get all constraints definition using clinical criteria 117 | 118 | # add/modify constraints definition if present in opt params 119 | for opt_constraint in opt_params_constraints: 120 | # add constraint 121 | param = opt_constraint['parameters'] 122 | if param['structure_name'] in my_plan.structures.get_structures(): 123 | criterion_exist, criterion_ind = clinical_criteria.check_criterion_exists(opt_constraint, 124 | return_ind=True) 125 | if criterion_exist: 126 | constraint_def[criterion_ind] = opt_constraint 127 | else: 128 | constraint_def += [opt_constraint] 129 | 130 | # Adding max/mean constraints 131 | for i in range(len(constraint_def)): 132 | if constraint_def[i]['type'] == 'max_dose': 133 | limit_key = self.matching_keys(constraint_def[i]['constraints'], 'limit') 134 | if limit_key: 135 | limit = self.dose_to_gy(limit_key, constraint_def[i]['constraints'][limit_key]) 136 | org = constraint_def[i]['parameters']['structure_name'] 137 | if org != 'GTV' and org != 'CTV': 138 | if org in my_plan.structures.get_structures(): 139 | if len(st.get_opt_voxels_idx(org)) == 0: 140 | continue 141 | constraints += [S[st.get_opt_voxels_idx(org), :] @ x + H[st.get_opt_voxels_idx(org), :] @ Wx <= limit / num_fractions] 142 | elif constraint_def[i]['type'] == 'mean_dose': 143 | limit_key = self.matching_keys(constraint_def[i]['constraints'], 'limit') 144 | if limit_key: 145 | limit = self.dose_to_gy(limit_key, constraint_def[i]['constraints'][limit_key]) 146 | org = constraint_def[i]['parameters']['structure_name'] 147 | # mean constraints using voxel weights 148 | if org in my_plan.structures.get_structures(): 149 | if len(st.get_opt_voxels_idx(org)) == 0: 150 | continue 151 | fraction_of_vol_in_calc_box = my_plan.structures.get_fraction_of_vol_in_calc_box(org) 152 | limit = limit / fraction_of_vol_in_calc_box # modify limit due to fraction of volume receiving no dose 153 | constraints += [(1 / sum(st.get_opt_voxels_volume_cc(org))) * 154 | (cp.sum((cp.multiply(st.get_opt_voxels_volume_cc(org), 155 | S[st.get_opt_voxels_idx(org), :] @ x + H[st.get_opt_voxels_idx(org), :] @ Wx)))) 156 | <= limit / num_fractions] 157 | 158 | constraints += [Wx == (W @ x)] 159 | print('Constraints done') -------------------------------------------------------------------------------- /compress_rtp/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/compress_rtp/utils/__init__.py -------------------------------------------------------------------------------- /compress_rtp/utils/get_low_dim_basis.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from portpy.photon.influence_matrix import InfluenceMatrix 4 | import numpy as np 5 | import scipy 6 | try: 7 | import pywt 8 | except ImportError: 9 | pass 10 | 11 | 12 | def get_low_dim_basis(inf_matrix: InfluenceMatrix, compression: str = 'wavelet'): 13 | """ 14 | :param inf_matrix: an object of class InfluenceMatrix for the specified plan 15 | :param compression: the compression method 16 | :type compression: str 17 | :return: a list that contains the dimension reduction basis in the format of array(float) 18 | """ 19 | 20 | low_dim_basis = {} 21 | num_of_beams = len(inf_matrix.beamlets_dict) 22 | num_of_beamlets = inf_matrix.beamlets_dict[num_of_beams - 1]['end_beamlet_idx'] + 1 23 | beam_id = [inf_matrix.beamlets_dict[i]['beam_id'] for i in range(num_of_beams)] 24 | beamlets = inf_matrix.get_bev_2d_grid(beam_id=beam_id) 25 | index_position = list() 26 | for ind in range(num_of_beams): 27 | low_dim_basis[beam_id[ind]] = [] 28 | for i in range(inf_matrix.beamlets_dict[ind]['start_beamlet_idx'], 29 | inf_matrix.beamlets_dict[ind]['end_beamlet_idx'] + 1): 30 | index_position.append((np.where(beamlets[ind] == i)[0][0], np.where(beamlets[ind] == i)[1][0])) 31 | if compression == 'wavelet': 32 | max_dim_0 = np.max([beamlets[ind].shape[0] for ind in range(num_of_beams)]) 33 | max_dim_1 = np.max([beamlets[ind].shape[1] for ind in range(num_of_beams)]) 34 | beamlet_2d_grid = np.zeros((int(np.ceil(max_dim_0 / 2)), int(np.ceil(max_dim_1 / 2)))) 35 | for row in range(beamlet_2d_grid.shape[0]): 36 | for col in range(beamlet_2d_grid.shape[1]): 37 | beamlet_2d_grid[row][col] = 1 38 | approximation_coeffs = pywt.idwt2((beamlet_2d_grid, (None, None, None)), 'sym4', 39 | mode='periodization') 40 | horizontal_coeffs = pywt.idwt2((None, (beamlet_2d_grid, None, None)), 'sym4', mode='periodization') 41 | for b in range(num_of_beams): 42 | if ((2 * row + 1 < beamlets[b].shape[0] and 2 * col + 1 < beamlets[b].shape[1] and 43 | beamlets[b][2 * row + 1][2 * col + 1] != -1) or 44 | (2 * row + 1 < beamlets[b].shape[0] and 2 * col < beamlets[b].shape[1] and 45 | beamlets[b][2 * row + 1][2 * col] != -1) or 46 | (2 * row < beamlets[b].shape[0] and 2 * col + 1 < beamlets[b].shape[1] and 47 | beamlets[b][2 * row][2 * col + 1] != -1) or 48 | (2 * row < beamlets[b].shape[0] and 2 * col < beamlets[b].shape[1] and 49 | beamlets[b][2 * row][2 * col] != -1)): 50 | approximation = np.zeros(num_of_beamlets) 51 | horizontal = np.zeros(num_of_beamlets) 52 | for ind in range(inf_matrix.beamlets_dict[b]['start_beamlet_idx'], 53 | inf_matrix.beamlets_dict[b]['end_beamlet_idx'] + 1): 54 | approximation[ind] = approximation_coeffs[index_position[ind]] 55 | horizontal[ind] = horizontal_coeffs[index_position[ind]] 56 | low_dim_basis[beam_id[b]].append(np.transpose(np.stack([approximation, horizontal]))) 57 | beamlet_2d_grid[row][col] = 0 58 | for b in beam_id: 59 | low_dim_basis[b] = np.concatenate(low_dim_basis[b], axis=1) 60 | u, s, vh = scipy.sparse.linalg.svds(low_dim_basis[b], k=min(low_dim_basis[b].shape[0], low_dim_basis[b].shape[1]) - 1) 61 | ind = np.where(s > 0.0001) 62 | low_dim_basis[b] = u[:, ind[0]] 63 | return np.concatenate([low_dim_basis[b] for b in beam_id], axis=1) -------------------------------------------------------------------------------- /compress_rtp/utils/get_sparse_only.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import math 3 | import scipy 4 | 5 | 6 | def get_sparse_only(A: np.ndarray, threshold_perc: float = 1, compression: str = 'naive'): 7 | """ 8 | Get sparse matrix using threshold and different methods 9 | :param A: matrix to be sparsified 10 | :param threshold_perc: threshold for matrix sparsification 11 | :param compression: Method of Sparsification 12 | 13 | :return: Sparse influence matrix 14 | """ 15 | threshold = np.max(A) * threshold_perc * 0.01 16 | if compression == 'rmr': 17 | copy_matrix = A.copy() 18 | print('Generating sparse matrix using RMR...') 19 | np.apply_along_axis(row_operation, 1, copy_matrix, threshold) 20 | S = scipy.sparse.csr_matrix(copy_matrix) 21 | else: 22 | S = np.where(A >= threshold, A, 0) 23 | S = scipy.sparse.csr_matrix(S) 24 | return S 25 | 26 | 27 | def row_operation(copy_row, threshold): 28 | argzero = np.argwhere((np.abs(copy_row) <= threshold) * (copy_row != 0)) 29 | argzero = argzero.reshape(len(argzero), ) 30 | argzero_copy = copy_row[argzero] 31 | copy_row[argzero] = 0 32 | sum = np.sum(argzero_copy) 33 | if sum != 0: 34 | k = math.ceil(sum / threshold) 35 | 36 | indices = np.random.choice(argzero, k, p=argzero_copy / sum, replace=True) 37 | np.add.at(copy_row, indices, sum / k) 38 | 39 | -------------------------------------------------------------------------------- /compress_rtp/utils/get_sparse_plus_low_rank.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy 3 | 4 | try: 5 | from sklearn.utils.extmath import randomized_svd 6 | except ImportError: 7 | pass 8 | 9 | 10 | def get_sparse_plus_low_rank(A: np.ndarray, threshold_perc: float = 1, rank: int = 5): 11 | """ 12 | :param A: dose influence matrix 13 | :param threshold_perc: thresold percentage. Default to 1% of max(A) 14 | :type rank: rank of L = A-S. 15 | :returns: S, H, W using randomized svd 16 | """ 17 | tol = np.max(A) * threshold_perc * 0.01 18 | S = np.where(A > tol, A, 0) 19 | if rank == 0: 20 | S = scipy.sparse.csr_matrix(S) 21 | return S 22 | else: 23 | print('Running svd..') 24 | [U, svd_S, V] = randomized_svd(A - S, n_components=rank + 1, random_state=0) 25 | print('svd done!') 26 | H = U[:, :rank] 27 | W = np.diag(svd_S[:rank]) @ V[:rank, :] 28 | S = scipy.sparse.csr_matrix(S) 29 | return S, H, W 30 | -------------------------------------------------------------------------------- /examples/fluence_wavelets.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | This example shows creating and modification of wavelet bases for fluence map compression using portpy 4 | 5 | """ 6 | 7 | import portpy.photon as pp 8 | from compress_rtp.utils.get_low_dim_basis import get_low_dim_basis 9 | import cvxpy as cp 10 | import matplotlib.pyplot as plt 11 | 12 | 13 | def ex_wavelet(): 14 | 15 | """ 16 | 1) Accessing the portpy data (DataExplorer class) 17 | 18 | """ 19 | 20 | # Note: you first need to download the patient database from the link provided in the GitHub page. 21 | 22 | # specify the patient data location. 23 | data_dir = r'../../Data' 24 | 25 | # Use PortPy DataExplorer class to explore PortPy data and pick one of the patient 26 | data = pp.DataExplorer(data_dir=data_dir) 27 | patient_id = 'Lung_Patient_3' 28 | data.patient_id = patient_id 29 | 30 | # Load ct and structure set for the above patient using CT and Structures class 31 | ct = pp.CT(data) 32 | structs = pp.Structures(data) 33 | 34 | # If the list of beams are not provided, it uses the beams selected manually 35 | # by a human expert planner for the patient (manually selected beams are stored in portpy data). 36 | # Create beams for the planner beams by default 37 | # for the customized beams, you can pass the argument beam_ids 38 | # e.g. beams = pp.Beams(data, beam_ids=[0,10,20,30,40,50,60]) 39 | beams = pp.Beams(data) 40 | 41 | # create rinds based upon rind definition in optimization params 42 | protocol_name = 'Lung_2Gy_30Fx' 43 | opt_params = data.load_config_opt_params(protocol_name=protocol_name) 44 | structs.create_opt_structures(opt_params=opt_params) 45 | 46 | # load influence matrix based upon beams and structure set 47 | inf_matrix = pp.InfluenceMatrix(ct=ct, structs=structs, beams=beams) 48 | 49 | # load clinical criteria from the config files for which plan to be optimized 50 | clinical_criteria = pp.ClinicalCriteria(data, protocol_name=protocol_name) 51 | 52 | ''' 53 | 2) Optimizing the plan without quadratic smoothness (with and without wavelet constraint) 54 | 55 | ''' 56 | # - Without wavelet constraint 57 | 58 | # remove smoothness objective 59 | for i in range(len(opt_params['objective_functions'])): 60 | if opt_params['objective_functions'][i]['type'] == 'smoothness-quadratic': 61 | opt_params['objective_functions'][i]['weight'] = 0 62 | 63 | # create a plan using ct, structures, beams and influence matrix. Clinical criteria is optional 64 | my_plan = pp.Plan(ct=ct, structs=structs, beams=beams, inf_matrix=inf_matrix, clinical_criteria=clinical_criteria) 65 | 66 | # create cvxpy problem using the clinical criteria and optimization parameters 67 | opt = pp.Optimization(my_plan, opt_params=opt_params) 68 | opt.create_cvxpy_problem() 69 | sol_no_quad_no_wav = opt.solve(solver='MOSEK', verbose=False) 70 | 71 | # - With wavelet constraint 72 | 73 | # creating the wavelet incomplete basis representing a low dimensional subspace for dimension reduction 74 | wavelet_basis = get_low_dim_basis(inf_matrix=inf_matrix, compression='wavelet') 75 | # Smoothness Constraint 76 | y = cp.Variable(wavelet_basis.shape[1]) 77 | opt.constraints += [wavelet_basis @ y == opt.vars['x']] 78 | sol_no_quad_with_wav = opt.solve(solver='MOSEK', verbose=False) 79 | 80 | ''' 81 | 3) Optimizing the plan with quadratic smoothness 82 | 83 | ''' 84 | # - Without wavelet constraint 85 | 86 | for i in range(len(opt_params['objective_functions'])): 87 | if opt_params['objective_functions'][i]['type'] == 'smoothness-quadratic': 88 | opt_params['objective_functions'][i]['weight'] = 10 89 | 90 | # create cvxpy problem using the clinical criteria and optimization parameters 91 | opt = pp.Optimization(my_plan, opt_params=opt_params) 92 | opt.create_cvxpy_problem() 93 | 94 | sol_quad_no_wav = opt.solve(solver='MOSEK', verbose=False) 95 | 96 | # - With Wavelet constraint 97 | 98 | # Wavelet Smoothness Constraint 99 | y = cp.Variable(wavelet_basis.shape[1]) 100 | opt.constraints += [wavelet_basis @ y == opt.vars['x']] 101 | sol_quad_with_wav = opt.solve(solver='MOSEK', verbose=False) 102 | 103 | ''' 104 | 4) Saving and loading plans 105 | 106 | ''' 107 | 108 | pp.save_plan(my_plan, plan_name='my_plan.pkl', path=r'C:\temp') 109 | pp.save_optimal_sol(sol_no_quad_no_wav, sol_name='sol_no_quad_no_wav.pkl', path=r'C:\temp') 110 | pp.save_optimal_sol(sol_no_quad_with_wav, sol_name='sol_no_quad_with_wav.pkl', path=r'C:\temp') 111 | pp.save_optimal_sol(sol_quad_no_wav, sol_name='sol_quad_no_wav.pkl', path=r'C:\temp') 112 | pp.save_optimal_sol(sol_quad_with_wav, sol_name='sol_quad_with_wav.pkl', path=r'C:\temp') 113 | 114 | # my_plan = pp.load_plan(plan_name='my_plan', path=r'C:\temp') 115 | # sol_no_quad_no_wav = pp.load_optimal_sol(sol_name='sol_no_quad_no_wav', path=r'C:\temp') 116 | # sol_no_quad_with_wav = pp.load_optimal_sol(sol_name='sol_no_quad_with_wav', path=r'C:\temp') 117 | # sol_quad_no_wav = pp.load_optimal_sol(sol_name='sol_quad_no_wav', path=r'C:\temp') 118 | # sol_quad_with_wav = pp.load_optimal_sol(sol_name='sol_quad_with_wav', path=r'C:\temp') 119 | 120 | ''' 121 | 5) Visualization and compare the plans 122 | 123 | ''' 124 | # plot fluence 3D and 2D 125 | fig, ax = plt.subplots(1, 2, figsize=(18, 6), subplot_kw={'projection': '3d'}) 126 | fig.suptitle('Without Quadratic smoothness') 127 | pp.Visualization.plot_fluence_3d(sol=sol_no_quad_no_wav, beam_id=37, ax=ax[0], title='Without Wavelet') 128 | pp.Visualization.plot_fluence_3d(sol=sol_no_quad_with_wav, beam_id=37, ax=ax[1], title='With Wavelet') 129 | 130 | fig, ax = plt.subplots(1, 2, figsize=(18, 6), subplot_kw={'projection': '3d'}) 131 | fig.suptitle('With Quadratic smoothness') 132 | pp.Visualization.plot_fluence_3d(sol=sol_quad_no_wav, beam_id=37, ax=ax[0], title='Without Wavelet') 133 | pp.Visualization.plot_fluence_3d(sol=sol_quad_with_wav, beam_id=37, ax=ax[1], title='With Wavelet') 134 | plt.show() 135 | 136 | # plot DVH for the structures in the given list. Default dose_1d is in Gy and volume is in relative scale(%). 137 | structs = ['PTV', 'ESOPHAGUS', 'HEART', 'CORD', 'LUNG_L', 'LUNG_R'] 138 | fig, ax = plt.subplots(1, 2, figsize=(20, 8)) 139 | ax0 = pp.Visualization.plot_dvh(my_plan, sol=sol_no_quad_no_wav, structs=structs, style='solid', ax=ax[0]) 140 | ax0 = pp.Visualization.plot_dvh(my_plan, sol=sol_no_quad_with_wav, structs=structs, style='dashed', ax=ax0) 141 | fig.suptitle('DVH comparison') 142 | ax0.set_title('Without Quadratic smoothness \n solid: Without Wavelet, Dash: With Wavelet') 143 | # plt.show() 144 | # print('\n\n') 145 | 146 | # fig, ax = plt.subplots(figsize=(12, 8)) 147 | ax1 = pp.Visualization.plot_dvh(my_plan, sol=sol_quad_no_wav, structs=structs, style='solid', ax=ax[1]) 148 | ax1 = pp.Visualization.plot_dvh(my_plan, sol=sol_quad_with_wav, structs=structs, style='dashed', ax=ax1) 149 | ax1.set_title('With Quadratic smoothness \n solid: Without Wavelet, Dash: With Wavelet') 150 | plt.show() 151 | 152 | ''' 153 | 6) Evaluate the plan based upon clinical criteria 154 | 155 | ''' 156 | # visualize plan metrics based upon clinical criteria 157 | pp.Evaluation.display_clinical_criteria(my_plan, 158 | sol=[sol_no_quad_no_wav, sol_no_quad_with_wav, sol_quad_no_wav, sol_quad_with_wav], 159 | sol_names=['no_quad_no_wav', 'no_quad_with_wav', 'quad_no_wav', 'quad_with_wav']) 160 | 161 | 162 | if __name__ == "__main__": 163 | ex_wavelet() 164 | -------------------------------------------------------------------------------- /examples/matrix_spare_plus_low_rank.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | This example demonstrates compressed planning based on a sparse-plus-low-rank matrix compression technique: 4 | 5 | """ 6 | import os 7 | import portpy.photon as pp 8 | from compress_rtp.compress_rtp_optimization import CompressRTPOptimization 9 | from compress_rtp.utils.get_sparse_plus_low_rank import get_sparse_plus_low_rank 10 | from compress_rtp.utils.get_sparse_only import get_sparse_only 11 | import matplotlib.pyplot as plt 12 | from copy import deepcopy 13 | 14 | 15 | def sparse_plus_low_rank(): 16 | """ 17 | 1) Accessing the portpy data (DataExplorer class) 18 | To start using this resource, users are required to download the latest version of the dataset, which can be found at (https://drive.google.com/drive/folders/1nA1oHEhlmh2Hk8an9e0Oi0ye6LRPREit). Then, the dataset can be accessed as demonstrated below. 19 | 20 | """ 21 | 22 | # specify the patient data location. 23 | data_dir = r'../../PortPy/data' 24 | # Use PortPy DataExplorer class to explore PortPy data 25 | data = pp.DataExplorer(data_dir=data_dir) 26 | 27 | # pick a patient from the existing patient list to get detailed info (e.g., beam angles, structures). 28 | data.patient_id = 'Lung_Patient_2' 29 | 30 | ct = pp.CT(data) 31 | structs = pp.Structures(data) 32 | 33 | # If the list of beams are not provided, it uses the beams selected manually 34 | # by a human expert planner for the patient (manually selected beams are stored in portpy data). 35 | # Create beams for the planner beams by default 36 | # for the customized beams, you can pass the argument beam_ids 37 | # e.g. beams = pp.Beams(data, beam_ids=[0,10,20,30,40,50,60]) 38 | beams = pp.Beams(data, load_inf_matrix_full=True) 39 | 40 | # In order to create an IMRT plan, we first need to specify a protocol which includes the disease site, 41 | # the prescribed dose for the PTV, the number of fractions, and the radiation dose thresholds for OARs. 42 | # These information are stored in .json files which can be found in a directory named "config_files". 43 | # An example of such a file is 'Lung_2Gy_30Fx.json'. Here's how you can load these files: 44 | protocol_name = 'Lung_2Gy_30Fx' 45 | # load clinical criteria from the config files for which plan to be optimized 46 | clinical_criteria = pp.ClinicalCriteria(data, protocol_name=protocol_name) 47 | 48 | # Optimization problem formulation 49 | protocol_name = 'Lung_2Gy_30Fx' 50 | # Loading hyper-parameter values for optimization problem 51 | opt_params = data.load_config_opt_params(protocol_name=protocol_name) 52 | # Creating optimization structures (i.e., Rinds) 53 | structs.create_opt_structures(opt_params=opt_params, clinical_criteria=clinical_criteria) 54 | # Loading influence matrix 55 | inf_matrix = pp.InfluenceMatrix(ct=ct, structs=structs, beams=beams, is_full=True) 56 | 57 | """ 58 | 59 | 2) creating a simple IMRT plan using CVXPy (Plan class, Optimization class) 60 | Note: you can call different opensource / commercial optimization engines from CVXPy. 61 | For commercial engines (e.g., Mosek, Gorubi, CPLEX), you first need to obtain an appropriate license. 62 | Most commercial optimization engines give free academic license. 63 | 64 | Create my_plan object which would store all the data needed for optimization 65 | (e.g., influence matrix, structures and their voxels, beams and their beamlets). 66 | 67 | """ 68 | # create a plan using ct, structures, beams and influence matrix. Clinical criteria is optional 69 | my_plan = pp.Plan(ct=ct, structs=structs, beams=beams, inf_matrix=inf_matrix, clinical_criteria=clinical_criteria) 70 | 71 | # run optimization with naive thresold of 1% of max(A) and no low rank 72 | # create cvxpy problem using the clinical criteria and optimization parameters 73 | A = deepcopy(inf_matrix.A) 74 | S = get_sparse_only(A=A, threshold_perc=1) 75 | # Users can also use below method to get sparse matrix using threshold. Rank=0 is equivalent to above method 76 | # S = get_sparse_plus_low_rank(A=A, thresold_perc=1, rank=0) 77 | inf_matrix.A = S 78 | opt = pp.Optimization(my_plan, inf_matrix=inf_matrix, opt_params=opt_params) 79 | opt.create_cvxpy_problem() 80 | sol_sparse = opt.solve(solver='MOSEK', verbose=True) 81 | 82 | # run optimization with thresold of 1% and rank 5 83 | # create cvxpy problem using the clinical criteria and optimization parameters 84 | S, H, W = get_sparse_plus_low_rank(A=A, threshold_perc=1, rank=5) 85 | opt = CompressRTPOptimization(my_plan, opt_params=opt_params) 86 | opt.create_cvxpy_problem_compressed(S=S, H=H, W=W) 87 | 88 | # run imrt fluence map optimization using cvxpy and one of the supported solvers and save the optimal solution in sol 89 | mosek_params = {'MSK_IPAR_PRESOLVE_ELIMINATOR_MAX_NUM_TRIES': 0, 90 | 'MSK_IPAR_INTPNT_SCALING': 'MSK_SCALING_NONE'} 91 | sol_slr = opt.solve(solver='MOSEK', verbose=True, mosek_params=mosek_params) 92 | 93 | """ 94 | 3) visualizing the dvh with and without compression (Visualization class) 95 | 96 | """ 97 | 98 | fig, ax = plt.subplots(1, 2, figsize=(20, 8)) 99 | struct_names = ['PTV', 'ESOPHAGUS', 'HEART', 'CORD', 'LUNGS_NOT_GTV'] 100 | dose_1d_sparse = (S @ sol_sparse['optimal_intensity']) * my_plan.get_num_of_fractions() 101 | dose_1d_full_sparse = (A @ sol_sparse['optimal_intensity']) * my_plan.get_num_of_fractions() 102 | ax0 = pp.Visualization.plot_dvh(my_plan, dose_1d=dose_1d_sparse, struct_names=struct_names, style='dotted', ax=ax[0], norm_flag=True) 103 | ax0 = pp.Visualization.plot_dvh(my_plan, dose_1d=dose_1d_full_sparse, struct_names=struct_names, style='solid', ax=ax0, norm_flag=True) 104 | ax0.set_title("sparse_vs_full") 105 | 106 | dose_1d_slr = (S @ sol_slr['optimal_intensity'] + H @ (W @ sol_slr['optimal_intensity'])) * my_plan.get_num_of_fractions() 107 | dose_1d_full_slr = (A @ sol_slr['optimal_intensity']) * my_plan.get_num_of_fractions() 108 | ax1 = pp.Visualization.plot_dvh(my_plan, dose_1d=dose_1d_slr, struct_names=struct_names, style='dashed', ax=ax[1], norm_flag=True) 109 | ax1 = pp.Visualization.plot_dvh(my_plan, dose_1d=dose_1d_full_slr, struct_names=struct_names, style='solid', ax=ax1, norm_flag=True) 110 | ax1.set_title("slr_vs_full") 111 | plt.show(block=False) 112 | 113 | """ 114 | 4) evaluating the plan (Evaluation class) 115 | The Evaluation class offers a set of methods for quantifying the optimized plan. 116 | If you need to compute individual dose volume metrics, you can use methods such as *get_dose* or *get_volume*. 117 | Furthermore, the class also facilitates the assessment of the plan based on a collection of metrics, 118 | such as mean, max, and dose-volume histogram (DVH), as specified in the clinical protocol. This capability is demonstrated below 119 | """ 120 | 121 | # visualize plan metrics based upon clinical criteria 122 | pp.Evaluation.display_clinical_criteria(my_plan, dose_1d=[dose_1d_full_sparse, dose_1d_full_slr], sol_names=['Without compression', 'With compression']) 123 | 124 | """ 125 | 5) saving and loading the plan for future use (utils) 126 | 127 | """ 128 | # Comment/Uncomment these lines to save and load the pickle file for plans and optimal solution from the directory 129 | pp.save_plan(my_plan, plan_name='my_plan.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 130 | pp.save_optimal_sol(sol_slr, sol_name='sol_sparse.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 131 | pp.save_optimal_sol(sol_slr, sol_name='sol_slr.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 132 | # my_plan = pp.load_plan(plan_name='my_plan.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 133 | # sol = pp.load_optimal_sol(sol_name='sol_sparse.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 134 | 135 | 136 | if __name__ == "__main__": 137 | sparse_plus_low_rank() 138 | -------------------------------------------------------------------------------- /examples/matrix_sparse_only.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | This example demonstrates compressed planning based on a RMR matrix compression technique: 4 | 5 | """ 6 | import os 7 | import portpy.photon as pp 8 | from compress_rtp.utils.get_sparse_only import get_sparse_only 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | from copy import deepcopy 12 | 13 | 14 | def matrix_sparse_only_rmr(): 15 | """ 16 | 1) Accessing the portpy data (DataExplorer class) 17 | To start using this resource, users are required to download the latest version of the dataset, which can be found at (https://drive.google.com/drive/folders/1nA1oHEhlmh2Hk8an9e0Oi0ye6LRPREit). Then, the dataset can be accessed as demonstrated below. 18 | 19 | """ 20 | 21 | # specify the patient data location. 22 | data_dir = r'../../PortPy/data' 23 | # Use PortPy DataExplorer class to explore PortPy data 24 | data = pp.DataExplorer(data_dir=data_dir) 25 | 26 | # pick a patient from the existing patient list to get detailed info (e.g., beam angles, structures). 27 | data.patient_id = 'Lung_Patient_6' 28 | ct = pp.CT(data) 29 | structs = pp.Structures(data) 30 | 31 | # If the list of beams are not provided, it uses the beams selected manually 32 | # by a human expert planner for the patient (manually selected beams are stored in portpy data). 33 | # Create beams for the planner beams by default 34 | # for the customized beams, you can pass the argument beam_ids 35 | # e.g. beams = pp.Beams(data, beam_ids=[0,10,20,30,40,50,60]) 36 | beams = pp.Beams(data, load_inf_matrix_full=True) 37 | 38 | # In order to create an IMRT plan, we first need to specify a protocol which includes the disease site, 39 | # the prescribed dose for the PTV, the number of fractions, and the radiation dose thresholds for OARs. 40 | # These information are stored in .json files which can be found in a directory named "config_files". 41 | # An example of such a file is 'Lung_2Gy_30Fx.json'. Here's how you can load these files: 42 | protocol_name = 'Lung_2Gy_30Fx' 43 | # load clinical criteria from the config files for which plan to be optimized 44 | clinical_criteria = pp.ClinicalCriteria(data, protocol_name=protocol_name) 45 | 46 | # Optimization problem formulation 47 | protocol_name = 'Lung_2Gy_30Fx' 48 | # Loading hyper-parameter values for optimization problem 49 | opt_params = data.load_config_opt_params(protocol_name=protocol_name) 50 | # Creating optimization structures (i.e., Rinds) 51 | structs.create_opt_structures(opt_params=opt_params, clinical_criteria=clinical_criteria) 52 | # Loading influence matrix 53 | inf_matrix = pp.InfluenceMatrix(ct=ct, structs=structs, beams=beams, is_full=True) 54 | 55 | """ 56 | 57 | 2) creating a simple IMRT plan using CVXPy (Plan class, Optimization class) 58 | Note: you can call different opensource / commercial optimization engines from CVXPy. 59 | For commercial engines (e.g., Mosek, Gorubi, CPLEX), you first need to obtain an appropriate license. 60 | Most commercial optimization engines give free academic license. 61 | 62 | Create my_plan object which would store all the data needed for optimization 63 | (e.g., influence matrix, structures and their voxels, beams and their beamlets). 64 | 65 | """ 66 | # create a plan using ct, structures, beams and influence matrix. Clinical criteria is optional 67 | my_plan = pp.Plan(ct=ct, structs=structs, beams=beams, inf_matrix=inf_matrix, clinical_criteria=clinical_criteria) 68 | 69 | # run optimization with naive thresold of 1% of max(A) and no low rank 70 | # create cvxpy problem using the clinical criteria and optimization parameters 71 | A = deepcopy(inf_matrix.A) 72 | S_sparse = get_sparse_only(A=A, threshold_perc=1) 73 | inf_matrix.A = S_sparse 74 | opt = pp.Optimization(my_plan, inf_matrix=inf_matrix, opt_params=opt_params) 75 | opt.create_cvxpy_problem() 76 | sol_sparse_naive = opt.solve(solver='MOSEK', verbose=True) 77 | 78 | # run optimization with thresold of 1% and sparsifying matrix using RMR method 79 | # create cvxpy problem using the clinical criteria and optimization parameters 80 | S_rmr = get_sparse_only(A=A, threshold_perc=10, compression='rmr') 81 | inf_matrix.A = S_rmr 82 | opt = pp.Optimization(my_plan, inf_matrix=inf_matrix, opt_params=opt_params) 83 | opt.create_cvxpy_problem() 84 | sol_sparse_rmr = opt.solve(solver='MOSEK', verbose=True) 85 | 86 | 87 | """ 88 | 3) visualizing the dvh with and without compression (Visualization class) 89 | 90 | """ 91 | 92 | fig, ax = plt.subplots(1, 2, figsize=(20, 8)) 93 | struct_names = ['PTV', 'ESOPHAGUS', 'HEART', 'CORD', 'LUNGS_NOT_GTV'] 94 | dose_1d_naive = (S_sparse @ sol_sparse_naive['optimal_intensity']) * my_plan.get_num_of_fractions() 95 | dose_1d_full_naive = (A @ sol_sparse_naive['optimal_intensity']) * my_plan.get_num_of_fractions() 96 | ax0 = pp.Visualization.plot_dvh(my_plan, dose_1d=dose_1d_naive, struct_names=struct_names, style='dotted', ax=ax[0], norm_flag=True) 97 | ax0 = pp.Visualization.plot_dvh(my_plan, dose_1d=dose_1d_full_naive, struct_names=struct_names, style='solid', ax=ax0, norm_flag=True) 98 | ax0.set_title("sparse_naive_vs_full") 99 | 100 | dose_1d_rmr = (S_rmr @ sol_sparse_rmr['optimal_intensity']) * my_plan.get_num_of_fractions() 101 | dose_1d_full_rmr = (A @ sol_sparse_rmr['optimal_intensity']) * my_plan.get_num_of_fractions() 102 | ax1 = pp.Visualization.plot_dvh(my_plan, dose_1d=dose_1d_rmr, struct_names=struct_names, style='dashed', ax=ax[1], norm_flag=True) 103 | ax1 = pp.Visualization.plot_dvh(my_plan, dose_1d=dose_1d_full_rmr, struct_names=struct_names, style='solid', ax=ax1, norm_flag=True) 104 | ax1.set_title("sparse_rmr_vs_full") 105 | plt.show(block=False) 106 | 107 | """ 108 | 4) evaluating the plan (Evaluation class) 109 | The Evaluation class offers a set of methods for quantifying the optimized plan. 110 | If you need to compute individual dose volume metrics, you can use methods such as *get_dose* or *get_volume*. 111 | Furthermore, the class also facilitates the assessment of the plan based on a collection of metrics, 112 | such as mean, max, and dose-volume histogram (DVH), as specified in the clinical protocol. This capability is demonstrated below 113 | """ 114 | 115 | # visualize plan metrics based upon clinical criteria 116 | pp.Evaluation.display_clinical_criteria(my_plan, dose_1d=[dose_1d_full_naive, dose_1d_full_rmr], sol_names=['Naive Sparsification', 'RMR Sparsification']) 117 | 118 | """ 119 | 5) saving and loading the plan for future use (utils) 120 | 121 | """ 122 | # Comment/Uncomment these lines to save and load the pickle file for plans and optimal solution from the directory 123 | pp.save_plan(my_plan, plan_name='my_plan.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 124 | pp.save_optimal_sol(sol_sparse_naive, sol_name='sol_sparse_naive.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 125 | pp.save_optimal_sol(sol_sparse_rmr, sol_name='sol_sparse_rmr.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 126 | # my_plan = pp.load_plan(plan_name='my_plan.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 127 | # sol = pp.load_optimal_sol(sol_name='sol_sparse.pkl', path=os.path.join(r'C:\temp', data.patient_id)) 128 | 129 | 130 | if __name__ == "__main__": 131 | matrix_sparse_only_rmr() -------------------------------------------------------------------------------- /images/Algorithm_RMR.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/Algorithm_RMR.png -------------------------------------------------------------------------------- /images/CompressRTPLogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/CompressRTPLogo.png -------------------------------------------------------------------------------- /images/CompressRTPLogo2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/CompressRTPLogo2.PNG -------------------------------------------------------------------------------- /images/FluenceCompress.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/FluenceCompress.PNG -------------------------------------------------------------------------------- /images/LowDimRT.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/LowDimRT.png -------------------------------------------------------------------------------- /images/RMR_NeurIPS_Paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/RMR_NeurIPS_Paper.pdf -------------------------------------------------------------------------------- /images/RMR_performance.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/RMR_performance.PNG -------------------------------------------------------------------------------- /images/RMR_vs_Naive.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/RMR_vs_Naive.PNG -------------------------------------------------------------------------------- /images/RMR_vs_Native.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/RMR_vs_Native.png -------------------------------------------------------------------------------- /images/RMR_vs_Others.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/RMR_vs_Others.png -------------------------------------------------------------------------------- /images/SLR.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/SLR.PNG -------------------------------------------------------------------------------- /images/SPlusL_Lung_Benefits.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/SPlusL_Lung_Benefits.png -------------------------------------------------------------------------------- /images/SPlusL_singular_values.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/SPlusL_singular_values.png -------------------------------------------------------------------------------- /images/Wavelet_Benefits.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PortPy-Project/CompressRTP/867ff1632026b9d25ae9ea84ca2a5612b2a076a9/images/Wavelet_Benefits.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | portpy>=1.0.3 2 | PyWavelets>=1.4.0 3 | scikit-learn>=1.5.2 4 | --------------------------------------------------------------------------------