├── .gitignore
├── LICENSE
├── README.md
├── __init__.py
├── benchmarks
│   └── random_qps.py
├── examples
│   └── reluqpth-simple.py
├── reluqp
│   ├── __init__.py
│   ├── classes.py
│   ├── reluqpth.py
│   └── utils.py
├── results
│   └── random_qp_benchmark copy.png
└── setup.py

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

.vscode/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Robotic Exploration Lab

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# ReLUQP-py

This is a Python implementation of the ReLU-QP solver from the paper "[ReLU-QP: A GPU-Accelerated Quadratic Programming Solver for Model-Predictive Control](https://arxiv.org/abs/2311.18056)".
A Julia implementation can be found [here](https://github.com/RoboticExplorationLab/ReLUQP.jl).


## Installation

```pip install -e .```

## Usage

Please see the example in the `examples` folder.
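
For a quick start, the core of `examples/reluqpth-simple.py` looks like this (the random test QP comes from `reluqp.utils.rand_qp`):

```python
import reluqp.reluqpth as reluqp
import reluqp.utils as utils

# random QP with 10 variables, 5 equality and 5 inequality constraints
H, g, A, l, u, x_sol = utils.rand_qp(nx=10, n_eq=5, n_ineq=5)

model = reluqp.ReLU_QP()
model.setup(H, g, A, l, u)  # solver settings can be passed as keywords, e.g. eps_abs=1e-4
results = model.solve()

print(results.info.status)  # 'solved'
print(results.x)            # primal solution (a torch tensor, on GPU if available)
```

For sequential problems, `model.update(g=..., l=..., u=...)` changes the problem vectors in place, and a following `model.solve()` warm-starts from the previous solution.
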

## Citation
If you find this code useful, please consider citing our paper:
```
@inproceedings{bishop_relu-qp_2023,
    title = {{ReLU}-{QP}: A {GPU}-Accelerated Quadratic Programming Solver for Model-Predictive Control},
    url = {http://arxiv.org/abs/2311.18056},
    booktitle = {IEEE International Conference on Robotics and Automation},
    author = {Bishop, Arun L. and Zhang, John Z. and Gurumurthy, Swaminathan and Tracy, Kevin and Manchester, Zachary},
    year = {2024}
}
```
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoboticExplorationLab/ReLUQP-py/f30914895b1e267ee14e4ce14281943b6d0dc02a/__init__.py
--------------------------------------------------------------------------------
/benchmarks/random_qps.py:
--------------------------------------------------------------------------------
"""
benchmarking random QP
"""

import reluqp.reluqpth as reluqpth
import reluqp.utils as utils
import numpy as np
import osqp
from scipy import sparse
import matplotlib.pyplot as plt
import proxsuite
import tqdm

class Random_QP_benchmark():
    def __init__(self) -> None:
        pass

    def reluqpth_solve(self, nx=10, n_eq=5, n_ineq=5, seed=1, tol=1e-4):
        H, g, A, l, u, x_sol = utils.rand_qp(nx=nx, n_eq=n_eq, n_ineq=n_ineq, seed=seed)
        model = reluqpth.ReLU_QP()
        model.setup(H=H, g=g, A=A, l=l, u=u, eps_abs=tol)
        results = model.solve()
        assert results.info.status == 'solved'
        return results.info.solve_time, results.x

    def osqp_solve(self, nx=10, n_eq=5, n_ineq=5, seed=1, tol=1e-4):
        H, g, A, l, u, x_sol = utils.rand_qp(nx=nx, n_eq=n_eq, n_ineq=n_ineq, seed=seed)
        model = osqp.OSQP()
        model.setup(P=sparse.csc_matrix(H), q=g, A=sparse.csc_matrix(A), l=l, u=u, eps_abs=tol, eps_rel=0, verbose=False)
        results = model.solve()
        assert results.info.status == 'solved'
        return results.info.solve_time, results.x

    def proxqp_solve(self, nx=10, n_eq=5, n_ineq=5, seed=1, tol=1e-4):
        H, g, A, l, u, x_sol = utils.rand_qp(nx=nx, n_eq=n_eq, n_ineq=n_ineq, seed=seed)
        model = proxsuite.proxqp.dense.QP(nx, n_eq, n_ineq)
        model.settings.eps_abs = tol
        model.settings.eps_rel = 0
        model.settings.verbose = False
        model.settings.compute_timings = True
        model.settings.initial_guess = proxsuite.proxqp.NO_INITIAL_GUESS
        model.init(H, g, A[:n_eq], l[:n_eq], A[n_eq:], l[n_eq:], u[n_eq:])
        model.solve()

        return model.results.info.run_time/1e6, model.results.x

    def random_initial_solve(self, nx_min=10, nx_max=1000, n_sample=10, n_seeds=10, n_trials=1, tol=1e-4):
        nx_list = np.geomspace(nx_min, nx_max, num=n_sample)
        timing_dict = dict(nx_list=nx_list, osqp_mean=[], osqp_std=[],
                           reluqpth_mean=[], reluqpth_std=[],
                           proxqp_mean=[], proxqp_std=[])

        # make sure reluqp is compiled
        for _ in range(10):
            _, _ = self.reluqpth_solve()

        for nx in nx_list:
            reluqpth_times = []
            osqp_times = []
            proxqp_times = []
            print("nx: ", int(nx))

            for seed in tqdm.tqdm(range(n_seeds)):
                reluqpth_solve_time, reluqpth_sol = self.reluqpth_solve(nx=int(nx), n_eq=int(nx/4), n_ineq=int(nx/4), seed=seed, tol=tol)
                osqp_solve_time, osqp_sol = self.osqp_solve(nx=int(nx), n_eq=int(nx/4), n_ineq=int(nx/4), seed=seed, tol=tol)
                proxqp_solve_time, proxqp_sol = self.proxqp_solve(nx=int(nx), n_eq=int(nx/4), n_ineq=int(nx/4), seed=seed, tol=tol)

                assert np.linalg.norm(reluqpth_sol.cpu().detach().numpy() - osqp_sol, ord=np.inf) < tol

                reluqpth_times.append(reluqpth_solve_time)
                osqp_times.append(osqp_solve_time)
                proxqp_times.append(proxqp_solve_time)

            timing_dict["osqp_mean"].append(np.mean(osqp_times))
            timing_dict["osqp_std"].append(np.std(osqp_times))
            timing_dict["reluqpth_mean"].append(np.mean(reluqpth_times))
            timing_dict["reluqpth_std"].append(np.std(reluqpth_times))
            timing_dict["proxqp_mean"].append(np.mean(proxqp_times))
            timing_dict["proxqp_std"].append(np.std(proxqp_times))

        self.plot_timing_results(timing_dict)

    def plot_timing_results(self, timing_dict):
        plt.style.use("ggplot")
        fig, ax = plt.subplots()
        ax.set_yscale("log")
        ax.set_xscale("log")
        # ax.plot(timing_dict['nx_list'], timing_dict['reluqpth_mean'], label='reluqpth')
        # ax.plot(timing_dict['nx_list'], timing_dict['osqp_mean'], label='osqp')
        ax.errorbar(timing_dict['nx_list'], timing_dict['reluqpth_mean'], yerr=timing_dict['reluqpth_std'], label='reluqpth')
        ax.errorbar(timing_dict['nx_list'], timing_dict['osqp_mean'], yerr=timing_dict['osqp_std'], label='osqp')
        ax.errorbar(timing_dict['nx_list'], timing_dict['proxqp_mean'], yerr=timing_dict['proxqp_std'], label='proxqp')
        ax.set_xlabel('problem size')
        ax.set_ylabel('solve time (s)')
        ax.legend()
        # plt.show()
        plt.savefig("results/random_qp_benchmark.png")

if __name__ == "__main__":
    benchmark = Random_QP_benchmark()

    benchmark.random_initial_solve(nx_min=10, nx_max=500, n_sample=10, n_seeds=5, tol=1e-6)
--------------------------------------------------------------------------------
/examples/reluqpth-simple.py:
--------------------------------------------------------------------------------
import reluqp.reluqpth as reluqp
import reluqp.utils as utils

if __name__ == '__main__':
    nx = 10
    n_eq = 5
    n_ineq = 5
    H, g, A, l, u, x_sol = utils.rand_qp(nx=nx, n_eq=n_eq, n_ineq=n_ineq)

    model = reluqp.ReLU_QP()
    model.setup(H, g, A, l, u)
    results = model.solve()

    print(results.info.status)
    print(results.x)
--------------------------------------------------------------------------------
/reluqp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoboticExplorationLab/ReLUQP-py/f30914895b1e267ee14e4ce14281943b6d0dc02a/reluqp/__init__.py
--------------------------------------------------------------------------------
/reluqp/classes.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
from typing import Union

class QP(object):
    def __init__(self, H: Union[torch.Tensor, np.ndarray],
                 g: Union[torch.Tensor, np.ndarray],
                 A: Union[torch.Tensor, np.ndarray],
                 l: Union[torch.Tensor, np.ndarray],
                 u: Union[torch.Tensor, np.ndarray],
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
                 precision=torch.double):

        # convert to torch tensors if the input is a numpy array
        if isinstance(H, np.ndarray):
            H = torch.from_numpy(H)
        if isinstance(g, np.ndarray):
            g = torch.from_numpy(g)
        if isinstance(A, np.ndarray):
            A = torch.from_numpy(A)
        if isinstance(l, np.ndarray):
            l = torch.from_numpy(l)
        if isinstance(u, np.ndarray):
            u = torch.from_numpy(u)

        self.H = H.to(device=device, dtype=precision).contiguous()
        self.g = g.to(device=device, dtype=precision).contiguous()
        self.A = A.to(device=device, dtype=precision).contiguous()
        self.l = l.to(device=device, dtype=precision).contiguous()
        self.u = u.to(device=device, dtype=precision).contiguous()

        self.nx = H.shape[0]  # number of decision variables
        self.nc = A.shape[0]  # number of constraints

class Settings(object):
    def __init__(self, verbose=False,
                 warm_starting=True,
                 scaling=False,  # todo: implement scaling
                 rho=0.1,
                 rho_min=1e-6,
                 rho_max=1e6,
                 sigma=1e-6,
                 adaptive_rho=True,
                 adaptive_rho_interval=1,
                 adaptive_rho_tolerance=5,
                 max_iter=4000,
                 eps_abs=1e-3,
                 eq_tol=1e-6,
                 check_interval=25,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
                 precision=torch.float64):

        self.verbose = verbose
        self.warm_starting = warm_starting
        self.scaling = scaling
        self.rho = rho
        self.rho_min = rho_min
        self.rho_max = rho_max
        self.sigma = sigma
        self.adaptive_rho = adaptive_rho
        self.adaptive_rho_interval = adaptive_rho_interval
        self.adaptive_rho_tolerance = adaptive_rho_tolerance
        self.max_iter = max_iter
        self.eps_abs = eps_abs
        self.eq_tol = eq_tol
        self.check_interval = check_interval
        self.device = device
        self.precision = precision

class Info(object):
    def __init__(self, iter=None,
                 status=None,
                 obj_val=None,
                 pri_res=None,
                 dua_res=None,
                 setup_time=0,
                 solve_time=0,
                 update_time=0,
                 run_time=0,
                 rho_estimate=None,
                 ):
        self.iter = iter
        self.status = status
        self.obj_val = obj_val
        self.pri_res = pri_res
        self.dua_res = dua_res
        self.setup_time = setup_time
        self.solve_time = solve_time
        self.update_time = update_time
        self.run_time = run_time
        self.rho_estimate = rho_estimate


class Results(object):
    def __init__(self, x=None, z=None, lam=None, info: Info = None):
        self.x = x
        self.z = z
        self.lam = lam
        self.info = info
--------------------------------------------------------------------------------
/reluqp/reluqpth.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
from typing import Union
from reluqp.classes import Settings, Results, Info, QP
# from utils import *
import timeit

# adapted from swami's implementation
class ReLU_Layer(torch.nn.Module):
    def __init__(self, QP=None, settings=Settings()):
        super(ReLU_Layer, self).__init__()

        torch.set_default_dtype(settings.precision)
        self.QP = QP
        self.settings = settings
        self.rhos = self.setup_rhos()

        self.W_ks, self.B_ks, self.b_ks = self.setup_matrices()
        self.clamp_inds = (self.QP.nx, self.QP.nx + self.QP.nc)

    def setup_rhos(self):
        """
        Setup rho values for ADMM
        """
        stng = self.settings
        rhos = [stng.rho]
        if stng.adaptive_rho:
            rho = stng.rho/stng.adaptive_rho_tolerance
            while rho >= stng.rho_min:
                rhos.append(rho)
                rho = rho/stng.adaptive_rho_tolerance
            rho = stng.rho*stng.adaptive_rho_tolerance
            while rho <= stng.rho_max:
                rhos.append(rho)
                rho = rho*stng.adaptive_rho_tolerance
            rhos.sort()
        # convert to torch tensor
        rhos = torch.tensor(rhos, device=stng.device, dtype=stng.precision).contiguous()
        return rhos

    def setup_matrices(self):
        """
        Setup ADMM matrices for ReLU-QP solver for each rho
        """
        # unpack values
        H, g, A, l, u = self.QP.H, self.QP.g, self.QP.A, self.QP.l, self.QP.u
        nx, nc = self.QP.nx, self.QP.nc
        sigma = self.settings.sigma
        stng = self.settings

        # Calculate kkt_rhs_invs
        kkt_rhs_invs = []
        for rho_scalar in self.rhos:
            rho = rho_scalar * torch.ones(nc).to(g)
            rho[(u - l) <= stng.eq_tol] = rho_scalar * 1e3
            rho = torch.diag(rho)
            kkt_rhs_invs.append(torch.inverse(H + sigma * torch.eye(nx).to(g) + A.T @ (rho @ A)))

        W_ks = {}
        B_ks = {}
        b_ks = {}

        # Other layer updates for each rho
        for rho_ind, rho_scalar in enumerate(self.rhos):
            rho = rho_scalar * torch.ones(nc, device=stng.device, dtype=stng.precision).contiguous()
            rho[(u - l) <= stng.eq_tol] = rho_scalar * 1e3
            rho_inv = torch.diag(1.0 / rho)
            rho = torch.diag(rho).to(device=stng.device, dtype=stng.precision).contiguous()
            K = kkt_rhs_invs[rho_ind]
            Ix = torch.eye(nx, device=stng.device, dtype=stng.precision).contiguous()
            Ic = torch.eye(nc, device=stng.device, dtype=stng.precision).contiguous()
            W_ks[rho_ind] = torch.cat([
                torch.cat([K @ (sigma * Ix - A.T @ (rho @ A)), 2 * K @ A.T @ rho, -K @ A.T], dim=1),
                torch.cat([A @ K @ (sigma * Ix - A.T @ (rho @ A)) + A, 2 * A @ K @ A.T @ rho - Ic, -A @ K @ A.T + rho_inv], dim=1),
                torch.cat([rho @ A, -rho, Ic], dim=1)
            ], dim=0).contiguous()
            B_ks[rho_ind] = torch.cat([-K, -A @ K, torch.zeros(nc, nx).to(g)], dim=0).contiguous()
            b_ks[rho_ind] = (B_ks[rho_ind] @ g).contiguous()
        return W_ks, B_ks, b_ks

    def forward(self, input, idx):
        input = self.jit_forward(input, self.W_ks[idx], self.b_ks[idx], self.QP.l, self.QP.u, self.clamp_inds[0], self.clamp_inds[1])
        return input

    @torch.jit.script
    def jit_forward(input, W, b, l, u, idx1: int, idx2: int):
        torch.matmul(W, input, out=input)
        input.add_(b)
        input[idx1:idx2].clamp_(l, u)
        return input
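
# Note: ReLU_Layer packs one ADMM iteration into a single affine map plus a
# clamp. Stacking v = [x; z; lam], the update in jit_forward is
#     v <- clamp(W_k @ v + b_k)   with   b_k = B_k @ g,
# where the clamp acts only on the z block (indices nx : nx + nc), i.e. a
# ReLU-style network layer. One (W_k, B_k, b_k) triple is precomputed per
# candidate rho, so changing rho at runtime is an index switch rather than a
# matrix refactorization.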

class ReLU_QP(object):
    def __init__(self):
        super().__init__()

        self.info = Info()
        self.results = Results(info=self.info)

        self.start = torch.cuda.Event(enable_timing=True)
        self.end = torch.cuda.Event(enable_timing=True)

    def setup(self, H, g, A, l, u,
              verbose=False,
              warm_starting=True,
              scaling=False,  # todo: implement scaling
              rho=0.1,
              rho_min=1e-6,
              rho_max=1e6,
              sigma=1e-6,
              adaptive_rho=True,
              adaptive_rho_interval=1,
              adaptive_rho_tolerance=5,
              max_iter=4000,
              eps_abs=1e-3,
              check_interval=25,
              device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
              precision=torch.float64):
        """
        Setup ReLU-QP solver problem of the form

        minimize     1/2 x' * H * x + g' * x
        subject to   l <= A * x <= u

        solver settings can be specified as additional keyword arguments
        """
        self.start.record()

        self.settings = Settings(verbose=verbose,
                                 warm_starting=warm_starting,
                                 scaling=scaling,
                                 rho=rho,
                                 rho_min=rho_min,
                                 rho_max=rho_max,
                                 sigma=sigma,
                                 adaptive_rho=adaptive_rho,
                                 adaptive_rho_interval=adaptive_rho_interval,
                                 adaptive_rho_tolerance=adaptive_rho_tolerance,
                                 max_iter=max_iter,
                                 eps_abs=eps_abs,
                                 check_interval=check_interval,
                                 device=device,
                                 precision=precision)

        self.QP = QP(H, g, A, l, u)

        self.layers = ReLU_Layer(QP=self.QP, settings=self.settings)

        self.x = torch.zeros(self.QP.nx).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
        self.z = torch.zeros(self.QP.nc).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
        self.lam = torch.zeros(self.QP.nc).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
        self.output = torch.cat([self.x, self.z, self.lam]).to(device=self.settings.device, dtype=self.settings.precision).contiguous()

        self.rho_ind = np.argmin(np.abs(self.layers.rhos.cpu().detach().numpy() - self.settings.rho))

        self.end.record()
        torch.cuda.synchronize()
        self.results.info.setup_time = self.start.elapsed_time(self.end)/1000.0

    def update(self, g=None, l=None, u=None,
               Hx=None, Ax=None):
        """
        Update ReLU-QP problem arguments
        """
        self.start.record()
        # todo update vectors
        if g is not None:
            self.QP.g = torch.from_numpy(g).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
            for (i, rho) in enumerate(self.layers.rhos):
                self.layers.b_ks[i] = self.layers.B_ks[i] @ self.QP.g

        if l is not None:
            self.QP.l = torch.from_numpy(l).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
        if u is not None:
            self.QP.u = torch.from_numpy(u).to(device=self.settings.device, dtype=self.settings.precision).contiguous()

        # assert that matrices cannot be changed for now
        assert Hx is None and Ax is None, "updating Hx and Ax is not supported yet"

        self.end.record()
        torch.cuda.synchronize()
        self.results.info.update_time = self.start.elapsed_time(self.end)/1000.0

        return None

    def update_settings(self, **kwargs):
        """
        Update ReLU-QP solver settings

        It is possible to change: 'max_iter', 'eps_abs', 'verbose', 'check_interval'
        """
        for key, value in kwargs.items():
            if key in ["max_iter", "eps_abs", "verbose", "check_interval"]:
                setattr(self.settings, key, value)
            elif key in ["rho", "rho_min", "rho_max", "sigma", "adaptive_rho", "adaptive_rho_interval", "adaptive_rho_tolerance"]:
                raise ValueError("Cannot change {} after setup".format(key))
            else:
                raise ValueError("Invalid setting: {}".format(key))

    def solve(self):
        """
        Solve QP Problem
        """
        self.start.record()

        stng = self.settings
        nx, nc = self.QP.nx, self.QP.nc

        # rho = torch.tensor(self.layers.rhos[self.rho_ind]).to(self.settings.device).to(self.settings.precision).contiguous()
        rho = self.layers.rhos[self.rho_ind]

        # gpu_soln = torch.cat([self.x, self.z, self.lam]).to(self.settings.device).to(self.settings.precision).contiguous()
        for k in range(1, stng.max_iter + 1):
            self.output = self.layers(self.output, self.rho_ind)
            # self.x, self.z, self.lam = gpu_soln[:nx], gpu_soln[nx:nx+nc], gpu_soln[nx+nc:nx+2*nc]
            # rho update
            if k % stng.check_interval == 0 and stng.adaptive_rho:
                self.x, self.z, self.lam = self.output[:nx], self.output[nx:nx+nc], self.output[nx+nc:nx+2*nc]
                primal_res, dual_res, rho = self.compute_residuals(self.QP.H, self.QP.A, self.QP.g, self.x, self.z, self.lam, rho, stng.rho_min, stng.rho_max)

                if rho > self.layers.rhos[self.rho_ind] * stng.adaptive_rho_tolerance and self.rho_ind < len(self.layers.rhos) - 1:
                    self.rho_ind += 1
                    self.rho_ind = min(self.rho_ind, len(self.layers.rhos) - 1)
                elif rho < self.layers.rhos[self.rho_ind] / stng.adaptive_rho_tolerance and self.rho_ind > 0:
                    self.rho_ind -= 1

                if stng.verbose:
                    print('Iter: {}, rho: {:.2e}, res_p: {:.2e}, res_d: {:.2e}'.format(k, rho, primal_res, dual_res))

                # check convergence
                if primal_res < stng.eps_abs * np.sqrt(nc) and dual_res < stng.eps_abs * np.sqrt(nx):

                    self.update_results(iter=k,
                                        status="solved",
                                        pri_res=primal_res,
                                        dua_res=dual_res,
                                        rho_estimate=rho)

                    return self.results

        primal_res, dual_res, rho = self.compute_residuals(self.QP.H, self.QP.A, self.QP.g, self.x, self.z, self.lam, rho, stng.rho_min, stng.rho_max)
        self.update_results(iter=stng.max_iter,
                            status="max_iters_reached",
                            pri_res=primal_res,
                            dua_res=dual_res,
                            rho_estimate=rho)
        return self.results

    def warm_start(self, x: Union[torch.Tensor, np.ndarray] = None,
                   z: Union[torch.Tensor, np.ndarray] = None,
                   lam: Union[torch.Tensor, np.ndarray] = None,
                   rho: float = None):
        """
        Warm start primal or dual variables, lagrange multipliers, and rho
        """
        if x is not None:
            if isinstance(x, np.ndarray):
                x = torch.from_numpy(x)
            self.x = x.to(device=self.settings.device, dtype=self.settings.precision).contiguous()

        if z is not None:
            if isinstance(z, np.ndarray):
                z = torch.from_numpy(z)
            self.z = z.to(device=self.settings.device, dtype=self.settings.precision).contiguous()

        if lam is not None:
            if isinstance(lam, np.ndarray):
                lam = torch.from_numpy(lam)
            self.lam = lam.to(device=self.settings.device, dtype=self.settings.precision).contiguous()

        if rho is not None:
            self.rho_ind = np.argmin(np.abs(self.layers.rhos.cpu().detach().numpy() - rho))

        return None

    def update_results(self, iter=None,
                       status=None,
                       pri_res=None,
                       dua_res=None,
                       rho_estimate=None):
        """
        Update results and info
        """

        self.results.x = self.x
        self.results.z = self.z
        self.results.lam = self.lam

        self.results.info.iter = iter
        self.results.info.status = status
        self.results.info.obj_val = self.compute_J(H=self.QP.H, g=self.QP.g, x=self.x)
        self.results.info.pri_res = pri_res
        self.results.info.dua_res = dua_res
        self.results.info.rho_estimate = rho_estimate
        # self.info.update_time = update_time  # todo: implement in update method
        self.end.record()
        torch.cuda.synchronize()
        run_time = self.start.elapsed_time(self.end)/1000.0

        self.results.info.run_time = run_time
        self.results.info.solve_time = self.results.info.update_time + run_time
        self.lam = torch.zeros(self.QP.nc).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
        if not self.settings.warm_starting:
            self.clear_primal_dual()
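
    # Note on the adaptive-rho rule implemented in compute_residuals below:
    # with primal residual ||A x - z||_inf and dual residual
    # ||H x + A^T lam + g||_inf, each residual is first normalized by the
    # infinity norms of the terms that produced it, and then
    #     rho <- clamp(rho * sqrt(rel_primal / rel_dual), rho_min, rho_max)
    # so rho grows when the primal residual dominates and shrinks when the
    # dual residual dominates (the same balancing heuristic OSQP uses).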
    @torch.jit.script
    def compute_residuals(H, A, g, x, z, lam, rho, rho_min: float, rho_max: float):
        t1 = torch.matmul(A, x)
        t2 = torch.matmul(H, x)
        t3 = torch.matmul(A.T, lam)

        primal_res = torch.linalg.vector_norm(t1 - z, ord=torch.inf)
        dual_res = torch.linalg.vector_norm(t2 + t3 + g, ord=torch.inf)
        numerator = torch.div(primal_res, torch.max(torch.linalg.vector_norm(t1, ord=torch.inf), torch.linalg.vector_norm(z, ord=torch.inf)))
        denom = torch.div(dual_res, torch.max(torch.max(torch.linalg.vector_norm(t2, ord=torch.inf), torch.linalg.vector_norm(t3, ord=torch.inf)), torch.linalg.vector_norm(g, ord=torch.inf)))
        rho = torch.clamp(rho * torch.sqrt(numerator / denom), rho_min, rho_max)
        return primal_res, dual_res, rho

    @torch.jit.script
    def compute_J(H, g, x):
        return 0.5*torch.dot(x, torch.matmul(H, x)) + torch.dot(g, x)

    def clear_primal_dual(self):
        """
        Clear primal and dual variables and reset rho index
        """
        self.x = torch.zeros(self.QP.nx).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
        self.z = torch.zeros(self.QP.nc).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
        self.lam = torch.zeros(self.QP.nc).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
        self.output = torch.cat([self.x, self.z, self.lam]).to(device=self.settings.device, dtype=self.settings.precision).contiguous()
        self.rho_ind = np.argmin(np.abs(self.layers.rhos.cpu().detach().numpy() - self.settings.rho))
        return None

# todo: implement scaling
# todo: better verbose printing
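
# The test below solves a small QP with known solution x* = [2, -1, 1]: the
# first two rows of A encode the equalities x1 + x3 = 3 and x2 + x3 = 0
# (l = u on those rows), and at x* both hold with the remaining lower bounds
# (x_i >= -10) inactive.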
if __name__ == "__main__":
    # test on simple QP
    # min   1/2 x' * H * x + g' * x
    # s.t.  l <= A * x <= u
    H = torch.tensor([[6, 2, 1], [2, 5, 2], [1, 2, 4.0]], dtype=torch.double)
    g = torch.tensor([-8.0, -3, -3], dtype=torch.double)
    A = torch.tensor([[1, 0, 1], [0, 1, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=torch.double)
    l = torch.tensor([3.0, 0, -10.0, -10, -10], dtype=torch.double)
    u = torch.tensor([3.0, 0, torch.inf, torch.inf, torch.inf], dtype=torch.double)


    for i in range(10):
        qp = ReLU_QP()
        qp.setup(H=H, g=g, A=A, l=l, u=u)
        results = qp.solve()
        print("setup time: ", results.info.setup_time)
        print("solve time: ", results.info.solve_time)

    qp = ReLU_QP()
    qp.setup(H=H, g=g, A=A, l=l, u=u)
    results = qp.solve()

    assert torch.allclose(results.x.cpu(), torch.tensor([2.0, -1, 1], dtype=torch.float64))
    # print(results['x'])
    print("Test passed!")
    print(results.x)
    print(results.info.solve_time)
    print(results.info.setup_time)
    print(results.info.iter)
    print(results.info.status)

    print(timeit.timeit(lambda: qp.solve(), number=1000, globals=globals())/1000)
--------------------------------------------------------------------------------
/reluqp/utils.py:
--------------------------------------------------------------------------------
import numpy as np
import cvxpy as cp

# lazy randn
def randn(*dims):
    return np.random.randn(*dims)

def rand(*dims):
    return np.random.rand(*dims)

def rand_qp(nx=10, n_eq=5, n_ineq=5, seed=1, compute_sol=True):
    np.random.seed(seed)
    H = randn(nx, nx)
    H = H.T @ H + np.eye(nx)
    H = H + H.T

    A = randn(n_eq, nx)
    C = randn(n_ineq, nx)

    active_ineq = randn(n_ineq) > 0.5

    mu = randn(n_eq)
    lamb = (randn(n_ineq))*active_ineq

    x = randn(nx)
    b = A@x
    d = C@x - randn(n_ineq)*(~active_ineq)

    g = -H@x - A.T@mu - C.T@lamb

    if compute_sol:
        x = cp.Variable(nx)
        prob = cp.Problem(cp.Minimize((1/2)*cp.quad_form(x, np.array(H)) + g.T@x), [A@x == b, C@x >= d])
        prob.solve()
        return (H, g, np.vstack((A, C)), np.concatenate((b, d)),
                np.concatenate((b, np.full(n_ineq, np.inf))), x.value)
    else:
        return (H, g, np.vstack((A, C)), np.concatenate((b, d)),
                np.concatenate((b, np.full(n_ineq, np.inf))), None)


def update_qp(H, A, n_eq, n_ineq, seed=1, compute_sol=True):
    """
    Update the QP problem with vectors
    """
    np.random.seed(seed)
    nx = H.shape[0]
    C = A[n_eq:]
    A = A[:n_eq]

    active_ineq = randn(n_ineq) > 0.5
    mu = randn(n_eq)
    lamb = (randn(n_ineq))*active_ineq

    x = randn(nx)
    b = A@x
    d = C@x - randn(n_ineq)*(~active_ineq)

    g = -H@x - A.T@mu - C.T@lamb

    if compute_sol:
        x = cp.Variable(nx)
        prob = cp.Problem(cp.Minimize((1/2)*cp.quad_form(x, np.array(H)) + g.T@x), [A@x == b, C@x >= d])
        prob.solve()

        return (H, g, np.vstack((A, C)), np.concatenate((b, d)),
                np.concatenate((b, np.full(n_ineq, np.inf))), x.value)
    else:
        return (H, g, np.vstack((A, C)), np.concatenate((b, d)),
                np.concatenate((b, np.full(n_ineq, np.inf))), None)
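
# How rand_qp builds a problem: it samples a candidate point x and multipliers
# (mu, lamb), with lamb supported on a randomly chosen active set, sets
# b = A x and d = C x - slack (slack added only on the inactive rows), and
# picks g = -H x - A' mu - C' lamb so that the stationarity condition holds
# at x. The returned bounds stack the equalities (l = u = b) above the
# inequalities (d <= C x < inf), and the cvxpy solve supplies the reference
# solution x_sol.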
--------------------------------------------------------------------------------
/results/random_qp_benchmark copy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoboticExplorationLab/ReLUQP-py/f30914895b1e267ee14e4ce14281943b6d0dc02a/results/random_qp_benchmark copy.png
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages

setup(
    name='reluqp',
    version='1.0',
    packages=find_packages(),
)
--------------------------------------------------------------------------------
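
# setup.py above declares no dependencies. Judging from the imports in this
# repository, an install_requires along these lines would be needed (a sketch;
# the split between core and benchmark extras is an assumption, and versions
# are deliberately left unpinned):
#
# setup(
#     name='reluqp',
#     version='1.0',
#     packages=find_packages(),
#     install_requires=['torch', 'numpy', 'cvxpy'],        # solver + utils
#     extras_require={'benchmarks': ['osqp', 'proxsuite',
#                                    'scipy', 'matplotlib', 'tqdm']},
# )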