├── paropt ├── __init__.pxd ├── cpp_headers │ └── __init__.pxd ├── README.md ├── mpi-compat.h ├── ParOpt.pxd ├── __init__.py ├── ParOptEig.pyx └── plot_history.py ├── lib └── README ├── docs ├── source │ ├── _static │ │ └── custom.css │ ├── reference.rst │ ├── openmdao_example.rst │ ├── options.rst │ ├── parallel_openmdao_example.rst │ ├── parallel_rosenbrock.rst │ ├── sellar.rst │ └── conf.py ├── ParOpt_theory_manual.pdf ├── Makefile └── refs.bib ├── conda ├── conda_build_config.yaml ├── build.sh └── meta.yaml ├── pyproject.toml ├── .github └── workflows │ ├── black.yml │ ├── clang-format-check.yml │ ├── deploy_conda_package.yml │ └── unit_tests.yml ├── src ├── ParOptAMD.h ├── Makefile ├── ParOptComplexStep.h ├── ParOptOptimizer.h ├── ParOptSparseUtils.h ├── ParOptVec.h ├── ParOptScaledQuasiNewton.h ├── ParOptBlasLapack.h ├── ParOptOptions.h ├── ParOptVec.cpp ├── ParOptSparseMat.h ├── ParOptSparseCholesky.h ├── ParOptMMA.h ├── ParOptOptimizer.cpp ├── ParOptQuasiNewton.h └── ParOptCompactEigenvalueApprox.h ├── examples ├── sellar │ ├── Makefile │ ├── sellar.py │ └── sellar.cpp ├── cholesky │ ├── Makefile │ └── cholesky.cpp ├── rosenbrock │ ├── Makefile │ ├── example.py │ ├── rosenbrock.py │ └── sparse_rosenbrock.cpp ├── openmdao │ ├── paraboloid_min.py │ ├── distrib_paraboloid.py │ └── simple.py ├── toy │ └── toy.py ├── dymos │ ├── brachistochrone │ │ └── brachistochrone.py │ └── simple │ │ └── simple.py ├── limited_memory_test │ └── limited_memory_test.py ├── reduced_problem │ └── reduced.py ├── maratos_effect │ └── maratos.py ├── plot_history │ └── ipopt_plot.py ├── sparse │ └── sparse_rosenbrock.py ├── random_convex │ └── random_convex.py ├── COPS │ └── electron │ │ └── electron.py └── random_quadratic │ └── random_quadratic.py ├── .gitignore ├── ParOpt_Common.mk ├── Makefile.in.info ├── tests └── regression_tests │ └── test_pvec.py ├── Makefile ├── README.md ├── INSTALL.txt └── setup.py /paropt/__init__.pxd: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /paropt/cpp_headers/__init__.pxd: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /lib/README: -------------------------------------------------------------------------------- 1 | The ParOpt shared and static libraries go in this directory 2 | -------------------------------------------------------------------------------- /docs/source/_static/custom.css: -------------------------------------------------------------------------------- 1 | .wy-nav-content { 2 | max-width: 75% !important; 3 | } -------------------------------------------------------------------------------- /docs/ParOpt_theory_manual.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smdogroup/paropt/HEAD/docs/ParOpt_theory_manual.pdf -------------------------------------------------------------------------------- /conda/conda_build_config.yaml: -------------------------------------------------------------------------------- 1 | mpi: 2 | - mpich 3 | - openmpi 4 | 5 | scalar: 6 | - real 7 | - complex 8 | 9 | python: 10 | - 3.7 11 | - 3.8 12 | - 3.9 13 | - 3.10 14 | - 3.11 15 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | #pyproject.toml 2 | [build-system] 3 | # Minimum requirements for the build system to execute. 
4 | requires = ['setuptools>=45.0', 'wheel', 'cython>=3.0.0', 'numpy>=2.0', 'mpi4py>=3.1.1'] 5 | -------------------------------------------------------------------------------- /.github/workflows/black.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | 8 | jobs: 9 | lint: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v2 13 | - uses: psf/black@stable 14 | -------------------------------------------------------------------------------- /src/ParOptAMD.h: -------------------------------------------------------------------------------- 1 | #ifndef PAR_OPT_AMD_H 2 | #define PAR_OPT_AMD_H 3 | 4 | /* 5 | Comptute the AMD reordering of the variables. 6 | 7 | Note: cols is overwritten 8 | */ 9 | void ParOptAMD(int nvars, int *rowp, int *cols, int *perm, 10 | int use_exact_degree); 11 | 12 | #endif // PAR_OPT_AMD_H -------------------------------------------------------------------------------- /examples/sellar/Makefile: -------------------------------------------------------------------------------- 1 | include ../../Makefile.in 2 | include ../../ParOpt_Common.mk 3 | 4 | default: sellar.o 5 | ${CXX} ${CCFLAGS} -o sellar sellar.o ${PAROPT_LD_FLAGS} 6 | 7 | debug: CCFLAGS=${CCFLAGS_DEBUG} 8 | debug: default 9 | 10 | complex: CCFLAGS+=-DPAROPT_USE_COMPLEX 11 | complex: default 12 | 13 | clean: 14 | ${RM} sellar *.o 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # use glob syntax. 
2 | syntax: glob 3 | *~ 4 | *.o 5 | *.so 6 | *.out 7 | *.pdf 8 | *.png 9 | *.dat 10 | *.pyc 11 | *.aux 12 | *.log 13 | *.tex 14 | *.pdf 15 | *.lay 16 | *.phy 17 | *.f5 18 | *.plt 19 | *.out_summary 20 | paropt/*.cpp 21 | paropt/*.pxi 22 | *.tr 23 | *.blg 24 | *.bbl 25 | .idea/* 26 | .vscode/* 27 | *.in 28 | *.pkl 29 | *.a 30 | .DS_Store 31 | -------------------------------------------------------------------------------- /examples/cholesky/Makefile: -------------------------------------------------------------------------------- 1 | include ../../Makefile.in 2 | include ../../ParOpt_Common.mk 3 | 4 | default: cholesky.o 5 | ${CXX} ${CCFLAGS} -o cholesky cholesky.o ${PAROPT_LD_FLAGS} 6 | 7 | debug: CCFLAGS=${CCFLAGS_DEBUG} 8 | debug: default 9 | 10 | complex: CCFLAGS=${CCFLAGS_DEBUG} -DPAROPT_USE_COMPLEX 11 | complex: default 12 | 13 | clean: 14 | ${RM} cholesky cholesky *.o 15 | -------------------------------------------------------------------------------- /paropt/README.md: -------------------------------------------------------------------------------- 1 | #Python interface to ParOpt 2 | 3 | This directory contains the python interface to ParOpt. The interface uses Cython with callbacks to python. 4 | 5 | Data passed back to python is done in place through numpy arrays. This makes the code efficient and avoids copying. However, be careful not to overwrite design variable values as this will directly modify the design variable arrays in ParOpt itself. 
-------------------------------------------------------------------------------- /examples/rosenbrock/Makefile: -------------------------------------------------------------------------------- 1 | include ../../Makefile.in 2 | include ../../ParOpt_Common.mk 3 | 4 | default: rosenbrock.o sparse_rosenbrock.o 5 | ${CXX} ${CCFLAGS} -o rosenbrock rosenbrock.o ${PAROPT_LD_FLAGS} 6 | ${CXX} ${CCFLAGS} -o sparse_rosenbrock sparse_rosenbrock.o ${PAROPT_LD_FLAGS} 7 | 8 | debug: CCFLAGS=${CCFLAGS_DEBUG} 9 | debug: default 10 | 11 | complex: CCFLAGS=${CCFLAGS_DEBUG} -DPAROPT_USE_COMPLEX 12 | complex: default 13 | 14 | clean: 15 | ${RM} rosenbrock sparse_rosenbrock *.o 16 | -------------------------------------------------------------------------------- /paropt/mpi-compat.h: -------------------------------------------------------------------------------- 1 | /* Author: Lisandro Dalcin */ 2 | /* Contact: dalcinl@gmail.com */ 3 | 4 | #ifndef MPI_COMPAT_H 5 | #define MPI_COMPAT_H 6 | 7 | #include 8 | 9 | #if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message) 10 | typedef void *PyMPI_MPI_Message; 11 | #define MPI_Message PyMPI_MPI_Message 12 | #endif 13 | 14 | #if (MPI_VERSION < 4) && !defined(PyMPI_HAVE_MPI_Session) 15 | typedef void *PyMPI_MPI_Session; 16 | #define MPI_Session PyMPI_MPI_Session 17 | #endif 18 | 19 | #endif /*MPI_COMPAT_H*/ 20 | -------------------------------------------------------------------------------- /.github/workflows/clang-format-check.yml: -------------------------------------------------------------------------------- 1 | name: clang-format Check 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | 8 | jobs: 9 | formatting-check: 10 | name: Formatting Check 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | path: 15 | - 'src' 16 | - 'examples' 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Run clang-format style check for C/C++ programs. 
20 | uses: jidicula/clang-format-action@v4.8.0 21 | with: 22 | clang-format-version: '13' 23 | check-path: ${{ matrix.path }} 24 | fallback-style: 'Google' 25 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = source 8 | BUILDDIR = ../../paropt-docs 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/source/reference.rst: -------------------------------------------------------------------------------- 1 | Class reference 2 | =============== 3 | 4 | .. automodule:: paropt 5 | :members: 6 | 7 | .. autoclass:: paropt.paropt_driver.ParOptDriver 8 | :members: 9 | 10 | .. automodule:: paropt.ParOpt 11 | :members: 12 | 13 | .. autoclass:: paropt.ParOpt.Optimizer 14 | :members: 15 | 16 | .. autoclass:: paropt.ParOpt.PVec 17 | :members: 18 | 19 | .. autoclass:: paropt.ParOpt.InteriorPoint 20 | :members: 21 | 22 | .. autoclass:: paropt.ParOpt.TrustRegion 23 | :members: 24 | 25 | .. doxygenclass:: ParOptProblem 26 | :members: 27 | 28 | .. doxygenclass:: ParOptInteriorPoint 29 | :members: 30 | 31 | .. 
doxygenclass:: ParOptTrustRegion 32 | :members: -------------------------------------------------------------------------------- /paropt/ParOpt.pxd: -------------------------------------------------------------------------------- 1 | # distutils: language=c++ 2 | 3 | # For MPI capabilities 4 | from mpi4py.MPI cimport * 5 | cimport mpi4py.MPI as MPI 6 | 7 | # Import numpy 8 | import numpy as np 9 | cimport numpy as np 10 | 11 | # Import ParOpt c++ headers 12 | from paropt.cpp_headers.ParOpt cimport * 13 | 14 | cdef class PVec: 15 | cdef ParOptVec *ptr 16 | 17 | cdef inline _init_PVec(ParOptVec *ptr): 18 | vec = PVec() 19 | vec.ptr = ptr 20 | vec.ptr.incref() 21 | return vec 22 | 23 | cdef class CompactQuasiNewton: 24 | cdef ParOptCompactQuasiNewton *ptr 25 | 26 | cdef class ProblemBase: 27 | cdef ParOptProblem *ptr 28 | 29 | cdef class TrustRegionSubproblem(ProblemBase): 30 | cdef ParOptTrustRegionSubproblem *subproblem 31 | -------------------------------------------------------------------------------- /docs/source/openmdao_example.rst: -------------------------------------------------------------------------------- 1 | OpenMDAO example 2 | ================ 3 | 4 | This examples uses the paropt_driver for OpenMDAO to solve the following optimization problem: 5 | 6 | .. math:: 7 | 8 | \begin{align} 9 | \text{min} \qquad & (x-3)^2 + xy + (y+4)^2 - 3 \\ 10 | \text{with respect to} \qquad & -50 \le x, y \le 50 \\ 11 | \text{subject to} \qquad & x + y \ge 0 \\ 12 | \end{align} 13 | 14 | Python implementation 15 | --------------------- 16 | 17 | The python implementation of this problem is as follows 18 | 19 | .. 
literalinclude:: ../../examples/openmdao/paraboloid_min.py 20 | :language: python 21 | 22 | This code results in the output: 23 | 24 | :: 25 | 26 | Minimum value = -27.00 27 | (x, y) = (7.00, -7.00) 28 | 29 | -------------------------------------------------------------------------------- /ParOpt_Common.mk: -------------------------------------------------------------------------------- 1 | 2 | # Set the linking files 3 | PAROPT_INCLUDE = -I${PAROPT_DIR}/src 4 | PAROPT_LIB = ${PAROPT_DIR}/lib/libparopt.a 5 | 6 | # Set the optimized/debug compile flags 7 | PAROPT_OPT_CC_FLAGS = ${CCFLAGS} ${PAROPT_INCLUDE} ${METIS_INCLUDE} 8 | PAROPT_DEBUG_CC_FLAGS = ${CCFLAGS_DEBUG} ${PAROPT_INCLUDE} ${METIS_INCLUDE} 9 | 10 | # Set the optimized flags to the default 11 | PAROPT_CC_FLAGS = ${PAROPT_OPT_CC_FLAGS} 12 | 13 | # Set the linking flags 14 | PAROPT_EXTERN_LIBS = ${LAPACK_LIBS} ${METIS_LIB} 15 | PAROPT_LD_FLAGS = ${PAROPT_LD_CMD} ${PAROPT_EXTERN_LIBS} 16 | 17 | # This is the one rule that is used to compile all the 18 | # source code in TACS 19 | %.o: %.cpp 20 | ${CXX} ${PAROPT_CC_FLAGS} -c $< -o $*.o 21 | @echo 22 | @echo " --- Compiled $*.cpp successfully ---" 23 | @echo 24 | -------------------------------------------------------------------------------- /src/Makefile: -------------------------------------------------------------------------------- 1 | include ../Makefile.in 2 | include ../ParOpt_Common.mk 3 | 4 | OBJS = ParOptOptions.o \ 5 | ParOptInteriorPoint.o \ 6 | ParOptVec.o \ 7 | ParOptQuasiNewton.o \ 8 | ParOptMMA.o \ 9 | ParOptTrustRegion.o \ 10 | ParOptProblem.o \ 11 | ParOptOptimizer.o \ 12 | ParOptSparseMat.o \ 13 | ParOptCompactEigenvalueApprox.o \ 14 | CyParOptProblem.o \ 15 | ParOptAMD.o \ 16 | ParOptSparseCholesky.o \ 17 | ParOptSparseUtils.o 18 | 19 | default: ${OBJS} 20 | ${AR} ${AR_FLAGS} ${PAROPT_LIB} ${OBJS} 21 | 22 | %.o: %.c 23 | ${CXX} ${CCFLAGS} -c $< -o $@ 24 | 25 | debug: CCFLAGS=${CCFLAGS_DEBUG} 26 | debug: default 27 | 28 | complex: 
CCFLAGS+=-DPAROPT_USE_COMPLEX 29 | complex: default 30 | 31 | complex_debug: CCFLAGS=${CCFLAGS_DEBUG} -DPAROPT_USE_COMPLEX 32 | complex_debug: default 33 | 34 | clean: 35 | rm -rf *.o 36 | -------------------------------------------------------------------------------- /Makefile.in.info: -------------------------------------------------------------------------------- 1 | # This is the Makefile.in file for the parallel optimizer. 2 | 3 | # This is the default location for ParOpt 4 | PAROPT_DIR=${HOME}/git/paropt 5 | PYTHON=python 6 | PIP=pip 7 | 8 | # Which compiler to use 9 | CXX = mpicxx 10 | 11 | # The C++ compiler flags 12 | CCFLAGS = -fPIC -O3 13 | CCFLAGS_DEBUG = -fPIC -g 14 | 15 | # Set the ar flags 16 | AR_FLAGS = rcs 17 | 18 | # ParOpt linking command 19 | PAROPT_LD_CMD=-L${PAROPT_DIR}/lib/ -Wl,-rpath,${PAROPT_DIR}/lib -lparopt 20 | 21 | # For linux systems, use the following settings: 22 | LAPACK_LIBS = -L/usr/lib -llapack -lblas 23 | SO_EXT=so 24 | SO_LINK_FLAGS=-fPIC -shared 25 | 26 | # For MAC OS X, use the following settings: 27 | # LAPACK_LIBS = -framework accelerate 28 | # SO_EXT=so 29 | # SO_LINK_FLAGS=-fPIC -dynamiclib -undefined dynamic_lookup 30 | 31 | # Flags for the METIS library 32 | METIS_INCLUDE = -I${METIS_DIR}/include/ 33 | METIS_LIB = ${METIS_DIR}/lib/libmetis.a -------------------------------------------------------------------------------- /src/ParOptComplexStep.h: -------------------------------------------------------------------------------- 1 | #ifndef PAROPT_COMPLEX_STEP_H 2 | #define PAROPT_COMPLEX_STEP_H 3 | 4 | #include 5 | 6 | /* 7 | Copyright (c) 2016 Graeme Kennedy. 
All rights reserved 8 | */ 9 | 10 | // Define the real part function for the complex data type 11 | inline double ParOptRealPart(const std::complex& c) { return real(c); } 12 | 13 | // Define the imaginary part function for the complex data type 14 | inline double ParOptImagPart(const std::complex& c) { return imag(c); } 15 | 16 | // Dummy function for real part 17 | inline double ParOptRealPart(const double& r) { return r; } 18 | 19 | // Compute the absolute value 20 | #ifndef FABS_COMPLEX_IS_DEFINED // prevent redefinition 21 | #define FABS_COMPLEX_IS_DEFINED 22 | inline std::complex fabs(const std::complex& c) { 23 | if (real(c) < 0.0) { 24 | return -c; 25 | } 26 | return c; 27 | } 28 | #endif // FABS_COMPLEX_IS_DEFINED 29 | 30 | #endif // PAROPT_COMPLEX_STEP_H 31 | -------------------------------------------------------------------------------- /paropt/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | ParOpt is an interior point optimizer 3 | """ 4 | 5 | import os 6 | 7 | 8 | def get_cython_include(): 9 | """ 10 | Get the include directory for the Cython .pxd files in ParOpt 11 | """ 12 | return [os.path.abspath(os.path.dirname(__file__))] 13 | 14 | 15 | def get_include(): 16 | """ 17 | Get the include directory for the Cython .pxd files in ParOpt 18 | """ 19 | root_path, tail = os.path.split(os.path.abspath(os.path.dirname(__file__))) 20 | 21 | rel_inc_dirs = ["src"] 22 | 23 | inc_dirs = [] 24 | for path in rel_inc_dirs: 25 | inc_dirs.append(os.path.join(root_path, path)) 26 | 27 | return inc_dirs 28 | 29 | 30 | def get_libraries(): 31 | """ 32 | Get the library directories 33 | """ 34 | root_path, tail = os.path.split(os.path.abspath(os.path.dirname(__file__))) 35 | 36 | rel_lib_dirs = ["lib"] 37 | libs = ["paropt"] 38 | lib_dirs = [] 39 | for path in rel_lib_dirs: 40 | lib_dirs.append(os.path.join(root_path, path)) 41 | 42 | return lib_dirs, libs 43 | 44 | 45 | try: 46 | from paropt.plot_history import 
plot_history 47 | except: 48 | pass 49 | -------------------------------------------------------------------------------- /conda/build.sh: -------------------------------------------------------------------------------- 1 | export PAROPT_DIR=${SRC_DIR} 2 | 3 | if [[ $(uname) == Darwin ]]; then 4 | export SO_EXT="dylib" 5 | export SO_LINK_FLAGS="-fPIC -dynamiclib" 6 | export LIB_SLF="${SO_LINK_FLAGS} -install_name @rpath/libparopt.dylib" 7 | export LAPACK_LIBS="-framework accelerate" 8 | elif [[ "$target_platform" == linux-* ]]; then 9 | export SO_EXT="so" 10 | export SO_LINK_FLAGS="-fPIC -shared" 11 | export LIB_SLF="${SO_LINK_FLAGS}" 12 | export LAPACK_LIBS="-L${PREFIX}/lib/ -llapack -lpthread -lblas" 13 | fi 14 | 15 | if [[ $scalar == "complex" ]]; then 16 | export OPTIONAL="complex" 17 | export PIP_FLAGS="-DPAROPT_USE_COMPLEX" 18 | elif [[ $scalar == "real" ]]; then 19 | export OPTIONAL="default" 20 | fi 21 | 22 | cp Makefile.in.info Makefile.in; 23 | make ${OPTIONAL} PAROPT_DIR=${PAROPT_DIR} \ 24 | LAPACK_LIBS="${LAPACK_LIBS}" \ 25 | METIS_INCLUDE=-I${PREFIX}/include/ METIS_LIB="-L${PREFIX}/lib/ -lmetis" \ 26 | SO_LINK_FLAGS="${LIB_SLF}" SO_EXT=${SO_EXT}; 27 | mv ${PAROPT_DIR}/lib/libparopt.${SO_EXT} ${PREFIX}/lib; 28 | 29 | # Recursively copy all header files 30 | mkdir ${PREFIX}/include/paropt; 31 | find ${PAROPT_DIR}/src/ -name '*.h' -exec cp -prv '{}' ${PREFIX}/include/paropt ';' 32 | 33 | CFLAGS=${PIP_FLAGS} ${PYTHON} -m pip install --no-deps --prefix=${PREFIX} . 
-vv; 34 | -------------------------------------------------------------------------------- /examples/sellar/sellar.py: -------------------------------------------------------------------------------- 1 | from mpi4py import MPI 2 | import numpy as np 3 | from paropt import ParOpt 4 | 5 | 6 | # Create the rosenbrock function class 7 | class Sellar(ParOpt.Problem): 8 | def __init__(self): 9 | # Initialize the base class 10 | nvars = 4 11 | ncon = 1 12 | super(Sellar, self).__init__(MPI.COMM_SELF, nvars=nvars, ncon=ncon) 13 | 14 | return 15 | 16 | def getVarsAndBounds(self, x, lb, ub): 17 | """Set the values of the bounds""" 18 | 19 | x[0] = 2.0 20 | x[1] = 1.0 21 | x[2] = 0.0 22 | x[3] = 0.0 23 | 24 | lb[0] = 0.0 25 | lb[1] = 0.0 26 | lb[2] = -1.0 27 | lb[3] = -1.0 28 | 29 | ub[0] = 10.0 30 | ub[1] = 10.0 31 | ub[2] = 3.16 32 | ub[3] = 24.0 33 | return 34 | 35 | def evalObjCon(self, x): 36 | """Evaluate the objective and constraint""" 37 | fail = 0 38 | fobj = x[1] * x[1] + x[0] + x[2] + np.exp(-x[3]) 39 | cons = np.array([x[0] + x[1] - 1.0]) 40 | return fail, fobj, cons 41 | 42 | def evalObjConGradient(self, x, g, A): 43 | """Evaluate the objective and constraint gradient""" 44 | fail = 0 45 | 46 | g[0] = 1.0 47 | g[1] = 2.0 * x[1] 48 | g[2] = 1.0 49 | g[3] = -np.exp(-x[3]) 50 | 51 | A[0][0] = 1.0 52 | A[0][1] = 1.0 53 | 54 | return fail 55 | 56 | 57 | # Allocate the optimization problem 58 | problem = Sellar() 59 | 60 | # Set up the optimization problem 61 | options = {} 62 | opt = ParOpt.Optimizer(problem, options) 63 | opt.optimize() 64 | -------------------------------------------------------------------------------- /tests/regression_tests/test_pvec.py: -------------------------------------------------------------------------------- 1 | from paropt import ParOpt 2 | import unittest 3 | import numpy as np 4 | 5 | 6 | class Prob(ParOpt.Problem): 7 | """ 8 | A helper problem instance 9 | """ 10 | 11 | def __init__(self, comm, nvars, ncon): 12 | 
super().__init__(comm, nvars=nvars, ncon=ncon) 13 | 14 | 15 | class PVecTest(unittest.TestCase): 16 | N_PROCS = 2 # num of procs used 17 | 18 | def setUp(self): 19 | # Get rank and size 20 | self.rank = self.comm.rank 21 | self.size = self.comm.size 22 | 23 | # Create problem 24 | self.nvars = self.rank + 10 25 | ncon = 1 26 | self.prob = Prob(self.comm, self.nvars, ncon) 27 | 28 | return 29 | 30 | def test_setitem_list_list(self): 31 | # Create vector 32 | vec = self.prob.createDesignVec() 33 | 34 | # Populate vector 35 | indices = [i for i in range(self.nvars)] 36 | vals = np.random.rand(self.nvars) 37 | vec[indices] = vals 38 | 39 | # Test 40 | for i in indices: 41 | self.assertEqual(vec[i], vals[i]) 42 | 43 | def test_setitem_list_single(self): 44 | # Create vector 45 | vec = self.prob.createDesignVec() 46 | 47 | # Populate vector 48 | indices = [i for i in range(self.nvars)] 49 | vec[indices] = 1.23 50 | 51 | # Test 52 | for i in indices: 53 | self.assertEqual(vec[i], 1.23) 54 | 55 | def test_getitem_list(self): 56 | # Create vector 57 | vec = self.prob.createDesignVec() 58 | vec[:] = 1.23 59 | 60 | # Populate vector 61 | indices = [i for i in range(self.nvars)] 62 | 63 | # Test __getitem__ 64 | for i in range(self.nvars): 65 | self.assertEqual(vec[i], 1.23) 66 | -------------------------------------------------------------------------------- /examples/rosenbrock/example.py: -------------------------------------------------------------------------------- 1 | # Import some utilities 2 | import numpy as np 3 | import mpi4py.MPI as MPI 4 | import matplotlib.pyplot as plt 5 | 6 | # Import ParOpt 7 | from paropt import ParOpt 8 | 9 | 10 | # Create the rosenbrock function class 11 | class Rosenbrock(ParOpt.Problem): 12 | def __init__(self): 13 | # Set the communicator pointer 14 | self.comm = MPI.COMM_WORLD 15 | self.nvars = 2 16 | self.ncon = 1 17 | 18 | # The design history file 19 | self.x_hist = [] 20 | 21 | # Initialize the base class 22 | super(Rosenbrock, 
self).__init__(self.comm, nvars=self.nvars, ncon=self.ncon) 23 | 24 | return 25 | 26 | def getVarsAndBounds(self, x, lb, ub): 27 | """Set the values of the bounds""" 28 | x[:] = -1.0 29 | lb[:] = -2.0 30 | ub[:] = 2.0 31 | return 32 | 33 | def evalObjCon(self, x): 34 | """Evaluate the objective and constraint""" 35 | # Append the point to the solution history 36 | self.x_hist.append(np.array(x)) 37 | 38 | # Evaluate the objective and constraints 39 | fail = 0 40 | con = np.zeros(1) 41 | fobj = 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2 42 | con[0] = x[0] + x[1] + 5.0 43 | return fail, fobj, con 44 | 45 | def evalObjConGradient(self, x, g, A): 46 | """Evaluate the objective and constraint gradient""" 47 | fail = 0 48 | 49 | # The objective gradient 50 | g[0] = 200 * (x[1] - x[0] ** 2) * (-2 * x[0]) - 2 * (1 - x[0]) 51 | g[1] = 200 * (x[1] - x[0] ** 2) 52 | 53 | # The constraint gradient 54 | A[0][0] = 1.0 55 | A[0][1] = 1.0 56 | return fail 57 | 58 | 59 | problem = Rosenbrock() 60 | 61 | options = {"algorithm": "ip"} 62 | opt = ParOpt.Optimizer(problem, options) 63 | opt.optimize() 64 | -------------------------------------------------------------------------------- /src/ParOptOptimizer.h: -------------------------------------------------------------------------------- 1 | #ifndef PAROPT_OPTIMIZER_H 2 | #define PAROPT_OPTIMIZER_H 3 | 4 | #include "ParOptInteriorPoint.h" 5 | #include "ParOptMMA.h" 6 | #include "ParOptTrustRegion.h" 7 | 8 | /* 9 | ParOptOptimizer is a generic interface to the optimizers that are 10 | avaialbe in the ParOpt library. 11 | 12 | These optimizers consist of the following: 13 | 1. ParOptInteriorPoint 14 | 2. ParOptTrustRegion 15 | 3. ParOptMMA 16 | 17 | ParOptTrustRegion and ParOptMMA use the interior point code to solve 18 | the optimization subproblems that are formed at each iteration. 
19 | */ 20 | class ParOptOptimizer : public ParOptBase { 21 | public: 22 | ParOptOptimizer(ParOptProblem *_problem, ParOptOptions *_options); 23 | ~ParOptOptimizer(); 24 | 25 | // Get default optimization options 26 | static void addDefaultOptions(ParOptOptions *options); 27 | ParOptOptions *getOptions(); 28 | 29 | // Get the optimization problem class 30 | ParOptProblem *getProblem(); 31 | 32 | // Perform the optimization 33 | void optimize(); 34 | 35 | // Get the optimized point 36 | void getOptimizedPoint(ParOptVec **x, ParOptScalar **z, ParOptVec **zw, 37 | ParOptVec **zl, ParOptVec **zu); 38 | 39 | // Set the trust-region subproblem. This is required when non-standard 40 | // subproblems are used. This is an advanced feature this it not required 41 | // in most applications. 42 | void setTrustRegionSubproblem(ParOptTrustRegionSubproblem *_subproblem); 43 | 44 | private: 45 | // The problem instance 46 | ParOptProblem *problem; 47 | 48 | // The options 49 | ParOptOptions *options; 50 | 51 | // Store the optimizers 52 | ParOptInteriorPoint *ip; 53 | ParOptTrustRegion *tr; 54 | ParOptMMA *mma; 55 | 56 | // Specific object for the trust-region subproblem 57 | ParOptTrustRegionSubproblem *subproblem; 58 | }; 59 | 60 | #endif // PAROPT_OPTIMIZER_H 61 | -------------------------------------------------------------------------------- /docs/refs.bib: -------------------------------------------------------------------------------- 1 | 2 | @book{Fiacco:McCormick:1990, 3 | author = {Anthony V. Fiacco and Garth P. 
McCormick}, 4 | title = {Nonlinear Programming}, 5 | publisher = {Society for Industrial and Applied Mathematics}, 6 | year = {1990}, 7 | doi = {10.1137/1.9781611971316}} 8 | 9 | @book{Wright:1997:PD-IP-methods, 10 | title={Primal-dual interior-point methods}, 11 | author={Wright, Stephen J}, 12 | volume={54}, 13 | year={1997}, 14 | publisher={{SIAM}} 15 | } 16 | 17 | @book{Nocedal.Wright, 18 | title = {Numerical Optimization}, 19 | author = {Jorge Nocedal and Stephen J. Wright}, 20 | year = {2006}, 21 | edition={2nd}, 22 | place = {New York}, 23 | series={Springer Series in Operations Research and Financial Engineering}, 24 | publisher={Springer} 25 | } 26 | 27 | @article{Wachter:2006:IPOPT, 28 | author = {W{\"a}chter, Andreas and Biegler, Lorenz T.}, 29 | title = {On the Implementation of an Interior-point Filter Line-search Algorithm for Large-scale Nonlinear Programming}, 30 | journal = {Math. Program.}, 31 | issue_date = {May 2006}, 32 | volume = {106}, 33 | number = {1}, 34 | month = may, 35 | year = {2006}, 36 | issn = {0025-5610}, 37 | pages = {25--57}, 38 | numpages = {33}, 39 | doi = {10.1007/s10107-004-0559-y}, 40 | acmid = {1107695}, 41 | publisher = {Springer-Verlag New York, Inc.}, 42 | address = {Secaucus, NJ, USA}, 43 | keywords = {65K05, 90C30, 90C51}} 44 | 45 | @article{Byrd:1994:quasi-Newton-LBFGS, 46 | year={1994}, 47 | issn={0025-5610}, 48 | journal={Mathematical Programming}, 49 | volume={63}, 50 | number={1-3}, 51 | doi={10.1007/BF01582063}, 52 | title={Representations of quasi-{N}ewton matrices and their use in limited memory methods}, 53 | publisher={Springer-Verlag}, 54 | keywords={Quasi-Newton method; constrained optimization; limited memory method; large-scale optimization}, 55 | author={Byrd, Richard H. 
and Nocedal, Jorge and Schnabel, Robert B.}, 56 | pages={129-156}, 57 | language={English}} 58 | -------------------------------------------------------------------------------- /docs/source/options.rst: -------------------------------------------------------------------------------- 1 | Options and generic interface for ParOpt optimziers 2 | =================================================== 3 | 4 | ParOpt consists of three different optimizers: an interior point method, a trust-region method and the method of moving asymptotes. 5 | 6 | These optimizers can be accessed through the common python interface ``ParOpt.Optimizers``. 7 | This python object is allocated with a problem class which inherits from ``ParOpt.Problem``, and a dictionary of options. 8 | 9 | The optimizer interface is generally executed as follows: 10 | 11 | .. code-block:: python 12 | 13 | # Create the optimizer with the specified options. Here we specify a 14 | # trust-region optimizer, with an initial trust region size of 0.1 and 15 | # a maximum size of 10.0. All other options are set to default. 16 | options = { 17 | 'algorithm': 'tr', 18 | 'tr_init_size': 0.1, 19 | 'tr_max_size': 10.0} 20 | opt = ParOpt.Optimizer(problem, options) 21 | 22 | # Execute the optimization 23 | opt.optimize() 24 | 25 | # Extract the optimized values and multipliers 26 | x, z, zw, zl, zu = opt.getOptimizedPoint() 27 | 28 | Switching the above optimization problem to use the interior-point method or the method of moving asymptotes will be as simple as specifying ``'ip'`` or ``'mma'`` as the argument associated with ``'algorithm'``. 29 | 30 | .. _options-label: 31 | 32 | Options 33 | ------- 34 | 35 | The option data is populated directly from the C++ code. 36 | The options are pulled from all optimizers, so not all options are applicable. 37 | In general the options specific to the trust region method have ``tr_`` as a prefix while options associated with the method of moving asymptotes have ``mma_`` as a prefix. 
38 | Options without the ``tr_`` or ``mma_`` prefix apply to the interior point method. 39 | 40 | The full set of options can displayed as follows: 41 | 42 | .. code-block:: python 43 | 44 | from paropt import ParOpt 45 | ParOpt.printOptionSummary() 46 | 47 | This produces the following output: 48 | 49 | .. program-output:: python -c "from paropt import ParOpt; ParOpt.printOptionSummary()" 50 | -------------------------------------------------------------------------------- /examples/openmdao/paraboloid_min.py: -------------------------------------------------------------------------------- 1 | from openmdao.api import ( 2 | Problem, 3 | ScipyOptimizeDriver, 4 | pyOptSparseDriver, 5 | ExecComp, 6 | IndepVarComp, 7 | ) 8 | from paropt.paropt_driver import ParOptDriver 9 | import argparse 10 | 11 | # Create an argument parser 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument( 14 | "--driver", 15 | default="paropt", 16 | choices=["paropt", "scipy", "pyoptsparse"], 17 | help="driver", 18 | ) 19 | parser.add_argument( 20 | "--algorithm", default="ip", choices=["ip", "tr", "mma"], help="optimizer type" 21 | ) 22 | args = parser.parse_args() 23 | driver = args.driver 24 | algorithm = args.algorithm 25 | 26 | # Build the model 27 | prob = Problem() 28 | 29 | # Define the independent variables 30 | indeps = prob.model.add_subsystem("indeps", IndepVarComp()) 31 | indeps.add_output("x", 3.0) 32 | indeps.add_output("y", -4.0) 33 | 34 | # Define the objective and the constraint functions 35 | prob.model.add_subsystem("paraboloid", ExecComp("f = (x-3)**2 + x*y + (y+4)**2 - 3")) 36 | prob.model.add_subsystem("con", ExecComp("c = x**2 + y**2")) 37 | 38 | # Connect the model 39 | prob.model.connect("indeps.x", "paraboloid.x") 40 | prob.model.connect("indeps.y", "paraboloid.y") 41 | prob.model.connect("indeps.x", "con.x") 42 | prob.model.connect("indeps.y", "con.y") 43 | 44 | # Define the optimization problem 45 | prob.model.add_design_var("indeps.x", lower=-50, 
upper=50) 46 | prob.model.add_design_var("indeps.y", lower=-50, upper=50) 47 | prob.model.add_objective("paraboloid.f") 48 | prob.model.add_constraint("con.c", equals=27.0) 49 | 50 | # Create and set the ParOpt driver 51 | if driver == "paropt": 52 | prob.driver = ParOptDriver() 53 | prob.driver.options["algorithm"] = algorithm 54 | elif driver == "scipy": 55 | prob.driver = ScipyOptimizeDriver() 56 | elif driver == "pyoptsparse": 57 | prob.driver = pyOptSparseDriver() 58 | prob.driver.options["optimizer"] = "ParOpt" 59 | 60 | # Run the problem 61 | prob.setup() 62 | prob.run_driver() 63 | 64 | # Print the minimum value 65 | print("Minimum value = {fmin:.2f}".format(fmin=prob["paraboloid.f"][0])) 66 | 67 | # Print the x/y location of the minimum 68 | print( 69 | "(x, y) = ({x:.2f}, {y:.2f})".format(x=prob["indeps.x"][0], y=prob["indeps.y"][0]) 70 | ) 71 | print("x**2 + y**2 = ", prob["indeps.x"][0] ** 2 + prob["indeps.y"][0] ** 2) 72 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # ======================== 2 | # Makefile for PAROPT_DIR/ 3 | # ======================== 4 | 5 | include Makefile.in 6 | include ParOpt_Common.mk 7 | 8 | PAROPT_SUBDIRS = src 9 | 10 | SEARCH_PATTERN=$(addsuffix /*.cpp, ${PAROPT_SUBDIRS}) 11 | PAROPT_OBJS := $(patsubst %.cpp,%.o,$(wildcard ${SEARCH_PATTERN})) 12 | 13 | default: 14 | @for subdir in ${PAROPT_SUBDIRS}; do \ 15 | echo; (cd $$subdir && ${MAKE}) || exit 1; \ 16 | done 17 | ${CXX} ${SO_LINK_FLAGS} ${PAROPT_OBJS} ${PAROPT_EXTERN_LIBS} -o ${PAROPT_DIR}/lib/libparopt.${SO_EXT} 18 | @echo "ctypedef double ParOptScalar" > paropt/cpp_headers/ParOptTypedefs.pxi; 19 | @echo "PAROPT_NPY_SCALAR = np.NPY_DOUBLE" > paropt/ParOptDefs.pxi; 20 | @echo "dtype = np.double" >> paropt/ParOptDefs.pxi; 21 | 22 | debug: 23 | @for subdir in ${PAROPT_SUBDIRS}; do \ 24 | echo; (cd $$subdir && ${MAKE} debug) || exit 1; \ 25 | 
done 26 | ${CXX} ${SO_LINK_FLAGS} ${PAROPT_OBJS} ${PAROPT_EXTERN_LIBS} -o ${PAROPT_DIR}/lib/libparopt.${SO_EXT} 27 | @echo "ctypedef double ParOptScalar" > paropt/cpp_headers/ParOptTypedefs.pxi; 28 | @echo "PAROPT_NPY_SCALAR = np.NPY_DOUBLE" > paropt/ParOptDefs.pxi; 29 | @echo "dtype = np.double" >> paropt/ParOptDefs.pxi; 30 | 31 | complex: 32 | @for subdir in ${PAROPT_SUBDIRS}; do \ 33 | echo; (cd $$subdir && ${MAKE} complex) || exit 1; \ 34 | done 35 | ${CXX} ${SO_LINK_FLAGS} ${PAROPT_OBJS} ${PAROPT_EXTERN_LIBS} -o ${PAROPT_DIR}/lib/libparopt.${SO_EXT} 36 | @echo "ctypedef complex ParOptScalar" > paropt/cpp_headers/ParOptTypedefs.pxi; 37 | @echo "PAROPT_NPY_SCALAR = np.NPY_CDOUBLE" > paropt/ParOptDefs.pxi; 38 | @echo "dtype = complex" >> paropt/ParOptDefs.pxi; 39 | 40 | complex_debug: 41 | @for subdir in ${PAROPT_SUBDIRS}; do \ 42 | echo; (cd $$subdir && ${MAKE} complex_debug) || exit 1; \ 43 | done 44 | ${CXX} ${SO_LINK_FLAGS} ${PAROPT_OBJS} ${PAROPT_EXTERN_LIBS} -o ${PAROPT_DIR}/lib/libparopt.${SO_EXT} 45 | @echo "ctypedef complex ParOptScalar" > paropt/cpp_headers/ParOptTypedefs.pxi; 46 | @echo "PAROPT_NPY_SCALAR = np.NPY_CDOUBLE" > paropt/ParOptDefs.pxi; 47 | @echo "dtype = complex" >> paropt/ParOptDefs.pxi; 48 | 49 | interface: 50 | ${PIP} install -e .\[all\]; \ 51 | 52 | complex_interface: 53 | CXXFLAGS=-DPAROPT_USE_COMPLEX ${PIP} install -e .\[all\]; \ 54 | 55 | clean: 56 | ${RM} lib/libparopt.a lib/*.so 57 | ${RM} paropt/*.so paropt/*.cpp 58 | @for subdir in ${PAROPT_SUBDIRS}; do \ 59 | echo; (cd $$subdir && ${MAKE} $@ ) || exit 1; \ 60 | done 61 | -------------------------------------------------------------------------------- /conda/meta.yaml: -------------------------------------------------------------------------------- 1 | {% set name = "paropt" %} 2 | 3 | {% set version = environ.get('GIT_DESCRIBE_TAG', '') %} 4 | {% if version.startswith('v') %} 5 | {% set version = version[1:] %} 6 | {% endif %} 7 | 8 | {% set build = 0 %} 9 | 10 | {% set mpi = 
mpi or 'openmpi' %} 11 | {% if scalar == "real" %} 12 | {% set build = build + 100 %} 13 | {% endif %} 14 | 15 | package: 16 | name: "paropt" 17 | version: "{{ version }}" 18 | 19 | source: 20 | git_url: https://github.com/smdogroup/paropt.git 21 | 22 | build: 23 | number: {{ build }} 24 | skip: true # [py<=36] 25 | string: py{{ CONDA_PY }}_{{ scalar }}_h{{ PKG_HASH }}_{{ build }} 26 | track_features: 27 | - paropt_complex # [scalar == "complex"] 28 | 29 | requirements: 30 | build: 31 | - python {{ python }} 32 | - numpy 1.18 # [py==37] 33 | - numpy 1.18 # [py==38] 34 | - numpy 1.19 # [py==39] 35 | - numpy 1.22 # [py==310] 36 | - numpy 1.23 # [py==311] 37 | - {{ mpi }} 38 | - openmpi-mpicxx # [mpi == "openmpi"] 39 | - mpich-mpicxx # [mpi == "mpich"] 40 | - {{ compiler('cxx') }} 41 | - make 42 | - libopenblas 43 | - lapack 44 | - metis ==5.1.0 45 | - mpi4py 46 | - cython >=3.0.0 47 | - setuptools 48 | 49 | host: 50 | - python {{ python }} 51 | - pip 52 | - numpy 1.18 # [py==37] 53 | - numpy 1.18 # [py==38] 54 | - numpy 1.19 # [py==39] 55 | - numpy 1.22 # [py==310] 56 | - numpy 1.23 # [py==311] 57 | - {{ mpi }} 58 | - openmpi-mpicxx # [mpi == "openmpi"] 59 | - mpich-mpicxx # [mpi == "mpich"] 60 | - libopenblas 61 | - lapack 62 | - metis ==5.1.0 63 | - mpi4py 64 | - cython >=3.0.0 65 | - setuptools 66 | 67 | run: 68 | - python 69 | - numpy >=1.18.5,<2.0.a0 # [py==37] 70 | - numpy >=1.18.5,<2.0.a0 # [py==38] 71 | - numpy >=1.19.5,<2.0.a0 # [py==39] 72 | - numpy >=1.22.0,<2.0.a0 # [py==310] 73 | - numpy >=1.23.0,<2.0.a0 # [py==311] 74 | - scipy 75 | - {{ mpi }} 76 | - openmpi-mpicxx # [mpi == "openmpi"] 77 | - mpich-mpicxx # [mpi == "mpich"] 78 | - libopenblas 79 | - lapack 80 | - metis ==5.1.0 81 | - mpi4py 82 | 83 | test: 84 | imports: 85 | - paropt 86 | - paropt.ParOpt 87 | - paropt.ParOptEig 88 | 89 | about: 90 | home: https://github.com/smdogroup/paropt 91 | license: LGPLv3 92 | license_family: LGPL 93 | summary: Parallel optimization library 94 | doc_url: 
https://smdogroup.github.io/paropt/ 95 | -------------------------------------------------------------------------------- /examples/toy/toy.py: -------------------------------------------------------------------------------- 1 | # Import some utilities 2 | import numpy as np 3 | import mpi4py.MPI as MPI 4 | 5 | # Import ParOpt 6 | from paropt import ParOpt 7 | 8 | 9 | class Toy(ParOpt.Problem): 10 | def __init__(self, comm): 11 | # Set the communicator pointer 12 | self.comm = comm 13 | self.nvars = 3 14 | self.ncon = 2 15 | 16 | # The design history file 17 | self.x_hist = [] 18 | 19 | # Initialize the base class 20 | super(Toy, self).__init__(self.comm, nvars=self.nvars, ncon=self.ncon) 21 | 22 | return 23 | 24 | def getVarsAndBounds(self, x, lb, ub): 25 | """Set the values of the bounds""" 26 | x[0] = 4.0 27 | x[1] = 3.0 28 | x[2] = 2.0 29 | 30 | lb[:] = 0.0 31 | ub[:] = 5.0 32 | return 33 | 34 | def evalObjCon(self, x): 35 | """Evaluate the objective and constraint""" 36 | # Append the point to the solution history 37 | self.x_hist.append(np.array(x)) 38 | 39 | # Evaluate the objective and constraints 40 | fail = 0 41 | con = np.zeros(self.ncon) 42 | fobj = x[0] ** 2 + x[1] ** 2 + x[2] ** 2 43 | print("x is ", np.array(x)) 44 | print("Objective is ", fobj) 45 | con[0] = 9.0 - (x[0] - 5.0) ** 2 - (x[1] - 2) ** 2 - (x[2] - 1) ** 2 46 | con[1] = 9.0 - (x[0] - 3.0) ** 2 - (x[1] - 4) ** 2 - (x[2] - 3) ** 2 47 | print("constraint values are ", np.array(con)) 48 | return fail, fobj, con 49 | 50 | def evalObjConGradient(self, x, g, A): 51 | """Evaluate the objective and constraint gradient""" 52 | fail = 0 53 | 54 | # The objective gradient 55 | g[0] = 2.0 * x[0] 56 | g[1] = 2.0 * x[1] 57 | g[2] = 2.0 * x[2] 58 | 59 | A[0][0] = -2.0 * (x[0] - 5.0) 60 | A[0][1] = -2.0 * (x[1] - 2.0) 61 | A[0][2] = -2.0 * (x[2] - 1.0) 62 | 63 | A[1][0] = -2.0 * (x[0] - 3.0) 64 | A[1][1] = -2.0 * (x[1] - 4.0) 65 | A[1][2] = -2.0 * (x[2] - 3.0) 66 | return fail 67 | 68 | 69 | # The 
communicator 70 | comm = MPI.COMM_WORLD 71 | 72 | problem = Toy(comm) 73 | 74 | options = { 75 | "algorithm": "mma", 76 | "mma_init_asymptote_offset": 0.5, 77 | "mma_min_asymptote_offset": 0.01, 78 | "mma_bound_relax": 1e-4, 79 | "mma_max_iterations": 100, 80 | } 81 | 82 | # Create the ParOpt problem 83 | opt = ParOpt.Optimizer(problem, options) 84 | 85 | # Optimize 86 | opt.optimize() 87 | x, z, zw, zl, zu = opt.getOptimizedPoint() 88 | -------------------------------------------------------------------------------- /src/ParOptSparseUtils.h: -------------------------------------------------------------------------------- 1 | #ifndef PAR_OPT_SPARSE_UTILS_H 2 | #define PAR_OPT_SPARSE_UTILS_H 3 | 4 | #include "ParOptComplexStep.h" 5 | #include "ParOptVec.h" 6 | 7 | // Compute y = alpha * A * x + beta * y 8 | void ParOptCSRMatVec(double alpha, int nrows, const int *rowp, const int *cols, 9 | const ParOptScalar *Avals, const ParOptScalar *x, 10 | double beta, ParOptScalar *y); 11 | 12 | // Compute A * x -> y 13 | void ParOptCSCMatVec(double alpha, int nrows, int ncols, const int *colp, 14 | const int *rows, const ParOptScalar *Avals, 15 | const ParOptScalar *x, double beta, ParOptScalar *y); 16 | 17 | // Based on the pattern of A, compute A^{T}. 
The numerical values are optional 18 | void ParOptSparseTranspose(int nrows, int ncols, const int *rowp, 19 | const int *cols, const ParOptScalar *Avals, 20 | int *colp, int *rows, ParOptScalar *ATvals); 21 | 22 | // Compute the number of non-zeros in the matrix product A * A^{T} 23 | int ParOptMatMatTransSymbolic(int nrows, int ncols, const int *rowp, 24 | const int *cols, const int *colp, const int *rows, 25 | int *Bcolp, int *flag); 26 | 27 | // Compute the matrix-matrix product A * A^{T} 28 | void ParOptMatMatTransNumeric(int nrows, int ncols, const int *rowp, 29 | const int *cols, const ParOptScalar *Avals, 30 | const int *colp, const int *rows, 31 | const ParOptScalar *ATvals, const int *Bcolp, 32 | int *Brows, ParOptScalar *Bvals, int *flag, 33 | ParOptScalar *tmp); 34 | 35 | // Compute the result C + A * D * A^{T}, where C and D are diagonal 36 | void ParOptMatMatTransNumeric(int nrows, int ncols, const ParOptScalar *cvals, 37 | const int *rowp, const int *cols, 38 | const ParOptScalar *Avals, 39 | const ParOptScalar *dvals, const int *colp, 40 | const int *rows, const ParOptScalar *ATvals, 41 | const int *Bcolp, int *Brows, ParOptScalar *Bvals, 42 | int *flag, ParOptScalar *tmp); 43 | 44 | // Remove duplicates from a list 45 | int ParOptRemoveDuplicates(int *array, int len, int exclude = -1); 46 | 47 | // Sort and make the data structure unique - remove diagonal 48 | void ParOptSortAndRemoveDuplicates(int nvars, int *rowp, int *cols, 49 | int remove_diagonal = 0); 50 | 51 | #endif // PAR_OPT_SPARSE_UTILS_H -------------------------------------------------------------------------------- /.github/workflows/deploy_conda_package.yml: -------------------------------------------------------------------------------- 1 | name: Deploy conda package 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | # Allows you to run this workflow manually from the Actions tab. 8 | workflow_dispatch: 9 | 10 | jobs: 11 | # This job is called deploy_conda_package. 
12 | deploy_conda_package: 13 | # Run on Ubuntu/MacOS 14 | runs-on: ${{ matrix.OS }} 15 | # Ensures conda environment is initialized for all steps 16 | defaults: 17 | run: 18 | shell: bash -l {0} 19 | 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | include: 24 | # real versions 25 | - OS: ubuntu-latest 26 | BUILD_DIR: linux-64 27 | 28 | # complex versions 29 | - OS: macos-latest 30 | BUILD_DIR: osx-64 31 | 32 | name: ParOpt Conda Package Deployment (${{ matrix.OS }}) 33 | 34 | # Recommended if you intend to make multiple deployments in quick succession. 35 | # This will kill any currently running CI from previous commits to the same branch 36 | concurrency: 37 | group: ci-${{ github.ref }}-${{ matrix.OS }} 38 | cancel-in-progress: true 39 | 40 | steps: 41 | 42 | - name: Display run details 43 | run: | 44 | echo "============================================================="; 45 | echo "Run #${GITHUB_RUN_NUMBER}"; 46 | echo "Run ID: ${GITHUB_RUN_ID}"; 47 | lscpu; 48 | echo "Testing: ${GITHUB_REPOSITORY}"; 49 | echo "Triggered by: ${GITHUB_EVENT_NAME}"; 50 | echo "Initiated by: ${GITHUB_ACTOR}"; 51 | echo "============================================================="; 52 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it. 53 | - uses: actions/checkout@v2 54 | - name: Setup miniconda 55 | uses: conda-incubator/setup-miniconda@v2 56 | with: 57 | auto-update-conda: true 58 | python-version: 3.8 59 | 60 | - name: Build ParOpt package 61 | run: | 62 | export PAROPT_DIR=${GITHUB_WORKSPACE}; 63 | echo "PAROPT_DIR=${GITHUB_WORKSPACE}" >> $GITHUB_ENV; 64 | export ANACONDA_API_TOKEN=${{ secrets.ANACONDA_TOKEN }}; 65 | conda install anaconda-client conda-build conda-verify -q -y; 66 | conda config --set anaconda_upload no; 67 | cd ${PAROPT_DIR}/conda; 68 | conda build --no-include-recipe -c conda-forge -c smdogroup --output-folder . 
.; 69 | anaconda upload --label main ${{ matrix.BUILD_DIR }}/*real*.tar.bz2 --force; 70 | anaconda upload --label complex ${{ matrix.BUILD_DIR }}/*complex*.tar.bz2 --force; 71 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build, unit tests, and docs](https://github.com/smdogroup/paropt/actions/workflows/unit_tests.yml/badge.svg)](https://github.com/smdogroup/paropt/actions/workflows/unit_tests.yml) 2 | 3 | [![Anaconda-Server Badge](https://anaconda.org/smdogroup/paropt/badges/version.svg)](https://anaconda.org/smdogroup/paropt) 4 | [![Anaconda-Server Badge](https://anaconda.org/smdogroup/paropt/badges/platforms.svg)](https://anaconda.org/smdogroup/paropt) 5 | [![Anaconda-Server Badge](https://anaconda.org/smdogroup/paropt/badges/downloads.svg)](https://anaconda.org/smdogroup/paropt) 6 | 7 | # ParOpt: A library of parallel optimization algorithms # 8 | --------------------------------------------------------- 9 | 10 | ParOpt is a parallel optimization library for use in general large-scale optimization applications, but is often specifically used for topology and multi-material optimization problems. 11 | The optimizer has the capability to handle large numbers of weighting constraints that arise in the parametrization of multi-material problems. 12 | 13 | The implementation of the optimizer is in C++ and uses MPI. ParOpt is also wrapped with python using Cython. 14 | 15 | The ParOpt theory manual is located here: [ParOpt_theory_manual](docs/ParOpt_theory_manual.pdf) 16 | 17 | Online documentation for ParOpt is located here: [https://smdogroup.github.io/paropt/](https://smdogroup.github.io/paropt/) 18 | 19 | If you use ParOpt, please cite our paper: 20 | 21 | Ting Wei Chin, Mark K. Leader, Graeme J. 
Kennedy, A scalable framework for large-scale 3D multimaterial topology optimization with octree-based mesh adaptation, Advances in Engineering Software, Volume 135, 2019. 22 | 23 | ``` 24 | @article{Chin:2019, 25 | title = {A scalable framework for large-scale 3D multimaterial topology optimization with octree-based mesh adaptation}, 26 | journal = {Advances in Engineering Software}, 27 | volume = {135}, 28 | year = {2019}, 29 | doi = {10.1016/j.advengsoft.2019.05.004}, 30 | author = {Ting Wei Chin and Mark K. Leader and Graeme J. Kennedy}} 31 | ``` 32 | 33 | ParOpt is released under the terms of the LGPLv3 license. 34 | 35 | This program is free software: you can redistribute it and/or modify 36 | it under the terms of the GNU Lesser General Public License as published by 37 | the Free Software Foundation, version 3. 38 | 39 | This program is distributed in the hope that it will be useful, 40 | but WITHOUT ANY WARRANTY; without even the implied warranty of 41 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 42 | GNU Lesser General Public License for more details. 43 | 44 | You should have received a copy of the GNU Lesser General Public License 45 | along with this program. If not, see . 46 | -------------------------------------------------------------------------------- /docs/source/parallel_openmdao_example.rst: -------------------------------------------------------------------------------- 1 | Parallel OpenMDAO example 2 | ========================= 3 | 4 | This examples uses the ParOpt driver for OpenMDAO to solve the 5 | following optimization problem in parallel: 6 | 7 | .. 
math:: 8 | 9 | \begin{align} 10 | \text{min} \qquad & (w - 10)^2 + \sum_i{\left( x_i - 5 \right) ^2} \\ 11 | \text{with respect to} \qquad & x_i \le 10 \\ 12 | \text{subject to} \qquad & \sum_i{x_i ^3} \le 10 \\ 13 | \end{align} 14 | 15 | 16 | There are two inputs to the distributed paraboloid component: 17 | `x` is an array input connected to a distributed IndependentVarComp, 18 | and `w` is a scalar input connected to a non-distributed IndependentVarComp. 19 | 20 | Because `w` is a non-distributed variable connected as an input to a 21 | distributed component, it will be duplicated on each processor (it has 22 | a local size of 1, but a global size equal to the number of 23 | processors). There are two options for connecting variables from a 24 | non-distributed component to a distributed component. This behavior is 25 | governed by defining `src_indices` for the component. This example 26 | uses the default for `w` , where rank0 would get `src_indices = 0`, 27 | rank1 would get `src_indices = 1`, and so on. When the default 28 | behavior is used, there will be a warning issued by OpenMDAO to 29 | clarify what the default behavior is doing, but this warning doesn't 30 | imply that anything is wrong. 31 | 32 | For parallel optimization with ParOpt, the objective and constraint 33 | values are expected to be duplicated on all processors, while the design 34 | variables are distributed across processors. Therefore in this example, 35 | the objective (`y`) and constraint (`a`) values are computed as the sum of 36 | an `Allgather` operation. 37 | 38 | Python implementation 39 | --------------------- 40 | 41 | The python implementation of this problem is as follows 42 | 43 | .. literalinclude:: ../../examples/openmdao/distrib_paraboloid.py 44 | :language: python 45 | 46 | This code can be run with any number of processors (for example, using `mpirun -np <# of processors> python distrib_paraboloid.py`). 
Using two processors, this code results in the following output: 47 | 48 | :: 49 | 50 | /usr/local/lib/python3.9/site-packages/openmdao/core/component.py:905: UserWarning:'dp' : Component is distributed but input 'dp.w' was added without src_indices. Setting src_indices to np.arange(0, 1, dtype=int).reshape((1,)). 51 | /usr/local/lib/python3.9/site-packages/openmdao/core/component.py:905: UserWarning:'dp' : Component is distributed but input 'dp.w' was added without src_indices. Setting src_indices to np.arange(1, 2, dtype=int).reshape((1,)). 52 | f = 133.94 53 | c = -0.00 54 | Rank = 0; x = [1.25992104 1.25992104 1.25992104] 55 | Rank = 1; x = [1.25992104 1.25992104] 56 | 57 | -------------------------------------------------------------------------------- /src/ParOptVec.h: -------------------------------------------------------------------------------- 1 | #ifndef PAR_OPT_VEC_H 2 | #define PAR_OPT_VEC_H 3 | 4 | /* 5 | The following classes define the vector and limited-memory BFGS 6 | classes used by the parallel optimizer. 7 | */ 8 | 9 | #include 10 | 11 | #include "mpi.h" 12 | 13 | // Define the complex ParOpt type 14 | typedef std::complex ParOptComplex; 15 | 16 | // Set the type of the ParOptScalar 17 | #ifdef PAROPT_USE_COMPLEX 18 | #define PAROPT_MPI_TYPE MPI_DOUBLE_COMPLEX 19 | typedef std::complex ParOptScalar; 20 | #else 21 | #define PAROPT_MPI_TYPE MPI_DOUBLE 22 | typedef double ParOptScalar; 23 | #endif // PAROPT_USE_COMPLEX 24 | 25 | /** 26 | ParOpt base class for reference counting 27 | */ 28 | class ParOptBase { 29 | public: 30 | ParOptBase() { ref_count = 0; } 31 | virtual ~ParOptBase() {} 32 | 33 | /// Increase the reference count 34 | void incref() { ref_count++; } 35 | 36 | /// Decrease the reference count and delete the object once 37 | /// the reference count is zero. 
38 | void decref() { 39 | ref_count--; 40 | if (ref_count == 0) { 41 | delete this; 42 | } 43 | } 44 | 45 | private: 46 | int ref_count; 47 | }; 48 | 49 | /* 50 | This vector class defines the basic linear algebra operations and 51 | member functions required for design optimization. 52 | */ 53 | class ParOptVec : public ParOptBase { 54 | public: 55 | virtual ~ParOptVec() {} 56 | 57 | // Perform standard operations required for linear algebra 58 | // ------------------------------------------------------- 59 | virtual void set(ParOptScalar alpha) = 0; 60 | virtual void zeroEntries() = 0; 61 | virtual void copyValues(ParOptVec *vec) = 0; 62 | virtual double norm() = 0; 63 | virtual double maxabs() = 0; 64 | virtual double l1norm() = 0; 65 | virtual ParOptScalar dot(ParOptVec *vec) = 0; 66 | virtual void mdot(ParOptVec **vecs, int nvecs, ParOptScalar *output) = 0; 67 | virtual void scale(ParOptScalar alpha) = 0; 68 | virtual void axpy(ParOptScalar alpha, ParOptVec *x) = 0; 69 | virtual int getArray(ParOptScalar **array) = 0; 70 | }; 71 | 72 | /* 73 | A basic ParOptVec implementation 74 | */ 75 | class ParOptBasicVec : public ParOptVec { 76 | public: 77 | ParOptBasicVec(MPI_Comm _comm, int n); 78 | ~ParOptBasicVec(); 79 | 80 | // Perform standard operations required for linear algebra 81 | // ------------------------------------------------------- 82 | void set(ParOptScalar alpha); 83 | void zeroEntries(); 84 | void copyValues(ParOptVec *vec); 85 | double norm(); 86 | double maxabs(); 87 | double l1norm(); 88 | ParOptScalar dot(ParOptVec *vec); 89 | void mdot(ParOptVec **vecs, int nvecs, ParOptScalar *output); 90 | void scale(ParOptScalar alpha); 91 | void axpy(ParOptScalar alpha, ParOptVec *x); 92 | int getArray(ParOptScalar **array); 93 | 94 | private: 95 | MPI_Comm comm; 96 | int size; 97 | ParOptScalar *x; 98 | }; 99 | 100 | #endif // PAR_OPT_VEC_H 101 | -------------------------------------------------------------------------------- 
/src/ParOptScaledQuasiNewton.h: -------------------------------------------------------------------------------- 1 | #ifndef PAR_OPT_SCALED_QUASI_NEWTON_H 2 | #define PAR_OPT_SCALED_QUASI_NEWTON_H 3 | 4 | #include "ParOptComplexStep.h" 5 | #include "ParOptQuasiNewton.h" 6 | 7 | /* 8 | This class is only used when there is a single constraint and objective is 9 | linear. 10 | 11 | In this case: 12 | L = f - z*c 13 | B ~= Hess = -z*Hess(c) 14 | And we define 15 | B0 ~= -Hess(c) 16 | such that 17 | B = z*B0 18 | 19 | This will give a better approximation to the Hessian of the Lagrangian with B 20 | if we use the quasi-Newton update correction. 21 | */ 22 | class ParOptScaledQuasiNewton : public ParOptCompactQuasiNewton { 23 | public: 24 | ParOptScaledQuasiNewton(ParOptProblem *_prob, ParOptCompactQuasiNewton *_qn) { 25 | int rank; 26 | MPI_Comm_rank(_prob->getMPIComm(), &rank); 27 | if (rank == 0) { 28 | fprintf(stdout, 29 | "[ParOptScaledQuasiNewton.h] initializing " 30 | "ParOptScaledQuasiNewton!\n"); 31 | } 32 | qn = _qn; 33 | qn->incref(); 34 | 35 | d0 = new ParOptScalar[qn->getMaxLimitedMemorySize()]; 36 | y0 = _prob->createDesignVec(); 37 | y0->incref(); 38 | } 39 | 40 | ~ParOptScaledQuasiNewton() { 41 | delete[] d0; 42 | qn->decref(); 43 | y0->decref(); 44 | } 45 | 46 | // Set the type of diagonal to use 47 | void setInitDiagonalType(ParOptQuasiNewtonDiagonalType _diagonal_type) { 48 | qn->setInitDiagonalType(_diagonal_type); 49 | } 50 | 51 | // Reset the internal data 52 | void reset() { qn->reset(); } 53 | 54 | // Perform the quasi-Newton update with the specified multipliers 55 | int update(ParOptVec *x, const ParOptScalar *z, ParOptVec *zw, ParOptVec *s, 56 | ParOptVec *y) { 57 | z0 = z[0]; 58 | 59 | // This should never happen 60 | if (ParOptRealPart(z0) < 0.0) { 61 | z0 = 0.0; 62 | } 63 | 64 | y0->copyValues(y); 65 | y0->scale(1.0 / z0); 66 | return qn->update(x, z, zw, s, y0); 67 | } 68 | 69 | // Perform a matrix-vector multiplication 70 | void 
mult(ParOptVec *x, ParOptVec *y) { 71 | qn->mult(x, y); 72 | y->scale(z0); 73 | } 74 | 75 | // Perform a matrix-vector multiplication and add the result to y 76 | void multAdd(ParOptScalar alpha, ParOptVec *x, ParOptVec *y) { 77 | qn->multAdd(alpha * z0, x, y); 78 | } 79 | 80 | // Get the compact representation for the limited-memory quasi-Newton method 81 | int getCompactMat(ParOptScalar *_b0, const ParOptScalar **_d, 82 | const ParOptScalar **_M, ParOptVec ***Z) { 83 | const ParOptScalar *d; 84 | ParOptScalar b0; 85 | int m; 86 | m = qn->getCompactMat(&b0, &d, _M, Z); 87 | *_b0 = z0 * b0; 88 | for (int i = 0; i < m; i++) { 89 | d0[i] = sqrt(z0) * d[i]; 90 | } 91 | *_d = d0; 92 | return m; 93 | } 94 | 95 | // Get the maximum size of the compact representation 96 | int getMaxLimitedMemorySize() { return qn->getMaxLimitedMemorySize(); } 97 | 98 | private: 99 | ParOptCompactQuasiNewton *qn; 100 | ParOptScalar z0; 101 | ParOptVec *y0; 102 | ParOptScalar *d0; 103 | }; 104 | 105 | #endif // PAR_OPT_SCALED_QUASI_NEWTON_H -------------------------------------------------------------------------------- /examples/sellar/sellar.cpp: -------------------------------------------------------------------------------- 1 | #include "ParOptInteriorPoint.h" 2 | 3 | /* 4 | The following is a simple implementation of a Sellar function with 5 | constraints that can be used to test the parallel optimizer. 6 | */ 7 | class Sellar : public ParOptProblem { 8 | public: 9 | static const int nvars = 4; 10 | static const int ncon = 1; 11 | Sellar(MPI_Comm _comm) : ParOptProblem(_comm) { 12 | setProblemSizes(nvars, ncon, 0); 13 | setNumInequalities(0, 0); 14 | } 15 | 16 | //! Create the quasi-def matrix associated with this problem 17 | ParOptQuasiDefMat *createQuasiDefMat() { 18 | int nwblock = 0; 19 | return new ParOptQuasiDefBlockMat(this, nwblock); 20 | } 21 | 22 | //! 
Get the variables/bounds 23 | void getVarsAndBounds(ParOptVec *xvec, ParOptVec *lbvec, ParOptVec *ubvec) { 24 | // declare design variable and bounds vector 25 | ParOptScalar *x, *lb, *ub; 26 | 27 | // store the memory addresses of the class variables 28 | xvec->getArray(&x); 29 | lbvec->getArray(&lb); 30 | ubvec->getArray(&ub); 31 | 32 | // Set the initial design variables 33 | x[0] = 2.0; 34 | x[1] = 1.0; 35 | x[2] = 0.0; 36 | x[3] = 0.0; 37 | 38 | // set lower and upper bounds to design variables 39 | lb[0] = 0.0; 40 | lb[1] = 0.0; 41 | lb[2] = -1.0; 42 | lb[3] = -1.0; 43 | ub[0] = 10.0; 44 | ub[1] = 10.0; 45 | ub[2] = 3.16; 46 | ub[3] = 24.0; 47 | } 48 | 49 | //! Evaluate the objective and constraints 50 | int evalObjCon(ParOptVec *xvec, ParOptScalar *fobj, ParOptScalar *cons) { 51 | // declare local variables 52 | ParOptScalar *x; 53 | xvec->getArray(&x); 54 | 55 | // the objective function 56 | *fobj = x[1] * x[1] + x[0] + x[2] + exp(-x[3]); 57 | cons[0] = x[0] + x[1] - 1.0; 58 | 59 | return 0; 60 | } 61 | 62 | //! 
Evaluate the objective and constraint gradients 63 | int evalObjConGradient(ParOptVec *xvec, ParOptVec *gvec, ParOptVec **Ac) { 64 | // define the local variables 65 | double *x, *g; 66 | 67 | // get the local variables values 68 | xvec->getArray(&x); 69 | 70 | // derivative of the objective function wrt to the DV 71 | gvec->zeroEntries(); 72 | gvec->getArray(&g); 73 | g[0] = 1.0; 74 | g[1] = 2.0 * x[1]; 75 | g[2] = 1.0; 76 | g[3] = -exp(-x[3]); 77 | 78 | // Derivative of the constraint 79 | Ac[0]->zeroEntries(); 80 | Ac[0]->getArray(&g); 81 | g[0] = 1.0; 82 | g[1] = 1.0; 83 | 84 | return 0; 85 | } 86 | }; 87 | 88 | int main(int argc, char *argv[]) { 89 | MPI_Init(&argc, &argv); 90 | 91 | // Allocate the Sellar function 92 | Sellar *sellar = new Sellar(MPI_COMM_SELF); 93 | sellar->incref(); 94 | 95 | // Allocate the optimizer with default options 96 | ParOptInteriorPoint *opt = new ParOptInteriorPoint(sellar); 97 | opt->incref(); 98 | 99 | opt->checkGradients(1e-6); 100 | 101 | double start = MPI_Wtime(); 102 | opt->optimize(); 103 | double diff = MPI_Wtime() - start; 104 | printf("Time taken: %f seconds \n", diff); 105 | 106 | sellar->decref(); 107 | opt->decref(); 108 | 109 | MPI_Finalize(); 110 | return (0); 111 | } 112 | -------------------------------------------------------------------------------- /src/ParOptBlasLapack.h: -------------------------------------------------------------------------------- 1 | #ifndef PAR_OPT_BLAS_LAPACK_H 2 | #define PAR_OPT_BLAS_LAPACK_H 3 | 4 | #include "ParOptVec.h" 5 | 6 | // The following are the definitions required for BLAS/LAPACK 7 | #ifdef PAROPT_USE_COMPLEX 8 | #define BLASddot zdotu_ 9 | #define BLASdnrm2 dznrm2_ 10 | #define BLASdaxpy zaxpy_ 11 | #define BLASdscal zscal_ 12 | #define BLAStpsv ztpsv_ 13 | #define BLASgbmv zgbmv_ 14 | #define BLASgemm zgemm_ 15 | #define BLASsyrk zsyrk_ 16 | #define LAPACKdgetrf zgetrf_ 17 | #define LAPACKdgetrs zgetrs_ 18 | #define LAPACKdpptrf zpptrf_ 19 | #define LAPACKdpptrs 
zpptrs_ 20 | #else 21 | #define BLASddot ddot_ 22 | #define BLASdnrm2 dnrm2_ 23 | #define BLASdaxpy daxpy_ 24 | #define BLASdscal dscal_ 25 | #define BLAStpsv dtpsv_ 26 | #define BLASgbmv dgbmv_ 27 | #define BLASgemm dgemm_ 28 | #define BLASsyrk dsyrk_ 29 | #define LAPACKdgetrf dgetrf_ 30 | #define LAPACKdgetrs dgetrs_ 31 | #define LAPACKdpptrf dpptrf_ 32 | #define LAPACKdpptrs dpptrs_ 33 | #endif // PAROPT_USE_COMPLEX 34 | 35 | extern "C" { 36 | extern ParOptScalar BLASddot(int *n, ParOptScalar *x, int *incx, 37 | ParOptScalar *y, int *incy); 38 | extern double BLASdnrm2(int *n, ParOptScalar *x, int *incx); 39 | extern void BLASdaxpy(int *n, ParOptScalar *a, ParOptScalar *x, int *incx, 40 | ParOptScalar *y, int *incy); 41 | extern void BLASdscal(int *n, ParOptScalar *a, ParOptScalar *x, int *incx); 42 | 43 | // Compute C := alpha*A*A**T + beta*C or C := alpha*A**T*A + beta*C 44 | extern void BLASsyrk(const char *uplo, const char *trans, int *n, int *k, 45 | ParOptScalar *alpha, ParOptScalar *a, int *lda, 46 | ParOptScalar *beta, ParOptScalar *c, int *ldc); 47 | 48 | // Solve A*x = b or A^T*x = b where A is in packed format 49 | extern void BLAStpsv(const char *uplo, const char *transa, const char *diag, 50 | int *n, ParOptScalar *a, ParOptScalar *x, int *incx); 51 | 52 | // Level 2 BLAS routines 53 | // y = alpha * A * x + beta * y, for a general matrix 54 | extern void BLASgemv(const char *c, int *m, int *n, ParOptScalar *alpha, 55 | ParOptScalar *a, int *lda, ParOptScalar *x, int *incx, 56 | ParOptScalar *beta, ParOptScalar *y, int *incy); 57 | 58 | // Level 3 BLAS routines 59 | // C := alpha*op( A )*op( B ) + beta*C, 60 | extern void BLASgemm(const char *ta, const char *tb, int *m, int *n, int *k, 61 | ParOptScalar *alpha, ParOptScalar *a, int *lda, 62 | ParOptScalar *b, int *ldb, ParOptScalar *beta, 63 | ParOptScalar *c, int *ldc); 64 | 65 | // General factorization routines 66 | extern void LAPACKdgetrf(int *m, int *n, ParOptScalar *a, int *lda, int *ipiv, 67 
| int *info); 68 | extern void LAPACKdgetrs(const char *c, int *n, int *nrhs, ParOptScalar *a, 69 | int *lda, int *ipiv, ParOptScalar *b, int *ldb, 70 | int *info); 71 | 72 | // Factorization of packed-storage matrices 73 | extern void LAPACKdpptrf(const char *c, int *n, ParOptScalar *ap, int *info); 74 | extern void LAPACKdpptrs(const char *c, int *n, int *nrhs, ParOptScalar *ap, 75 | ParOptScalar *rhs, int *ldrhs, int *info); 76 | } 77 | 78 | #endif 79 | -------------------------------------------------------------------------------- /docs/source/parallel_rosenbrock.rst: -------------------------------------------------------------------------------- 1 | Parallel Rosenbrock 2 | =================== 3 | 4 | This problem deals with a generalized Rosenbrock function in parallel. In this case the objective function is 5 | 6 | .. math:: 7 | 8 | f(x) = \sum_{i=1}^{n-1} (1 - x_{i})^{2} + 100(x_{i+1} - x_{i}^2)^2 9 | 10 | Two constraints are imposed in this problem. The first is that the point remain within a ball of radius 1/2 centered on the origin: 11 | 12 | .. math:: 13 | 14 | c_{1}(x) = \frac{1}{4} - \sum_{i=1}^{n} x_{i}^{2} \ge 0 15 | 16 | The second is a linear constraint that the sum of all variables be greater than -10: 17 | 18 | .. math:: 19 | 20 | c_{2}(x) = \sum_{i=1}^{n} x_{i} + 10 \ge 0 21 | 22 | C++ implementation 23 | ------------------ 24 | 25 | This example is a further demonstration of the ParOptProblem interface class making use of a distributed design vector, separable sparse constraints, and Hessian-vector products. 26 | 27 | The Hessian-vector products can be used to accelerate the convergence of the optimizer. 28 | They are generally used once the optimization problem has converged to a point that is closer to the optimum. 29 | These products are implemented by the user in the ParOptProblem class, using the following prototype: 30 | 31 | .. 
code-block:: c++ 32 | 33 | int evalHvecProduct( ParOptVec *xvec, 34 | ParOptScalar *z, ParOptVec *zwvec, 35 | ParOptVec *pxvec, ParOptVec *hvec ); 36 | 37 | This function provides access to Hessian-vector products of the Lagrangian. 38 | In ParOpt the Lagrangian function is defined as 39 | 40 | .. math:: 41 | 42 | \mathcal{L} \triangleq f(x) - z^{T} c(x) - z_{w}^{T} c_{w}(x) 43 | 44 | where :math:`\mathcal{L}` is the Lagrangian, :math:`f(x), c(x), c_{w}(x)` are the objective, dense constraints and sparse separable constraints, respectively, and :math:`z, z_{w}` are multipliers associated with the dense and sparse constraints. 45 | The Hessian-vector product is then computed as 46 | 47 | .. math:: 48 | 49 | h = \nabla^{2} \mathcal{L}(x, z, z_{w}) p_{x} 50 | 51 | The interface to the sparse separable constraint code consists of four functions. 52 | These functions consist of the following: 53 | 54 | .. code-block:: c++ 55 | 56 | void evalSparseCon( ParOptVec *x, ParOptVec *out ); 57 | void addSparseJacobian( ParOptScalar alpha, ParOptVec *x, 58 | ParOptVec *px, ParOptVec *out ); 59 | void addSparseJacobianTranspose( ParOptScalar alpha, ParOptVec *x, 60 | ParOptVec *pzw, ParOptVec *out ); 61 | void addSparseInnerProduct( ParOptScalar alpha, ParOptVec *x, 62 | ParOptVec *cvec, ParOptScalar *A ); 63 | 64 | These member functions provide the following mathematical operations: 65 | 66 | .. math:: 67 | 68 | \begin{align} 69 | \mathrm{out} & \leftarrow c_{w} \leftarrow c_{w}(x) \\ 70 | \mathrm{out} & \leftarrow \alpha A_{w}(x) p_{x} \\ 71 | \mathrm{out} & \leftarrow \alpha A_{w}(x)^{T} p_{z_{w}} \\ 72 | \mathrm{out} & \leftarrow \alpha A_{w} C A_{w}(x)^{T} \\ 73 | \end{align} 74 | 75 | Here :math:`A_{w}(x) = \nabla c_{w}(x)$` and :math:`C` is a diagonal matrix. 76 | 77 | .. 
literalinclude:: ../../examples/rosenbrock/rosenbrock.cpp 78 | :language: c++ -------------------------------------------------------------------------------- /INSTALL.txt: -------------------------------------------------------------------------------- 1 | Installing ParOpt on MacOS 2 | -------------------------- 3 | Prerequisites: 4 | - Assumes you already have python3. If not, you can install it with homebrew (or your preference) 5 | 6 | Notes: 7 | - This was done on Mac OS 10.13.6 but should work on most recently updated versions. 8 | - This assumes installation using python3 but it can be done with python2 as well 9 | 10 | 11 | 0) Open a terminal window and go to the home directory. 12 | $ cd ~ 13 | 14 | 1) Download Xcode 15 | 16 | 2) Install homebrew if needed 17 | - In a terminal window, run 18 | $ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" 19 | 20 | - Open your .bash_profile file under the home directory. At the bottom, add 21 | export PATH=/usr/local/bin:/usr/local/sbin:$PATH 22 | - Then in the terminal, run 23 | $ source .bash_profile 24 | - Note: from here, know that any time you add something to .bash_profile, you will have to run source .bash_profile afterwards each time 25 | 26 | 3) Install openmpi 27 | $ brew install open-mpi 28 | 29 | 4) Install python packages 30 | $ pip3 install numpy 31 | $ pip3 install Cython 32 | $ pip3 install mpi4py 33 | $ pip3 install matplotlib 34 | 35 | 5) Modify .bash_profile again. Add: 36 | export PYTHONPATH=$PYTHONPATH:"Users//git/" 37 | 38 | 5) Set up the directory and clone ParOpt from GitHub: 39 | $ mkdir git 40 | $ git clone https://github.com/gjkennedy/paropt.git 41 | $ cd paropt 42 | $ cp Makefile.in.info Makefile.in 43 | 44 | 6) Open Makefile.in. Uncomment the region where it says "For MAC OS X, use the following settings", and comment the similar statement above it about linux. 45 | 46 | 7) Make it. Run: 47 | $ make 48 | 49 | 8) Make the interface to python. 
Run: 50 | $ python3 setup.py install 51 | $ make interface 52 | 53 | 9) Check that the python link is in place. Open a python terminal outside of the paropt 54 | directory: 55 | $ cd .. 56 | $ python3 57 | $ from paropt import ParOpt 58 | $ CTRL+D 59 | 60 | 10) Done! Now try an example: 61 | $ cd paropt/examples/python/ 62 | $ python3 toy.py 63 | 64 | Installing ParOpt on Linux 65 | -------------------------- 66 | Prerequisites: 67 | - Assumes you already have python3 and git installed 68 | Notes: 69 | - This assumes installation using python3 but it can be done with python2 as well 70 | - Need root privileges to install some libraries 71 | 72 | 0) Open a terminal window and go to the home directory. 73 | $ cd ~ 74 | 75 | 1) Install libraries 76 | $ sudo apt-get install libblas-dev 77 | $ sudo apt-get install liblapack-dev 78 | $ sudo apt-get install libopenmpi-dev 79 | $ sudo apt-get install openmpi-bin 80 | 81 | 2) Install python packages 82 | $ pip3 install --upgrade pip 83 | $ pip3 install numpy 84 | $ pip3 install Cython 85 | $ pip3 install mpi4py 86 | $ pip3 install matplotlib 87 | 88 | 3) Modify the .bashrc file by adding the following lines to the end of it: 89 | export PYTHONPATH=${PYTHONPATH}:${HOME}/git/paropt 90 | export PAROPT_DIR=/home//git/paropt 91 | PATH=$PATH:$PAROPT_DIR 92 | 93 | 4) Apply the changes to the .bashrc file: 94 | $ source .bashrc 95 | 96 | 5) Set up the directory and clone ParOpt from GitHub: 97 | $ mkdir git 98 | $ git clone https://github.com/gjkennedy/paropt.git 99 | $ cd paropt 100 | $ cp Makefile.in.info Makefile.in 101 | 102 | 6) Make it. Run: 103 | $ make 104 | 105 | 7) Make the interface to python. Run: 106 | $ python3 setup.py install 107 | $ make interface 108 | 109 | 8) Check that the python link is in place. Open a python terminal outside of the paropt 110 | directory: 111 | $ cd .. 112 | $ python3 113 | $ from paropt import ParOpt 114 | $ CTRL+D 115 | 116 | 9) Done! 
Now try an example: 117 | $ cd paropt/examples/python/ 118 | $ python3 toy.py 119 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from subprocess import check_output 3 | 4 | # Numpy/mpi4py must be installed prior to installing TACS 5 | import numpy 6 | import mpi4py 7 | 8 | # Import distutils 9 | from setuptools import setup 10 | from distutils.core import Extension as Ext 11 | from Cython.Build import cythonize 12 | 13 | 14 | # Convert from local to absolute directories 15 | def get_global_dir(files): 16 | tmr_root = os.path.abspath(os.path.dirname(__file__)) 17 | new = [] 18 | for f in files: 19 | new.append(os.path.join(tmr_root, f)) 20 | return new 21 | 22 | 23 | def get_mpi_flags(): 24 | # Split the output from the mpicxx command 25 | args = check_output(["mpicxx", "-show"]).decode("utf-8").split() 26 | 27 | # Determine whether the output is an include/link/lib command 28 | inc_dirs, lib_dirs, libs = [], [], [] 29 | for flag_ in args: 30 | try: 31 | flag = flag_.decode("utf-8") 32 | except: 33 | flag = flag_ 34 | 35 | if flag[:2] == "-I": 36 | inc_dirs.append(flag[2:]) 37 | elif flag[:2] == "-L": 38 | lib_dirs.append(flag[2:]) 39 | elif flag[:2] == "-l": 40 | libs.append(flag[2:]) 41 | 42 | return inc_dirs, lib_dirs, libs 43 | 44 | 45 | inc_dirs, lib_dirs, libs = get_mpi_flags() 46 | 47 | # Relative paths for the include/library directories 48 | rel_inc_dirs = ["src"] 49 | rel_lib_dirs = ["lib"] 50 | libs.extend(["paropt"]) 51 | 52 | # Convert from relative to absolute directories 53 | inc_dirs.extend(get_global_dir(rel_inc_dirs)) 54 | lib_dirs.extend(get_global_dir(rel_lib_dirs)) 55 | 56 | # Add the numpy/mpi4py directories 57 | inc_dirs.extend([numpy.get_include(), mpi4py.get_include()]) 58 | 59 | # Add tacs-dev/lib as a runtime directory 60 | runtime_lib_dirs = get_global_dir(["lib"]) 61 | 62 | exts = [] 63 | mod = 
"ParOpt" 64 | exts.append( 65 | Ext( 66 | "paropt.ParOpt", 67 | sources=["paropt/ParOpt.pyx"], 68 | language="c++", 69 | include_dirs=inc_dirs, 70 | libraries=libs, 71 | library_dirs=lib_dirs, 72 | runtime_library_dirs=runtime_lib_dirs, 73 | ) 74 | ) 75 | exts.append( 76 | Ext( 77 | "paropt.ParOptEig", 78 | sources=["paropt/ParOptEig.pyx"], 79 | language="c++", 80 | include_dirs=inc_dirs, 81 | libraries=libs, 82 | library_dirs=lib_dirs, 83 | runtime_library_dirs=runtime_lib_dirs, 84 | ) 85 | ) 86 | 87 | for e in exts: 88 | e.cython_directives = { 89 | "language_level": "3", 90 | "embedsignature": True, 91 | "binding": True, 92 | } 93 | 94 | optional_dependencies = { 95 | "testing": ["testflo>=1.4.7"], 96 | "docs": [ 97 | "sphinx", 98 | "breathe", 99 | "sphinx-rtd-theme", 100 | "sphinxcontrib-programoutput", 101 | "sphinxcontrib-bibtex", 102 | "nbsphinx", 103 | "ipython", 104 | ], 105 | } 106 | 107 | # Add an optional dependency that concatenates all others 108 | optional_dependencies["all"] = sorted( 109 | [ 110 | dependency 111 | for dependencies in optional_dependencies.values() 112 | for dependency in dependencies 113 | ] 114 | ) 115 | 116 | setup( 117 | name="paropt", 118 | version="2.1.5", 119 | description="Parallel interior-point optimizer", 120 | author="Graeme J. 
Kennedy", 121 | author_email="graeme.kennedy@ae.gatech.edu", 122 | install_requires=["numpy", "mpi4py>=3.1.1"], 123 | extras_require=optional_dependencies, 124 | ext_modules=cythonize( 125 | exts, 126 | language="c++", 127 | include_path=inc_dirs, 128 | compiler_directives={"language_level": "3"}, 129 | ), 130 | ) 131 | -------------------------------------------------------------------------------- /examples/rosenbrock/rosenbrock.py: -------------------------------------------------------------------------------- 1 | # Import some utilities 2 | import numpy as np 3 | import mpi4py.MPI as MPI 4 | import matplotlib.pyplot as plt 5 | 6 | # Import ParOpt 7 | from paropt import ParOpt 8 | 9 | 10 | # Create the rosenbrock function class 11 | class Rosenbrock(ParOpt.Problem): 12 | def __init__(self): 13 | # Set the communicator pointer 14 | self.comm = MPI.COMM_WORLD 15 | self.nvars = 2 16 | self.ncon = 1 17 | 18 | # The design history file 19 | self.x_hist = [] 20 | 21 | # Initialize the base class 22 | super(Rosenbrock, self).__init__(self.comm, nvars=self.nvars, ncon=self.ncon) 23 | 24 | return 25 | 26 | def getVarsAndBounds(self, x, lb, ub): 27 | """Set the values of the bounds""" 28 | x[:] = -2.0 + np.random.uniform(size=len(x)) 29 | lb[:] = -2.0 30 | ub[:] = 2.0 31 | return 32 | 33 | def evalObjCon(self, x): 34 | """Evaluate the objective and constraint""" 35 | # Append the point to the solution history 36 | self.x_hist.append(np.array(x)) 37 | 38 | # Evaluate the objective and constraints 39 | fail = 0 40 | con = np.zeros(1) 41 | fobj = 100.0 * (x[1] - x[0] ** 2) ** 2 + (1.0 - x[0]) ** 2 42 | con[0] = x[0] + x[1] + 5.0 43 | return fail, fobj, con 44 | 45 | def evalObjConGradient(self, x, g, A): 46 | """Evaluate the objective and constraint gradient""" 47 | fail = 0 48 | 49 | # The objective gradient 50 | g[0] = -400.0 * (x[1] - x[0] ** 2) * x[0] - 2.0 * (1.0 - x[0]) 51 | g[1] = 200.0 * (x[1] - x[0] ** 2) 52 | 53 | # The constraint gradient 54 | A[0][0] = -1.0 55 | 
A[0][1] = -1.0 56 | return fail 57 | 58 | 59 | def plot_it_all(problem): 60 | """ 61 | Plot a carpet plot with the search histories for steepest descent, 62 | conjugate gradient and BFGS from the same starting point. 63 | """ 64 | 65 | # Create the data for the carpet plot 66 | n = 150 67 | xlow = -4.0 68 | xhigh = 4.0 69 | x1 = np.linspace(xlow, xhigh, n) 70 | r = np.zeros((n, n)) 71 | 72 | for j in range(n): 73 | for i in range(n): 74 | fail, fobj, con = problem.evalObjCon([x1[i], x1[j]]) 75 | r[j, i] = fobj 76 | 77 | # Assign the contour levels 78 | levels = np.min(r) + np.linspace(0, 1.0, 75) ** 2 * (np.max(r) - np.min(r)) 79 | 80 | # Create the plot 81 | fig = plt.figure(facecolor="w") 82 | plt.contour(x1, x1, r, levels) 83 | 84 | colours = ["-bo", "-ko", "-co", "-mo", "-yo", "-bx", "-kx", "-cx", "-mx", "-yx"] 85 | 86 | options = { 87 | "algorithm": "tr", 88 | "qn_type": "bfgs", 89 | "qn_update_type": "damped_update", 90 | "tr_init_size": 0.5, 91 | "tr_min_size": 1e-6, 92 | "tr_max_size": 10.0, 93 | "tr_eta": 0.1, 94 | "tr_adaptive_gamma_update": True, 95 | "tr_max_iterations": 200, 96 | } 97 | 98 | for k in range(len(colours)): 99 | # Optimize the problem 100 | problem.x_hist = [] 101 | rosen.checkGradients(1e-6) 102 | 103 | opt = ParOpt.Optimizer(rosen, options) 104 | opt.optimize() 105 | 106 | # Copy out the steepest descent points 107 | sd = np.zeros((2, len(problem.x_hist))) 108 | for i in range(len(problem.x_hist)): 109 | sd[0, i] = problem.x_hist[i][0] 110 | sd[1, i] = problem.x_hist[i][1] 111 | 112 | plt.plot(sd[0, :], sd[1, :], colours[k], label="IP %d" % (sd.shape[1])) 113 | plt.plot(sd[0, -1], sd[1, -1], "-ro") 114 | 115 | plt.legend() 116 | plt.axis([xlow, xhigh, xlow, xhigh]) 117 | plt.show() 118 | 119 | 120 | # Create the Rosenbrock problem class 121 | rosen = Rosenbrock() 122 | 123 | plot_it_all(rosen) 124 | -------------------------------------------------------------------------------- /examples/dymos/brachistochrone/brachistochrone.py: 
-------------------------------------------------------------------------------- 1 | import openmdao.api as om 2 | from openmdao.utils.assert_utils import assert_near_equal 3 | import dymos as dm 4 | from dymos.examples.plotting import plot_results 5 | from dymos.examples.brachistochrone import BrachistochroneODE 6 | import matplotlib.pyplot as plt 7 | from paropt.paropt_sparse_driver import ParOptSparseDriver 8 | 9 | # Initialize the Problem and the optimization driver 10 | p = om.Problem(model=om.Group()) 11 | 12 | # Create a trajectory and add a phase to it 13 | traj = p.model.add_subsystem("traj", dm.Trajectory()) 14 | 15 | transcript = dm.GaussLobatto(num_segments=10) 16 | phase = traj.add_phase( 17 | "phase0", 18 | dm.Phase(ode_class=BrachistochroneODE, transcription=transcript), 19 | ) 20 | 21 | # Set the variables 22 | phase.set_time_options(initial_bounds=(0, 0), duration_bounds=(0.5, 10)) 23 | 24 | phase.add_state( 25 | "x", 26 | rate_source="xdot", 27 | units="m", 28 | fix_initial=True, 29 | fix_final=True, 30 | solve_segments=False, 31 | ) 32 | 33 | phase.add_state( 34 | "y", 35 | rate_source="ydot", 36 | units="m", 37 | fix_initial=True, 38 | fix_final=True, 39 | solve_segments=False, 40 | ) 41 | 42 | phase.add_state( 43 | "v", 44 | rate_source="vdot", 45 | units="m/s", 46 | fix_initial=True, 47 | fix_final=False, 48 | solve_segments=False, 49 | ) 50 | 51 | phase.add_control( 52 | "theta", 53 | continuity=True, 54 | rate_continuity=True, 55 | units="deg", 56 | lower=0.01, 57 | upper=179.9, 58 | ) 59 | 60 | phase.add_parameter( 61 | "g", 62 | units="m/s**2", 63 | val=9.80665, 64 | ) 65 | 66 | # Minimize time at the end of the phase 67 | phase.add_objective("time", loc="final", scaler=10) 68 | p.model.linear_solver = om.DirectSolver() 69 | 70 | # Setup the Problem 71 | p.setup() 72 | 73 | # Set the initial values 74 | p["traj.phase0.t_initial"] = 0.0 75 | p["traj.phase0.t_duration"] = 2.0 76 | 77 | p["traj.phase0.states:x"] = phase.interp(ys=[0, 10], 
nodes="state_input") 78 | p["traj.phase0.states:y"] = phase.interp(ys=[10, 5], nodes="state_input") 79 | p["traj.phase0.states:v"] = phase.interp(ys=[0, 9.9], nodes="state_input") 80 | p["traj.phase0.controls:theta"] = phase.interp(ys=[5, 100.5], nodes="control_input") 81 | 82 | # Create the driver 83 | p.driver = ParOptSparseDriver() 84 | 85 | # Allow OpenMDAO to automatically determine our sparsity pattern. 86 | # Doing so can significant speed up the execution of Dymos. 87 | p.driver.declare_coloring(show_summary=True, show_sparsity=True) 88 | 89 | options = { 90 | "algorithm": "ip", 91 | "norm_type": "infinity", 92 | "qn_type": "bfgs", 93 | "qn_subspace_size": 10, 94 | "starting_point_strategy": "least_squares_multipliers", 95 | "qn_update_type": "damped_update", 96 | "abs_res_tol": 1e-6, 97 | "barrier_strategy": "monotone", 98 | "armijo_constant": 1e-5, 99 | "penalty_gamma": 100.0, 100 | "max_major_iters": 500, 101 | } 102 | 103 | for key in options: 104 | p.driver.options[key] = options[key] 105 | 106 | # Run the driver to solve the problem 107 | p.run_driver() 108 | 109 | # Test the results 110 | assert_near_equal( 111 | p.get_val("traj.phase0.timeseries.time")[-1], 1.8016, tolerance=1.0e-3 112 | ) 113 | 114 | # Generate the explicitly simulated trajectory 115 | exp_out = traj.simulate() 116 | 117 | plot_results( 118 | [ 119 | ( 120 | "traj.phase0.timeseries.states:x", 121 | "traj.phase0.timeseries.states:y", 122 | "x (m)", 123 | "y (m)", 124 | ), 125 | ( 126 | "traj.phase0.timeseries.time", 127 | "traj.phase0.timeseries.controls:theta", 128 | "time (s)", 129 | "theta (deg)", 130 | ), 131 | ], 132 | title="Brachistochrone Solution\nHigh-Order Gauss-Lobatto Method", 133 | p_sol=p, 134 | p_sim=exp_out, 135 | ) 136 | 137 | plt.show() 138 | -------------------------------------------------------------------------------- /docs/source/sellar.rst: -------------------------------------------------------------------------------- 1 | Sellar problem 2 | ============== 
3 | 4 | To illustrate the application of ParOpt, consider the following optimization problem with the Sellar objective function: 5 | 6 | .. math:: 7 | 8 | \begin{align} 9 | \text{min} \qquad & x_1 + x_2^2 + x_3 + e^{-x_4} \\ 10 | \text{with respect to} \qquad & 0 \le x_{1} \le 10 \\ 11 | & 0 \le x_{2} \le 10 \\ 12 | & -1 \le x_{3} \le 3.16 \\ 13 | & -1 \le x_{4} \le 24 \\ 14 | \text{subject to} \qquad & x_{1} + x_{2} - 1 \ge 0 \\ 15 | \end{align} 16 | 17 | C++ implementation 18 | ------------------ 19 | 20 | The first step to use the ParOpt optimization library is to create a problem class which inherits from ParOptProblem. 21 | This class is used by ParOpt's interior-point or trust-region algorithms to get the function and gradient values from the problem. 22 | 23 | Key functions required for the implementation of a ParOptProblem class are described below. 24 | 25 | .. code-block:: c++ 26 | 27 | void getVarsAndBounds( ParOptVec *xvec, 28 | ParOptVec *lbvec, ParOptVec *ubvec ); 29 | 30 | To begin the optimization problem, the optimizer must know the starting point and the variable bounds for the problem 31 | The member function getVarsAndBounds retrieves this information. 32 | On return, the initial design variables are written to the design vector x, and the lower and upper bounds are written to the vectors lb and ub, respectively. 33 | 34 | .. code-block:: c++ 35 | 36 | int evalObjCon( ParOptVec *xvec, 37 | ParOptScalar *fobj, ParOptScalar *cons ); 38 | int evalObjConGradient( ParOptVec *xvec, 39 | ParOptVec *gvec, ParOptVec **Ac ); 40 | 41 | The class inheriting from ParOptProblem must also implement member functions to evaluate the objective and constraints and their gradients. 42 | The function evalObjCon takes in the design vector x, and returns a scalar value in fobj, and an array of the dense constraint values in cons. 43 | When the code is run in parallel, the same objective value and constraint values must be returned on all processors. 
44 | The function evalObjConGradient sets the values of the objective and constraint gradients into the vector gvec, and the array of vectors Ac, respectively. 45 | If an error is encountered during the evaluation of either the functions or gradients, a non-zero error code should be returned to terminate the optimization. 46 | 47 | When implemented in C++, the complete Sellar problem is: 48 | 49 | .. literalinclude:: ../../examples/sellar/sellar.cpp 50 | :language: c++ 51 | 52 | The local components of the design vector can be accessed by making a call to getArray. 53 | 54 | .. code-block:: c++ 55 | 56 | ParOptScalar *x; 57 | xvec->getArray(&x); 58 | 59 | In this case, the code can only be run in serial, so the design vector is not distributed. 60 | 61 | All objects in ParOpt are reference counted. 62 | Use incref() to increase the reference count after an object is allocated. 63 | When the object is no longer needed, call decref() to decrease the reference count and possibly delete the object. 64 | Direct calls to delete the object should not be used. 65 | 66 | Python implementation 67 | --------------------- 68 | 69 | The python implementation of this problem is also straightforward. 70 | In an analogous manner, the python implementation uses a class inherited from ParOpt.Problem, a python wrapper for the CyParOptProblem class. 71 | This inherited class must implement the getVarsAndBounds, evalObjCon and evalObjConGradient member functions. 72 | Note that in python, the function signature is slightly different for evalObjCon. 73 | Please note, the vectors returned to python access the underlying memory in ParOpt directly, therefore sometimes care must be taken to avoid expressions that do not assign values to the references returned from ParOpt. 74 | These vectors are of type ParOpt.PVec, but act in many ways like a numpy array. 75 | 76 | ..
literalinclude:: ../../examples/sellar/sellar.py 77 | :language: python 78 | -------------------------------------------------------------------------------- /examples/limited_memory_test/limited_memory_test.py: -------------------------------------------------------------------------------- 1 | # Import numpy 2 | import numpy as np 3 | import mpi4py.MPI as MPI 4 | 5 | # Import ParOpt 6 | from paropt import ParOpt 7 | 8 | # Import argparse 9 | import argparse 10 | 11 | # Import matplotlib 12 | import matplotlib.pylab as plt 13 | 14 | 15 | # Random quadratic problem class 16 | class Quadratic(ParOpt.Problem): 17 | def __init__(self, eigs): 18 | # Set the communicator pointer 19 | self.comm = MPI.COMM_WORLD 20 | self.nvars = len(eigs) 21 | self.ncon = 1 22 | 23 | # Record the quadratic terms 24 | self.A = self.createRandomProblem(eigs) 25 | self.b = np.random.uniform(self.nvars) 26 | self.Acon = np.ones(self.nvars) 27 | self.bcon = 0.0 28 | 29 | # Initialize the base class 30 | super(Quadratic, self).__init__(self.comm, nvars=self.nvars, ncon=self.ncon) 31 | 32 | return 33 | 34 | def getVarsAndBounds(self, x, lb, ub): 35 | """Set the values of the bounds""" 36 | x[:] = -2.0 + np.random.uniform(size=len(x)) 37 | lb[:] = -5.0 38 | ub[:] = 5.0 39 | return 40 | 41 | def evalObjCon(self, x): 42 | """Evaluate the objective and constraint""" 43 | # Append the point to the solution history 44 | 45 | # Evaluate the objective and constraints 46 | fail = 0 47 | con = np.zeros(1) 48 | 49 | fobj = 0.5 * np.dot(x, np.dot(self.A, x)) + np.dot(self.b, x) 50 | con[0] = np.dot(x, self.Acon) + self.bcon 51 | return fail, fobj, con 52 | 53 | def evalObjConGradient(self, x, g, A): 54 | """Evaluate the objective and constraint gradient""" 55 | fail = 0 56 | 57 | # The objective gradient 58 | g[:] = np.dot(self.A, x) + self.b 59 | 60 | # The constraint gradient 61 | A[0][:] = self.Acon[:] 62 | 63 | return fail 64 | 65 | def createRandomProblem(self, eigs): 66 | """ 67 | Create a random 
matrix with the given eigenvalues 68 | """ 69 | 70 | # The dimension of the matrix 71 | n = len(eigs) 72 | 73 | # Create a random square (n x n) matrix 74 | B = np.random.uniform(size=(n, n)) 75 | 76 | # Orthogonalize the columns of B so that Q = range(B) = R^{n} 77 | Q, s, v = np.linalg.svd(B) 78 | 79 | # Compute A = Q*diag(eigs)*Q^{T} 80 | A = np.dot(Q, np.dot(np.diag(eigs), Q.T)) 81 | 82 | return A 83 | 84 | 85 | # Parse the arguments 86 | parser = argparse.ArgumentParser() 87 | parser.add_argument("--qn_type", type=str, default="sr1") 88 | args = parser.parse_args() 89 | qn_type = args.qn_type 90 | 91 | # This test compares a limited-memory Hessian with their dense counterparts 92 | n = 50 93 | eigs = np.linspace(1, 1 + n, n) 94 | problem = Quadratic(eigs) 95 | 96 | if qn_type == "sr1": 97 | # Create the LSR1 object 98 | qn = ParOpt.LSR1(problem, subspace=n) 99 | else: 100 | # Create the limited-memory BFGS object 101 | qn = ParOpt.LBFGS(problem, subspace=n) 102 | 103 | # Create a random set of steps and their corresponding vectors 104 | S = np.random.uniform(size=(n, n)) 105 | Y = np.dot(problem.A, S) 106 | 107 | # Create paropt vectors 108 | ps = problem.createDesignVec() 109 | py = problem.createDesignVec() 110 | 111 | # Compute the update to the 112 | y0 = Y[:, -1] 113 | s0 = S[:, -1] 114 | B = (np.dot(y0, y0) / np.dot(s0, y0)) * np.eye(n) 115 | 116 | for i in range(n): 117 | s = S[:, i] 118 | y = Y[:, i] 119 | 120 | # Update the dense variant 121 | if qn_type == "sr1": 122 | r = y - np.dot(B, s) 123 | B += np.outer(r, r) / np.dot(r, s) 124 | else: 125 | r = np.dot(B, s) 126 | rho = 1.0 / np.dot(y, s) 127 | beta = 1.0 / np.dot(s, r) 128 | B += -beta * np.outer(r, r) + rho * np.outer(y, y) 129 | 130 | # Update the paropt problem 131 | ps[:] = s[:] 132 | py[:] = y[:] 133 | qn.update(ps, py) 134 | 135 | # Now, check that the update works 136 | for i in range(n): 137 | s = np.random.uniform(size=n) 138 | ps[:] = s[:] 139 | qn.mult(ps, py) 140 | 141 | # Compute 
the residual 142 | r = py[:] - np.dot(B, s) 143 | 144 | # Compute the relative error 145 | print( 146 | "relative err[%2d]: %25.10e" 147 | % (i, np.sqrt(np.dot(r, r) / np.dot(s, np.dot(B, s)))) 148 | ) 149 | -------------------------------------------------------------------------------- /examples/reduced_problem/reduced.py: -------------------------------------------------------------------------------- 1 | """ 2 | This code demonstrates how to create a reduced optimization problem by fixing 3 | a subset of design variables. 4 | 5 | Original problem: 6 | min x0**4 + x1**4 + x2**4 7 | s.t. x0 + x1 + x2 - 1 >= 0 8 | 9 | reduced problem: 10 | fix: x0 = 0.1 11 | """ 12 | 13 | import os 14 | import numpy as np 15 | import mpi4py.MPI as MPI 16 | import matplotlib.pyplot as plt 17 | from paropt import ParOpt 18 | 19 | 20 | class OriginalProblem(ParOpt.Problem): 21 | def __init__(self): 22 | # Set the communicator pointer 23 | self.comm = MPI.COMM_WORLD 24 | self.nvars = 3 25 | self.ncon = 1 26 | 27 | # Initialize the base class 28 | super().__init__(self.comm, nvars=self.nvars, ncon=self.ncon) 29 | 30 | return 31 | 32 | def getVarsAndBounds(self, x, lb, ub): 33 | """Set the values of the bounds""" 34 | x[:] = 1.0 35 | lb[:] = 0.0 36 | ub[:] = 10.0 37 | return 38 | 39 | def evalObjCon(self, x): 40 | """Evaluate the objective and constraint""" 41 | fail = 0 42 | con = np.zeros(1) 43 | fobj = x[0] ** 4 + x[1] ** 4 + x[2] ** 4 44 | con[0] = x[0] + x[1] + x[2] - 1 45 | return fail, fobj, con 46 | 47 | def evalObjConGradient(self, x, g, A): 48 | """Evaluate the objective and constraint gradient""" 49 | fail = 0 50 | 51 | # The objective gradient 52 | g[0] = 4 * x[0] ** 3 53 | g[1] = 4 * x[1] ** 3 54 | g[2] = 4 * x[2] ** 3 55 | 56 | # The constraint gradient 57 | A[0][0] = 1.0 58 | A[0][1] = 1.0 59 | A[0][2] = 1.0 60 | return fail 61 | 62 | 63 | class ReducedProblem(ParOpt.Problem): 64 | def __init__(self, original_prob, fixed_dv_idx, fixed_dv_vals): 65 | self.comm = 
MPI.COMM_WORLD 66 | self.ncon = 1 67 | self.prob = original_prob 68 | 69 | # Allocate full-size vectors for the original problem 70 | self._x = self.prob.createDesignVec() 71 | self._g = self.prob.createDesignVec() 72 | self._A = [] 73 | for i in range(self.ncon): 74 | self._A.append(self.prob.createDesignVec()) 75 | 76 | # Get indices of fixed design variables, these indices 77 | # are with respect to the original full-sized problem 78 | self.fixed_dv_idx = fixed_dv_idx 79 | self.fixed_dv_vals = fixed_dv_vals 80 | 81 | # Compute the indices of fixed design variables, these indices 82 | # are with respect to the original full-sized problem 83 | self.free_dv_idx = [ 84 | i for i in range(len(self._x)) if i not in self.fixed_dv_idx 85 | ] 86 | self.nvars = len(self.free_dv_idx) 87 | 88 | # Get vars and bounds from the original problem 89 | self._x0 = self.prob.createDesignVec() 90 | self._lb = self.prob.createDesignVec() 91 | self._ub = self.prob.createDesignVec() 92 | self.prob.getVarsAndBounds(self._x0, self._lb, self._ub) 93 | 94 | super().__init__(self.comm, nvars=self.nvars, ncon=self.ncon) 95 | return 96 | 97 | def getVarsAndBounds(self, x, lb, ub): 98 | x[:] = self._x0[self.free_dv_idx] 99 | lb[:] = self._lb[self.free_dv_idx] 100 | ub[:] = self._ub[self.free_dv_idx] 101 | return 102 | 103 | def evalObjCon(self, x): 104 | self._x[self.fixed_dv_idx] = self.fixed_dv_vals 105 | self._x[self.free_dv_idx] = x[:] 106 | return self.prob.evalObjCon(self._x) 107 | 108 | def evalObjConGradient(self, x, g, A): 109 | self._x[self.fixed_dv_idx] = self.fixed_dv_vals 110 | self._x[self.free_dv_idx] = x[:] 111 | fail = self.prob.evalObjConGradient(self._x, self._g, self._A) 112 | g[:] = self._g[self.free_dv_idx] 113 | for i in range(self.ncon): 114 | A[i][:] = self._A[i][self.free_dv_idx] 115 | return fail 116 | 117 | 118 | options = { 119 | "algorithm": "tr", 120 | "tr_init_size": 0.05, 121 | "tr_min_size": 1e-6, 122 | "tr_max_size": 10.0, 123 | "tr_eta": 0.1, 124 | 
"tr_adaptive_gamma_update": True, 125 | "tr_max_iterations": 200, 126 | } 127 | 128 | original = OriginalProblem() 129 | redu = ReducedProblem(original, fixed_dv_idx=[0], fixed_dv_vals=[0.1]) 130 | opt = ParOpt.Optimizer(redu, options) 131 | opt.optimize() 132 | -------------------------------------------------------------------------------- /src/ParOptOptions.h: -------------------------------------------------------------------------------- 1 | #ifndef PAR_OPT_OPTIONS_H 2 | #define PAR_OPT_OPTIONS_H 3 | 4 | #include 5 | #include 6 | 7 | #include "ParOptVec.h" 8 | 9 | class ParOptOptions : public ParOptBase { 10 | public: 11 | static const int PAROPT_STRING_OPTION = 1; 12 | static const int PAROPT_BOOLEAN_OPTION = 2; 13 | static const int PAROPT_INT_OPTION = 3; 14 | static const int PAROPT_FLOAT_OPTION = 4; 15 | static const int PAROPT_ENUM_OPTION = 5; 16 | 17 | ParOptOptions(MPI_Comm _comm = MPI_COMM_WORLD); 18 | ~ParOptOptions(); 19 | 20 | // Add the options 21 | int addStringOption(const char *name, const char *value, 22 | const char *descript); 23 | int addBoolOption(const char *name, int value, const char *descript); 24 | int addIntOption(const char *name, int value, int low, int high, 25 | const char *descript); 26 | int addFloatOption(const char *name, double value, double low, double high, 27 | const char *descript); 28 | int addEnumOption(const char *name, const char *value, int size, 29 | const char *options[], const char *descript); 30 | 31 | // Is this an option? 
32 | int isOption(const char *name); 33 | 34 | // Set the option values 35 | int setOption(const char *name, const char *value); 36 | int setOption(const char *name, int value); 37 | int setOption(const char *name, double value); 38 | 39 | // Retrieve the option values that have been set 40 | const char *getStringOption(const char *name); 41 | int getBoolOption(const char *name); 42 | int getIntOption(const char *name); 43 | double getFloatOption(const char *name); 44 | const char *getEnumOption(const char *name); 45 | 46 | // Get the type 47 | int getOptionType(const char *name); 48 | 49 | // Get the description 50 | const char *getDescription(const char *name); 51 | 52 | // Get information about the range of possible values 53 | int getIntRange(const char *name, int *low, int *high); 54 | int getFloatRange(const char *name, double *low, double *high); 55 | int getEnumRange(const char *name, int *size, const char *const **values); 56 | 57 | void printSummary(FILE *fp, int output_level); 58 | 59 | void begin(); 60 | const char *getName(); 61 | int next(); 62 | 63 | private: 64 | class ParOptOptionEntry { 65 | public: 66 | ParOptOptionEntry() { 67 | name = descript = NULL; 68 | str_value = NULL; 69 | bool_value = bool_default = 0; 70 | int_value = int_default = int_low = int_high = 0; 71 | float_value = float_default = float_low = float_high = 0.0; 72 | num_enum = 0; 73 | enum_value = enum_default = NULL; 74 | enum_range = NULL; 75 | is_set = 0; 76 | } 77 | ~ParOptOptionEntry() { 78 | if (name) { 79 | delete[] name; 80 | } 81 | if (descript) { 82 | delete[] descript; 83 | } 84 | if (str_value) { 85 | delete[] str_value; 86 | } 87 | if (str_default) { 88 | delete[] str_default; 89 | } 90 | if (enum_value) { 91 | delete[] enum_value; 92 | } 93 | if (enum_default) { 94 | delete[] enum_default; 95 | } 96 | if (enum_range) { 97 | for (int i = 0; i < num_enum; i++) { 98 | delete[] enum_range[i]; 99 | } 100 | } 101 | } 102 | 103 | // Name of the option 104 | char *name; 
105 | 106 | // Name of the description 107 | char *descript; 108 | 109 | // Flag to indicate whether the option has been set 110 | // by the user/algorithm 111 | int is_set; 112 | 113 | // Type of entry 114 | int type_info; 115 | 116 | // String value 117 | char *str_value, *str_default; 118 | 119 | // Set the boolean value 120 | int bool_value, bool_default; 121 | 122 | // Store information about the integer value 123 | int int_value, int_default, int_low, int_high; 124 | 125 | // Store information about the float values 126 | double float_value, float_default, float_low, float_high; 127 | 128 | // Store a list of options as strings 129 | int num_enum; 130 | char *enum_value, *enum_default, **enum_range; 131 | }; 132 | 133 | MPI_Comm comm; 134 | std::map entries; 135 | std::map::iterator iter; 136 | }; 137 | 138 | #endif // PAR_OPT_OPTIONS_H 139 | -------------------------------------------------------------------------------- /examples/cholesky/cholesky.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include "ParOptAMD.h" 3 | #include "ParOptSparseCholesky.h" 4 | #include "ParOptSparseUtils.h" 5 | 6 | void build_matrix(int nx, int *_size, int **_colp, int **_rows, 7 | ParOptScalar **_kvals, const int *iperm = NULL) { 8 | ParOptScalar kmat[][4] = { 9 | {4.0, 2.0, 2.0, 1.0}, 10 | {2.0, 4.0, 1.0, 2.0}, 11 | {2.0, 1.0, 4.0, 2.0}, 12 | {1.0, 2.0, 2.0, 4.0}, 13 | }; 14 | 15 | ParOptScalar ke[64]; 16 | for (int k = 0; k < 64; k++) { 17 | ke[k] = 0.0; 18 | } 19 | for (int ki = 0; ki < 2; ki++) { 20 | for (int ii = 0; ii < 4; ii++) { 21 | for (int jj = 0; jj < 4; jj++) { 22 | ke[8 * (2 * ii + ki) + 2 * jj + ki] = kmat[ii][jj] / 9.0; 23 | } 24 | } 25 | } 26 | 27 | int size = 2 * (nx + 1) * (nx + 1); 28 | int *colp = new int[size + 1]; 29 | for (int i = 0; i < size + 1; i++) { 30 | colp[i] = 0; 31 | } 32 | 33 | for (int i = 0; i < nx; i++) { 34 | for (int j = 0; j < nx; j++) { 35 | int nodes[] = {i + j * (nx + 1), i + 1 + j * 
(nx + 1), 36 | i + (j + 1) * (nx + 1), i + 1 + (j + 1) * (nx + 1)}; 37 | 38 | for (int k = 0; k < 2; k++) { 39 | for (int ii = 0; ii < 4; ii++) { 40 | int ivar = 2 * nodes[ii] + k; 41 | if (iperm) { 42 | ivar = iperm[ivar]; 43 | } 44 | colp[ivar] += 8; 45 | } 46 | } 47 | } 48 | } 49 | 50 | int nnz = 0; 51 | for (int i = 0; i < size; i++) { 52 | int tmp = colp[i]; 53 | colp[i] = nnz; 54 | nnz += tmp; 55 | } 56 | colp[size] = nnz; 57 | 58 | int *rows = new int[nnz]; 59 | ParOptScalar *kvals = new ParOptScalar[nnz]; 60 | 61 | for (int i = 0; i < nx; i++) { 62 | for (int j = 0; j < nx; j++) { 63 | int nodes[] = {i + j * (nx + 1), i + 1 + j * (nx + 1), 64 | i + (j + 1) * (nx + 1), i + 1 + (j + 1) * (nx + 1)}; 65 | 66 | for (int ki = 0; ki < 2; ki++) { 67 | for (int kj = 0; kj < 2; kj++) { 68 | for (int ii = 0; ii < 4; ii++) { 69 | for (int jj = 0; jj < 4; jj++) { 70 | int ivar = 2 * nodes[ii] + ki; 71 | int jvar = 2 * nodes[jj] + kj; 72 | if (iperm) { 73 | ivar = iperm[ivar]; 74 | jvar = iperm[jvar]; 75 | } 76 | rows[colp[ivar]] = jvar; 77 | kvals[colp[ivar]] = ke[8 * (2 * ii + ki) + (2 * jj + kj)]; 78 | colp[ivar]++; 79 | } 80 | } 81 | } 82 | } 83 | } 84 | } 85 | 86 | for (int i = size - 1; i >= 0; i--) { 87 | colp[i + 1] = colp[i]; 88 | } 89 | colp[0] = 0; 90 | 91 | *_size = size; 92 | *_colp = colp; 93 | *_rows = rows; 94 | *_kvals = kvals; 95 | } 96 | 97 | int main(int argc, char *argv[]) { 98 | int nx = 1024; 99 | 100 | int size; 101 | int *colp; 102 | int *rows; 103 | ParOptScalar *kvals; 104 | build_matrix(nx, &size, &colp, &rows, &kvals, NULL); 105 | 106 | ParOptScalar *b = new ParOptScalar[size]; 107 | for (int i = 0; i < size; i++) { 108 | b[i] = 0.0; 109 | } 110 | for (int i = 0; i < size; i++) { 111 | for (int jp = colp[i]; jp < colp[i + 1]; jp++) { 112 | b[rows[jp]] += kvals[jp]; 113 | } 114 | } 115 | 116 | printf("size = %d\n", size); 117 | double t0 = MPI_Wtime(); 118 | ParOptOrderingType order = PAROPT_ND_ORDER; 119 | for (int k = 0; k < argc; k++) { 120 
| if (strcmp(argv[k], "ND") == 0) { 121 | order = PAROPT_ND_ORDER; 122 | } else if (strcmp(argv[k], "AMD") == 0) { 123 | order = PAROPT_AMD_ORDER; 124 | } 125 | } 126 | ParOptSparseCholesky *chol = 127 | new ParOptSparseCholesky(size, colp, rows, order); 128 | double t1 = MPI_Wtime(); 129 | chol->setValues(size, colp, rows, kvals); 130 | 131 | delete[] colp; 132 | delete[] rows; 133 | delete[] kvals; 134 | 135 | double t2 = MPI_Wtime(); 136 | chol->factor(); 137 | double t3 = MPI_Wtime(); 138 | chol->solve(b); 139 | double t4 = MPI_Wtime(); 140 | 141 | printf("Setup/order time: %12.5e\n", t1 - t0); 142 | printf("Set values time: %12.5e\n", t2 - t1); 143 | printf("Factor time: %12.5e\n", t3 - t2); 144 | printf("Solve time: %12.5e\n", t4 - t3); 145 | 146 | ParOptScalar err = 0.0; 147 | for (int i = 0; i < size; i++) { 148 | err += (1.0 - b[i]) * (1.0 - b[i]); 149 | } 150 | printf("||x - e||: %25.15e\n", ParOptRealPart(sqrt(err))); 151 | 152 | delete chol; 153 | 154 | return 0; 155 | } -------------------------------------------------------------------------------- /examples/openmdao/distrib_paraboloid.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import sys 3 | 4 | import numpy as np 5 | from mpi4py import MPI 6 | 7 | import openmdao.api as om 8 | from paropt.paropt_driver import ParOptDriver 9 | 10 | """ 11 | Example to demonstrate parallel optimization with OpenMDAO 12 | using distributed components 13 | 14 | Minimize: y = Sum((x - 5)^2) + (w - 10)^2 15 | w.r.t. 
x 16 | subject to: a = Sum(x^3) <= 10.0 17 | 18 | The size of x depends on the number of procs used: 19 | size(x) = 2*num_procs + 1 20 | """ 21 | 22 | 23 | class DistribParaboloid(om.ExplicitComponent): 24 | def setup(self): 25 | self.options["distributed"] = True 26 | 27 | if self.comm.rank == 0: 28 | ndvs = 3 29 | else: 30 | ndvs = 2 31 | 32 | self.add_input("w", val=1.0) # this will connect to a non-distributed IVC 33 | self.add_input("x", shape=ndvs) # this will connect to a distributed IVC 34 | 35 | self.add_output("y", shape=1) # all-gathered output, duplicated on all procs 36 | self.add_output("z", shape=ndvs) # distributed output 37 | self.add_output("a", shape=1) # all-gathered output, duplicated on all procs 38 | self.declare_partials("y", "x") 39 | self.declare_partials("y", "w") 40 | self.declare_partials("z", "x") 41 | self.declare_partials("a", "x") 42 | 43 | def compute(self, inputs, outputs): 44 | x = inputs["x"] 45 | local_y = np.sum((x - 5) ** 2) 46 | y_g = np.zeros(self.comm.size) 47 | self.comm.Allgather(local_y, y_g) 48 | outputs["y"] = np.sum(y_g) + (inputs["w"] - 10) ** 2 49 | 50 | z = x**3 51 | outputs["z"] = z 52 | 53 | local_a = np.sum(z) 54 | a_g = np.zeros(self.comm.size) 55 | self.comm.Allgather(local_a, a_g) 56 | outputs["a"] = np.sum(a_g) 57 | 58 | def compute_partials(self, inputs, J): 59 | x = inputs["x"] 60 | J["y", "x"] = 2.0 * (x - 5.0) 61 | J["y", "w"] = 2.0 * (inputs["w"] - 10.0) 62 | J["z", "x"] = np.diag(2.0 * x) 63 | J["a", "x"] = 3.0 * x * x 64 | 65 | 66 | if __name__ == "__main__": 67 | # Create an argument parser 68 | parser = argparse.ArgumentParser() 69 | parser.add_argument( 70 | "--driver", 71 | default="paropt", 72 | choices=["paropt", "scipy", "pyoptsparse"], 73 | help="driver", 74 | ) 75 | parser.add_argument( 76 | "--algorithm", default="ip", choices=["ip", "tr"], help="optimizer type" 77 | ) 78 | args = parser.parse_args() 79 | driver = args.driver 80 | algorithm = args.algorithm 81 | 82 | comm = MPI.COMM_WORLD 
83 | 84 | # Build the model 85 | p = om.Problem() 86 | 87 | # Set the number of design variables on each processor 88 | if comm.rank == 0: 89 | ndvs = 3 90 | else: 91 | ndvs = 2 92 | 93 | # Define the independent variables that are distributed 94 | d_ivc = p.model.add_subsystem( 95 | "distrib_ivc", om.IndepVarComp(distributed=True), promotes=["*"] 96 | ) 97 | d_ivc.add_output("x", 2 * np.ones(ndvs)) 98 | 99 | # Define the independent variables that are non-distributed 100 | # These non-distributed variables will be duplicated on each processor 101 | ivc = p.model.add_subsystem( 102 | "ivc", om.IndepVarComp(distributed=False), promotes=["*"] 103 | ) 104 | ivc.add_output("w", 2.0) 105 | 106 | # Add the paraboloid model 107 | p.model.add_subsystem("dp", DistribParaboloid(), promotes=["*"]) 108 | 109 | # Define the optimization problem 110 | p.model.add_design_var("x", upper=10.0) 111 | p.model.add_objective("y") 112 | p.model.add_constraint("a", upper=10.0) 113 | 114 | # Create and set the driver 115 | if driver == "paropt": 116 | p.driver = ParOptDriver() 117 | p.driver.options["algorithm"] = algorithm 118 | elif driver == "scipy": 119 | p.driver = ScipyOptimizeDriver() 120 | elif driver == "pyoptsparse": 121 | p.driver = pyOptSparseDriver() 122 | p.driver.options["optimizer"] = "ParOpt" 123 | 124 | p.setup() 125 | p.run_driver() 126 | 127 | # Print the objective and constraint values at the optimized point 128 | if comm.rank == 0: 129 | print("f = {0:.2f}".format(p.get_val("dp.y")[0])) 130 | print("c = {0:.2f}".format(p.get_val("dp.a")[0] - 10.0)) 131 | 132 | # Print the x location of the minimum 133 | print("Rank = {0}; x = {1}".format(comm.rank, p.get_val("dp.x"))) 134 | -------------------------------------------------------------------------------- /examples/maratos_effect/maratos.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is example 15.4 in Numerical Optimization by Nocedal Et al. 
3 | which might be able to show the Maratos effect that prevents 4 | the optimizer to converge rapidly. 5 | 6 | Problem: 7 | 8 | min f(x1, x2) = 2(x1^2 + x2^2 - 1) - x1 9 | s.t. 1 - (x1^2 + x2^2) = 0 10 | """ 11 | 12 | import numpy as np 13 | import mpi4py.MPI as MPI 14 | import matplotlib.pyplot as plt 15 | from paropt import ParOpt 16 | import argparse 17 | 18 | 19 | class Maratos(ParOpt.Problem): 20 | def __init__(self, plot_label=False): 21 | self.comm = MPI.COMM_WORLD 22 | self.nvars = 2 23 | self.ncon = 1 24 | self.nineq = 0 25 | self.design_counter = 0 26 | super(Maratos, self).__init__( 27 | self.comm, nvars=self.nvars, ncon=self.ncon, ninequalities=self.nineq 28 | ) 29 | self.plot_label = plot_label 30 | 31 | def fun(self, x): 32 | return 2.0 * (x[0] - 0.5) ** 2 + 2.0 * x[1] ** 2 33 | 34 | def fun_grad(self, x): 35 | return np.array([4.0 * x[0] - 1.0, 4.0 * x[1]]) 36 | 37 | def con(self, x): 38 | return (x[0] ** 2 + x[1] ** 2) - 2.0 39 | 40 | def con_grad(self, x): 41 | return np.array([2.0 * x[0], 2.0 * x[1]]) 42 | 43 | def plot_contour(self): 44 | n = 200 45 | x = np.linspace(-2.0, 2.0, n) 46 | y = np.linspace(-2.0, 2.0, n) 47 | X, Y = np.meshgrid(x, y) 48 | 49 | f = np.zeros((n, n)) 50 | c = np.zeros((n, n)) 51 | 52 | for i in range(n): 53 | for j in range(n): 54 | f[i, j] = self.fun([X[i, j], Y[i, j]]) 55 | c[i, j] = self.con([X[i, j], Y[i, j]]) 56 | 57 | fig, ax = plt.subplots(nrows=1, ncols=1) 58 | ax.contour(X, Y, f, levels=100) 59 | ax.contour(X, Y, c, levels=[0.0], colors=["red"]) 60 | plt.xlabel("x1") 61 | plt.ylabel("x2") 62 | ax.set_aspect("equal", "box") 63 | fig.tight_layout() 64 | 65 | self.ax = ax 66 | self.fig = fig 67 | 68 | return 69 | 70 | def plot_design(self, x): 71 | plt.plot(x[0], x[1], "b.") 72 | label = str(self.design_counter) 73 | 74 | if self.plot_label: 75 | self.ax.annotate(label, (x[0], x[1])) 76 | self.design_counter += 1 77 | 78 | return 79 | 80 | def getVarsAndBounds(self, x, lb, ub): 81 | """Set the values of the 
bounds""" 82 | x[0] = 1.0 83 | x[1] = 1.0 84 | 85 | lb[:] = -10.0 86 | ub[:] = 10.0 87 | 88 | return 89 | 90 | def evalObjCon(self, x): 91 | """Evaluate the objective and constraint""" 92 | con = np.zeros(self.ncon, dtype=ParOpt.dtype) 93 | fobj = self.fun(x) 94 | con[0] = self.con(x) 95 | fail = 0 96 | self.plot_design(x) 97 | return fail, fobj, con 98 | 99 | def evalObjConGradient(self, x, g, A): 100 | """Evaluate the objective and constraint gradient""" 101 | g[0] = self.fun_grad(x)[0] 102 | g[1] = self.fun_grad(x)[1] 103 | A[0][0] = self.con_grad(x)[0] 104 | A[0][1] = self.con_grad(x)[1] 105 | fail = 0 106 | return fail 107 | 108 | 109 | parser = argparse.ArgumentParser() 110 | parser.add_argument("--algorithm", type=str, default="tr") 111 | parser.add_argument("--no_plot", action="store_false", default=True) 112 | parser.add_argument("--no_label", action="store_false", default=True) 113 | args = parser.parse_args() 114 | algorithm = args.algorithm 115 | plot = args.no_plot 116 | plot_label = args.no_label 117 | 118 | problem = Maratos(plot_label=plot_label) 119 | problem.plot_contour() 120 | 121 | options = { 122 | "algorithm": "tr", 123 | "output_level": 2, 124 | "tr_init_size": 1.0, 125 | "tr_min_size": 1e-6, 126 | "tr_max_size": 1e2, 127 | "tr_eta": 0.25, 128 | "penalty_gamma": 1e2, 129 | "tr_adaptive_gamma_update": False, 130 | "tr_accept_step_strategy": "filter_method", 131 | "tr_use_soc": False, 132 | "tr_penalty_gamma_max": 1e5, 133 | "tr_penalty_gamma_min": 1e-5, 134 | "tr_max_iterations": 50, 135 | "max_major_iters": 100, 136 | } 137 | 138 | if algorithm == "ip": 139 | options = { 140 | "algorithm": "ip", 141 | "qn_subspace_size": 10, 142 | "abs_res_tol": 1e-6, 143 | "barrier_strategy": "monotone", 144 | "output_level": 1, 145 | "armijo_constant": 1e-5, 146 | "max_major_iters": 500, 147 | } 148 | 149 | opt = ParOpt.Optimizer(problem, options) 150 | opt.optimize() 151 | if plot: 152 | plt.show() 153 | 
-------------------------------------------------------------------------------- /paropt/ParOptEig.pyx: -------------------------------------------------------------------------------- 1 | # For the use of MPI 2 | from mpi4py.libmpi cimport * 3 | cimport mpi4py.MPI as MPI 4 | 5 | # Import numpy 6 | import numpy as np 7 | cimport numpy as np 8 | 9 | # Ensure that numpy is initialized 10 | np.import_array() 11 | 12 | # Import the TACS module 13 | from paropt.ParOpt cimport * 14 | 15 | # Import tracebacks for callbacks 16 | import traceback 17 | 18 | # Include the definitions 19 | include "ParOptDefs.pxi" 20 | 21 | # Include the mpi4py header 22 | cdef extern from "mpi-compat.h": 23 | pass 24 | 25 | cdef extern from "ParOptCompactEigenvalueApprox.h": 26 | cppclass ParOptCompactEigenApprox(ParOptBase): 27 | ParOptCompactEigenApprox(ParOptProblem*, int) 28 | void getApproximation(ParOptScalar**, ParOptVec**, 29 | int*, ParOptScalar**, ParOptScalar**, ParOptVec***) 30 | 31 | cppclass ParOptEigenQuasiNewton(ParOptCompactQuasiNewton): 32 | ParOptEigenQuasiNewton(ParOptCompactQuasiNewton*, ParOptCompactEigenApprox*, int) 33 | 34 | ctypedef void (*updateeigenmodel)(void*, ParOptVec*, ParOptCompactEigenApprox*) except * 35 | 36 | cppclass ParOptEigenSubproblem(ParOptTrustRegionSubproblem): 37 | ParOptEigenSubproblem(ParOptProblem*, ParOptEigenQuasiNewton*) 38 | void setEigenModelUpdate(void*, updateeigenmodel) 39 | 40 | cdef class CompactEigenApprox: 41 | cdef ParOptCompactEigenApprox *ptr 42 | def __cinit__(self, ProblemBase problem=None, N=None): 43 | if problem is not None and N is not None: 44 | self.ptr = new ParOptCompactEigenApprox(problem.ptr, N) 45 | self.ptr.incref() 46 | else: 47 | self.ptr = NULL 48 | 49 | def __dealloc__(self): 50 | if self.ptr != NULL: 51 | self.ptr.decref() 52 | 53 | def getApproximationVectors(self): 54 | cdef int N = 0 55 | cdef ParOptVec *g0 = NULL 56 | cdef ParOptVec **hvecs = NULL 57 | 58 | if self.ptr is not NULL: 59 | 
self.ptr.getApproximation(NULL, &g0, &N, NULL, NULL, &hvecs) 60 | 61 | hlist = [] 62 | for i in range(N): 63 | hlist.append(_init_PVec(hvecs[i])) 64 | 65 | return _init_PVec(g0), hlist 66 | 67 | def setApproximationValues(self, c=None, M=None, Minv=None): 68 | cdef int N = 0 69 | cdef ParOptScalar *c0 = NULL 70 | cdef ParOptScalar *M0 = NULL 71 | cdef ParOptScalar *M0inv = NULL 72 | 73 | if self.ptr is not NULL: 74 | self.ptr.getApproximation(&c0, NULL, &N, &M0, &M0inv, NULL) 75 | 76 | if c is not None and c0 is not NULL: 77 | c0[0] = c 78 | 79 | if M is not None and M0 is not NULL: 80 | for i in range(N): 81 | for j in range(N): 82 | M0[N*i + j] = M[i][j] 83 | 84 | if Minv is not None and M0inv is not NULL: 85 | for i in range(N): 86 | for j in range(N): 87 | M0inv[N*i + j] = Minv[i][j] 88 | 89 | return 90 | 91 | cdef _init_CompactEigenApprox(ParOptCompactEigenApprox *ptr): 92 | obj = CompactEigenApprox() 93 | obj.ptr = ptr 94 | obj.ptr.incref() 95 | return obj 96 | 97 | cdef class EigenQuasiNewton(CompactQuasiNewton): 98 | cdef ParOptEigenQuasiNewton *eptr 99 | def __cinit__(self, CompactQuasiNewton qn, CompactEigenApprox eigh, int index=0): 100 | cdef ParOptCompactQuasiNewton *qn_ptr = NULL 101 | if qn is not None: 102 | qn_ptr = qn.ptr 103 | self.eptr = new ParOptEigenQuasiNewton(qn_ptr, eigh.ptr, index) 104 | self.ptr = self.eptr 105 | self.ptr.incref() 106 | return 107 | 108 | cdef void _updateeigenmodel(void *_self, ParOptVec *_x, 109 | ParOptCompactEigenApprox *_approx): 110 | fail = 0 111 | try: 112 | obj = _self 113 | x = _init_PVec(_x) 114 | approx = _init_CompactEigenApprox(_approx) 115 | obj(x, approx) 116 | except: 117 | tb = traceback.format_exc() 118 | print(tb) 119 | exit(0) 120 | 121 | cdef class EigenSubproblem(TrustRegionSubproblem): 122 | cdef ParOptEigenSubproblem *me 123 | cdef object callback 124 | def __init__(self, ProblemBase problem, EigenQuasiNewton eig): 125 | self.me = new ParOptEigenSubproblem(problem.ptr, eig.eptr) 126 | 
self.subproblem = self.me 127 | self.subproblem.incref() 128 | self.ptr = self.subproblem 129 | self.callback = None 130 | return 131 | 132 | def setUpdateEigenModel(self, callback): 133 | self.callback = callback 134 | self.me.setEigenModelUpdate(self.callback, _updateeigenmodel) 135 | return 136 | -------------------------------------------------------------------------------- /examples/plot_history/ipopt_plot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import matplotlib 4 | import matplotlib.pylab as plt 5 | import numpy as np 6 | import argparse 7 | import re 8 | import os 9 | 10 | 11 | def ipopt_plot(filename, savefig): 12 | colors = [ 13 | "#1f77b4", 14 | "#ff7f0e", 15 | "#2ca02c", 16 | "#d62728", 17 | "#9467bd", 18 | "#8c564b", 19 | "#e377c2", 20 | "#7f7f7f", 21 | "#bcbd22", 22 | "#17becf", 23 | ] 24 | 25 | # Read in all lines 26 | with open(filename, "r") as f: 27 | lines = f.readlines() 28 | 29 | # metadata line text 30 | metadata_line = ( 31 | "iter objective inf_pr inf_du " 32 | "lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls\n" 33 | ) 34 | 35 | # Find the line index where history data start 36 | line_index = 0 37 | for line in lines: 38 | if line == metadata_line: 39 | break 40 | line_index += 1 41 | datastart_index = line_index + 1 42 | 43 | # Find the total number of lines 44 | dataend_index = len(lines) 45 | 46 | # Parse data 47 | itern = [] # iter count, if in restoration phase will have 'r' appended 48 | obj = [] # unscaled(original) objective 49 | inf_pr = [] # unscaled constraint violation, infinity norm by default 50 | inf_du = [] # scaled dual infeasibility, infinity norm 51 | lg_mu = [] # log_10 of barrier parameter \mu 52 | dnorm = [] # infinity norm of primal step 53 | alpha_du = [] # stepsize for dual variables 54 | alpha_pr = [] # stepsize for primal variables 55 | for line_index in range(datastart_index, dataend_index): 56 | if lines[line_index] == metadata_line: 57 | 
continue 58 | elif lines[line_index] == "\n": 59 | break 60 | else: 61 | data = lines[line_index].split() 62 | intPattern = r"[\+-]?\d+" 63 | sciPattern = r"[\+-]?\d+\.\d+[eE][\+-]\d+" 64 | floPattern = r"[\+-]?\d+\.\d+" 65 | itern.append(re.findall(intPattern, data[0].replace("r", ""))[0]) 66 | obj.append(re.findall(sciPattern, data[1])[0]) 67 | inf_pr.append(re.findall(sciPattern, data[2])[0]) 68 | inf_du.append(re.findall(sciPattern, data[3])[0]) 69 | lg_mu.append(re.findall(floPattern, data[4])[0]) 70 | dnorm.append(re.findall(sciPattern, data[5])[0]) 71 | alpha_du.append(re.findall(sciPattern, data[7])[0]) 72 | alpha_pr.append(re.findall(sciPattern, data[8])[0]) 73 | 74 | # Store data into numpy arrays 75 | itern = np.array(itern).astype(np.int) 76 | obj = np.array(obj).astype(np.float) 77 | inf_pr = np.array(inf_pr).astype(np.float) 78 | inf_du = np.array(inf_du).astype(np.float) 79 | mu = 10 ** np.array(lg_mu).astype(np.float) 80 | dnorm = np.array(dnorm).astype(np.float) 81 | alpha_du = np.array(alpha_du).astype(np.float) 82 | alpha_pr = np.array(alpha_pr).astype(np.float) 83 | 84 | # Set up axes and plot objective 85 | fig, ax1 = plt.subplots() 86 | l1 = ax1.plot(itern, obj, color=colors[0], label="objective") 87 | ax1.set_xlabel("Iteration") 88 | ax1.set_ylabel("Function value") 89 | 90 | # Set up another y axis and plot others 91 | ax2 = ax1.twinx() 92 | l2 = ax2.semilogy(itern, inf_du, color=colors[1], label="scaled dual infeas") 93 | l3 = ax2.semilogy(itern, inf_pr, color=colors[4], label="infeas") 94 | l4 = ax2.semilogy(itern, mu, color=colors[3], label="barrier") 95 | l5 = ax2.semilogy(itern, dnorm, color=colors[2], label="linfty primal step") 96 | l6 = ax2.semilogy(itern, alpha_pr, color=colors[5], label="primal stepsize") 97 | l7 = ax2.semilogy(itern, alpha_du, color=colors[6], label="dual stepsize") 98 | ax2.set_ylabel("Optimality and Feasibility") 99 | 100 | # Set labels 101 | lns = l1 + l2 + l3 + l4 + l5 + l6 + l7 102 | labs = [l.get_label() for 
l in lns] 103 | ax2.legend(lns, labs, loc="upper right", framealpha=0.2) 104 | 105 | # Plot 106 | plt.title(filename) 107 | if savefig: 108 | fname = os.path.splitext(filename)[0] # Delete suffix 109 | fname += "_history" 110 | plt.savefig(fname + ".png") 111 | plt.close() 112 | else: 113 | plt.show() 114 | 115 | 116 | if __name__ == "__main__": 117 | # Set up parser 118 | p = argparse.ArgumentParser("Plot values from an IPOPT output file") 119 | p.add_argument( 120 | "filename", metavar="IPOPT.out", type=str, help="IPOPT output file name" 121 | ) 122 | p.add_argument("--savefig", action="store_true") 123 | args = p.parse_args() 124 | 125 | # Call plot function 126 | ipopt_plot(args.filename, args.savefig) 127 | -------------------------------------------------------------------------------- /examples/sparse/sparse_rosenbrock.py: -------------------------------------------------------------------------------- 1 | # Import some utilities 2 | import numpy as np 3 | import mpi4py.MPI as MPI 4 | import matplotlib.pyplot as plt 5 | 6 | # Import ParOpt 7 | from paropt import ParOpt 8 | 9 | 10 | # Create the rosenbrock function class 11 | class Rosenbrock(ParOpt.Problem): 12 | def __init__(self): 13 | # Set the communicator pointer 14 | self.comm = MPI.COMM_WORLD 15 | self.nvars = 2 16 | self.ncon = 0 17 | self.nwcon = 1 18 | self.nwblock = 1 19 | 20 | # The design history file 21 | self.x_hist = [] 22 | 23 | # Initialize the base class 24 | super(Rosenbrock, self).__init__( 25 | self.comm, 26 | nvars=self.nvars, 27 | ncon=self.ncon, 28 | nwcon=self.nwcon, 29 | nwblock=self.nwblock, 30 | ) 31 | return 32 | 33 | def getVarsAndBounds(self, x, lb, ub): 34 | """Set the values of the bounds""" 35 | x[:] = -2.0 + np.random.uniform(size=len(x)) 36 | lb[:] = -2.0 37 | ub[:] = 2.0 38 | return 39 | 40 | def evalObjCon(self, x): 41 | """Evaluate the objective and constraint""" 42 | # Append the point to the solution history 43 | self.x_hist.append(np.array(x)) 44 | 45 | # Evaluate 
the objective and dense constraints 46 | fail = 0 47 | con = np.zeros(1) 48 | fobj = 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2 49 | return fail, fobj, con 50 | 51 | def evalObjConGradient(self, x, g, A): 52 | """Evaluate the objective and constraint gradient""" 53 | fail = 0 54 | 55 | # The objective gradient 56 | g[0] = 200 * (x[1] - x[0] ** 2) * (-2 * x[0]) - 2 * (1 - x[0]) 57 | g[1] = 200 * (x[1] - x[0] ** 2) 58 | 59 | # Evaluate the dense constraint gradient, if any 60 | return fail 61 | 62 | def evalSparseCon(self, x, con): 63 | """Evaluate the sparse constraints""" 64 | con[0] = x[0] + x[1] + 5.0 65 | return 66 | 67 | def addSparseJacobian(self, alpha, x, px, con): 68 | """Compute the Jacobian-vector product con += alpha*J(x)*px""" 69 | con[0] += alpha * (px[0] + px[1]) 70 | return 71 | 72 | def addSparseJacobianTranspose(self, alpha, x, pz, out): 73 | """Compute the transpose Jacobian-vector product out += alpha*J^{T}*pz""" 74 | out[0] += alpha * pz[0] 75 | out[1] += alpha * pz[0] 76 | return 77 | 78 | def addSparseInnerProduct(self, alpha, x, c, A): 79 | """Add the results from the product alpha * J(x)*C*J(x)^{T} to A""" 80 | A[0] += alpha * (c[0] + c[1]) 81 | return 82 | 83 | 84 | def plot_it_all(problem): 85 | """ 86 | Plot a carpet plot with the search histories for steepest descent, 87 | conjugate gradient and BFGS from the same starting point. 
88 | """ 89 | 90 | # Create the data for the carpet plot 91 | n = 150 92 | xlow = -4.0 93 | xhigh = 4.0 94 | x1 = np.linspace(xlow, xhigh, n) 95 | r = np.zeros((n, n)) 96 | 97 | for j in range(n): 98 | for i in range(n): 99 | fail, fobj, con = problem.evalObjCon([x1[i], x1[j]]) 100 | r[j, i] = fobj 101 | 102 | # Assign the contour levels 103 | levels = np.min(r) + np.linspace(0, 1.0, 75) ** 2 * (np.max(r) - np.min(r)) 104 | 105 | # Create the plot 106 | fig = plt.figure(facecolor="w", figsize=(6.0, 6.0)) 107 | plt.contour(x1, x1, r, levels) 108 | 109 | colours = ["-bo", "-ko", "-co", "-mo", "-yo", "-bx", "-kx", "-cx", "-mx", "-yx"] 110 | 111 | options = { 112 | "algorithm": "tr", 113 | "tr_init_size": 0.5, 114 | "tr_min_size": 1e-6, 115 | "tr_max_size": 10.0, 116 | "tr_eta": 0.1, 117 | "tr_adaptive_gamma_update": True, 118 | "tr_max_iterations": 200, 119 | } 120 | 121 | for k in range(len(colours)): 122 | # Optimize the problem 123 | problem.x_hist = [] 124 | 125 | opt = ParOpt.Optimizer(rosen, options) 126 | opt.optimize() 127 | 128 | # Copy out the steepest descent points 129 | sd = np.zeros((2, len(problem.x_hist))) 130 | for i in range(len(problem.x_hist)): 131 | sd[0, i] = problem.x_hist[i][0] 132 | sd[1, i] = problem.x_hist[i][1] 133 | 134 | plt.plot(sd[0, :], sd[1, :], colours[k], label="IP nit:%d" % (sd.shape[1])) 135 | plt.plot(sd[0, -1], sd[1, -1], "-ro") 136 | 137 | plt.legend() 138 | plt.axis([xlow, xhigh, xlow, xhigh]) 139 | plt.gca().set_aspect("equal") 140 | plt.show() 141 | 142 | 143 | # Create the Rosenbrock problem class 144 | rosen = Rosenbrock() 145 | rosen.checkGradients() 146 | 147 | plot_it_all(rosen) 148 | -------------------------------------------------------------------------------- /.github/workflows/unit_tests.yml: -------------------------------------------------------------------------------- 1 | name: Build, unit tests, and docs 2 | 3 | on: 4 | # Triggers the workflow on push or pull request events but only for the main branch 5 | # 
Remove push when finally merging. 6 | push: 7 | branches: [ master ] 8 | pull_request: 9 | branches: [ master ] 10 | 11 | # Allows you to run this workflow manually from the Actions tab. 12 | workflow_dispatch: 13 | inputs: 14 | debug_enabled: 15 | description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)' 16 | required: false 17 | default: false 18 | 19 | jobs: 20 | # This job is called test_docs. 21 | unit_test_and_docs: 22 | # Run on Ubuntu 23 | runs-on: ubuntu-latest 24 | timeout-minutes: 30 25 | # Necessary to prevent mpi tests failing due to lack of slots 26 | env: 27 | OMPI_MCA_btl: self,tcp 28 | OMPI_MCA_rmaps_base_oversubscribe: 1 29 | # Ensures conda environment is initialized for all steps 30 | defaults: 31 | run: 32 | shell: bash -l {0} 33 | 34 | strategy: 35 | fail-fast: false 36 | matrix: 37 | include: 38 | # real versions 39 | - NAME: Real 40 | OPTIONAL: 'debug' 41 | INTERFACE: 'interface' 42 | PUBLISH_DOCS: true 43 | 44 | # complex versions 45 | - NAME: Complex 46 | OPTIONAL: 'complex_debug' 47 | INTERFACE: 'complex_interface' 48 | PUBLISH_DOCS: false 49 | 50 | name: ParOpt ${{ matrix.NAME }} Build/Test/Docs 51 | 52 | # Recommended if you intend to make multiple deployments in quick succession. 
53 | # This will kill any currently running CI from previous commits to the same branch 54 | concurrency: 55 | group: ci-${{ github.ref }}-${{ matrix.NAME }} 56 | cancel-in-progress: true 57 | 58 | steps: 59 | - name: Display run details 60 | run: | 61 | echo "============================================================="; 62 | echo "Run #${GITHUB_RUN_NUMBER}"; 63 | echo "Run ID: ${GITHUB_RUN_ID}"; 64 | lscpu; 65 | echo "Testing: ${GITHUB_REPOSITORY}"; 66 | echo "Triggered by: ${GITHUB_EVENT_NAME}"; 67 | echo "Initiated by: ${GITHUB_ACTOR}"; 68 | echo "============================================================="; 69 | 70 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it. 71 | - uses: actions/checkout@v2 72 | - name: Setup miniconda 73 | uses: conda-incubator/setup-miniconda@v2 74 | with: 75 | auto-update-conda: true 76 | python-version: 3.9 77 | 78 | - name: Install ParOpt 79 | run: | 80 | export PAROPT_DIR=${GITHUB_WORKSPACE}; 81 | echo "PAROPT_DIR=${GITHUB_WORKSPACE}" >> $GITHUB_ENV 82 | conda install -c anaconda openmpi -q -y; 83 | conda install gxx_linux-64=9.3.0 -q -y; 84 | conda install -c anaconda openblas -q -y; 85 | conda install -c conda-forge lapack -q -y; 86 | conda install -c conda-forge metis -q -y; 87 | cd $PAROPT_DIR; 88 | cp Makefile.in.info Makefile.in; 89 | make ${{ matrix.OPTIONAL }} PAROPT_DIR=$PAROPT_DIR METIS_INCLUDE=-I${CONDA_PREFIX}/include/ METIS_LIB="-L${CONDA_PREFIX}/lib/ -lmetis"; 90 | cd $PAROPT_DIR; 91 | make ${{ matrix.INTERFACE }}; 92 | - name: Build docs 93 | run: | 94 | sudo apt-get install -y --no-install-recommends doxygen graphviz 95 | conda install pandoc; 96 | cd $PAROPT_DIR/docs; 97 | doxygen 98 | make html BUILDDIR=.; 99 | cd html; 100 | zip -r ../paropt-docs.zip .; 101 | - name: 'Upload docs' 102 | if: ${{ matrix.PUBLISH_DOCS }} 103 | uses: actions/upload-artifact@v4 104 | with: 105 | name: paropt-docs 106 | path: docs/paropt-docs.zip 107 | retention-days: 7 108 | - name: 'Deploy docs' 109 
| if: ${{ github.event_name == 'push' && matrix.PUBLISH_DOCS }} 110 | uses: JamesIves/github-pages-deploy-action@v4.2.5 111 | with: 112 | branch: gh-pages # The branch the action should deploy to. 113 | folder: docs/html/ # The folder the action should deploy. 114 | # This allows the user to ssh into the github runner and debug a job upon failure 115 | # This will only trigger if the job was run using workflow_dispatch and debug_enabled input flag was set to true 116 | - name: Setup interactive debug session on failure 117 | if: ${{ failure() && github.event.inputs.debug_enabled }} 118 | uses: mxschmitt/action-tmate@v3 119 | # To access the terminal through the web-interface: 120 | # 1. Click on the web-browser link printed out in this action from the github workkflow terminal 121 | # 2. Press cntrl + c in the new tab that opens up to reveal the terminal 122 | # 3. To activate the conda environment used to build ParOpt run: 123 | # $ source $CONDA/etc/profile.d/conda.sh 124 | # $ conda activate test 125 | -------------------------------------------------------------------------------- /examples/random_convex/random_convex.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import mpi4py.MPI as MPI 3 | from paropt import ParOpt 4 | import argparse 5 | import os 6 | import matplotlib.pylab as plt 7 | 8 | 9 | # Create the rosenbrock function class 10 | class ConvexProblem(ParOpt.Problem): 11 | def __init__(self, Q, Affine, b, Acon, bcon): 12 | # Set the communicator pointer 13 | self.comm = MPI.COMM_WORLD 14 | self.nvars = len(b) 15 | self.ncon = 1 16 | 17 | # Record the quadratic terms 18 | self.Q = Q 19 | self.Affine = Affine 20 | self.b = b 21 | self.Acon = Acon 22 | self.bcon = bcon 23 | 24 | self.obj_scale = -1.0 25 | 26 | # Initialize the base class 27 | super(ConvexProblem, self).__init__(self.comm, nvars=self.nvars, ncon=self.ncon) 28 | 29 | return 30 | 31 | def getVarsAndBounds(self, x, lb, ub): 32 | 
"""Set the values of the bounds""" 33 | x[:] = 0.05 + 0.9 * np.random.uniform(size=len(x)) 34 | lb[:] = 0.0 35 | ub[:] = 1.0 36 | return 37 | 38 | def evalObjCon(self, x): 39 | """Evaluate the objective and constraint""" 40 | # Evaluate the objective and constraints 41 | fail = 0 42 | con = np.zeros(1, dtype=ParOpt.dtype) 43 | 44 | # Compute the artificial stiffness matrix 45 | self.K = self.Affine + np.dot(self.Q, np.dot(np.diag(x), self.Q.T)) 46 | 47 | # Compute the displacements 48 | self.u = np.linalg.solve(self.K, self.b) 49 | 50 | # Compute the artifical compliance 51 | fobj = np.dot(self.u, self.b) 52 | 53 | if self.obj_scale < 0.0: 54 | self.obj_scale = 1.0 / fobj 55 | 56 | # Compute the linear constraint 57 | con[0] = self.bcon - np.dot(self.Acon, x) 58 | 59 | return fail, fobj, con 60 | 61 | def evalObjConGradient(self, x, g, A): 62 | """Evaluate the objective and constraint gradient""" 63 | fail = 0 64 | 65 | # The objective gradient 66 | g[:] = -np.dot(self.Q.T, self.u) ** 2 67 | 68 | # The constraint gradient 69 | A[0][:] = -self.Acon[:] 70 | 71 | return fail 72 | 73 | 74 | def create_random_spd(n): 75 | """ 76 | Create a random positive definite matrix with the given 77 | eigenvalues 78 | """ 79 | # Create the eigenvalues for the matrix 80 | eigs = np.random.uniform(size=n) 81 | 82 | # Create a random square (n x n) matrix 83 | B = np.random.uniform(size=(n, n)) 84 | 85 | # Orthogonalize the columns of B so that Q = range(B) = R^{n} 86 | Q, s, v = np.linalg.svd(B) 87 | 88 | # Compute A = Q*diag(eigs)*Q^{T} 89 | A = np.dot(Q, np.dot(np.diag(eigs), Q.T)) 90 | 91 | return A 92 | 93 | 94 | def solve_problem(eigs, filename=None, data_type="orthogonal", use_tr=False): 95 | # Create a random orthogonal Q vector 96 | if data_type == "orthogonal": 97 | B = np.random.uniform(size=(n, n)) 98 | Q, s, v = np.linalg.svd(B) 99 | 100 | # Create a random Affine matrix 101 | Affine = create_random_spd(eigs) 102 | else: 103 | Q = np.random.uniform(size=(n, n)) 104 | 
Affine = np.diag(1e-3 * np.ones(n)) 105 | 106 | # Create the random right-hand-side 107 | b = np.random.uniform(size=n) 108 | 109 | # Create the constraint data 110 | Acon = np.random.uniform(size=n) 111 | bcon = 0.25 * np.sum(Acon) 112 | 113 | # Create the convex problem 114 | problem = ConvexProblem(Q, Affine, b, Acon, bcon) 115 | 116 | options = { 117 | "algorithm": "ip", 118 | "abs_res_tol": 1e-8, 119 | "starting_point_strategy": "affine_step", 120 | "barrier_strategy": "monotone", 121 | "start_affine_multiplier_min": 0.01, 122 | "penalty_gamma": 1000.0, 123 | "qn_subspace_size": 10, 124 | "qn_type": "bfgs", 125 | "output_file": filename, 126 | } 127 | 128 | if use_tr: 129 | options = { 130 | "algorithm": "tr", 131 | "tr_init_size": 0.05, 132 | "tr_min_size": 1e-6, 133 | "tr_max_size": 10.0, 134 | "tr_eta": 0.25, 135 | "tr_adaptive_gamma_update": True, 136 | "tr_max_iterations": 200, 137 | "penalty_gamma": 10.0, 138 | "qn_subspace_size": 10, 139 | "qn_type": "bfgs", 140 | "abs_res_tol": 1e-8, 141 | "output_file": filename, 142 | "tr_output_file": os.path.splitext(filename)[0] + ".tr", 143 | "starting_point_strategy": "affine_step", 144 | "barrier_strategy": "monotone", 145 | "use_line_search": False, 146 | } 147 | 148 | opt = ParOpt.Optimizer(problem, options) 149 | 150 | # Set a new starting point 151 | opt.optimize() 152 | x, z, zw, zl, zu = opt.getOptimizedPoint() 153 | 154 | return x 155 | 156 | 157 | # Parse the arguments 158 | parser = argparse.ArgumentParser() 159 | parser.add_argument("--n", type=int, default=100, help="Dimension of the problem") 160 | parser.add_argument("--algorithm", type=str, default="ip") 161 | args = parser.parse_args() 162 | 163 | use_tr = False 164 | if args.algorithm != "ip": 165 | use_tr = True 166 | 167 | # Set the eigenvalues for the matrix 168 | n = args.n 169 | print("n = ", n) 170 | 171 | np.random.seed(0) 172 | 173 | # Solve the problem 174 | x = solve_problem(n, filename="paropt.out", use_tr=use_tr) 175 | 
-------------------------------------------------------------------------------- /src/ParOptVec.cpp: -------------------------------------------------------------------------------- 1 | #include "ParOptVec.h" 2 | 3 | #include 4 | #include 5 | 6 | #include "ParOptBlasLapack.h" 7 | #include "ParOptComplexStep.h" 8 | 9 | /** 10 | Create a parallel vector for optimization 11 | 12 | @param comm the communicator for this vector 13 | @param n the number of vector components on this processor 14 | */ 15 | ParOptBasicVec::ParOptBasicVec(MPI_Comm _comm, int n) { 16 | comm = _comm; 17 | size = n; 18 | x = new ParOptScalar[size]; 19 | memset(x, 0, size * sizeof(ParOptScalar)); 20 | } 21 | 22 | /** 23 | Free the internally stored data 24 | */ 25 | ParOptBasicVec::~ParOptBasicVec() { delete[] x; } 26 | 27 | /** 28 | Set the vector value 29 | 30 | @param alpha the scalar value to set in all components 31 | */ 32 | void ParOptBasicVec::set(ParOptScalar alpha) { 33 | for (int i = 0; i < size; i++) { 34 | x[i] = alpha; 35 | } 36 | } 37 | 38 | /** 39 | Zero the entries of the vector 40 | */ 41 | void ParOptBasicVec::zeroEntries() { 42 | memset(x, 0, size * sizeof(ParOptScalar)); 43 | } 44 | 45 | /** 46 | Copy the values from the given vector 47 | 48 | @param pvec copy the values from pvec to this vector 49 | */ 50 | void ParOptBasicVec::copyValues(ParOptVec *pvec) { 51 | ParOptBasicVec *vec = dynamic_cast(pvec); 52 | 53 | if (vec) { 54 | memcpy(x, vec->x, size * sizeof(ParOptScalar)); 55 | } 56 | } 57 | 58 | /** 59 | Compute the l2 norm of the vector 60 | 61 | @return the l2 norm of the vector 62 | */ 63 | double ParOptBasicVec::norm() { 64 | double res = 0.0; 65 | #ifdef PAROPT_USE_COMPLEX 66 | for (int i = 0; i < size; i++) { 67 | res += (ParOptRealPart(x[i]) * ParOptRealPart(x[i]) + 68 | ParOptImagPart(x[i]) * ParOptImagPart(x[i])); 69 | } 70 | #else 71 | int one = 1; 72 | res = BLASdnrm2(&size, x, &one); 73 | res *= res; 74 | #endif 75 | 76 | double sum = 0.0; 77 | 
MPI_Allreduce(&res, &sum, 1, MPI_DOUBLE, MPI_SUM, comm); 78 | 79 | return sqrt(sum); 80 | } 81 | 82 | /** 83 | Compute the l-infinity norm of the vector 84 | 85 | @return the l-infinity norm of the vector 86 | */ 87 | double ParOptBasicVec::maxabs() { 88 | double res = 0.0; 89 | for (int i = 0; i < size; i++) { 90 | if (fabs(ParOptRealPart(x[i])) > res) { 91 | res = fabs(ParOptRealPart(x[i])); 92 | } 93 | } 94 | 95 | double infty_norm = 0.0; 96 | MPI_Allreduce(&res, &infty_norm, 1, MPI_DOUBLE, MPI_MAX, comm); 97 | 98 | return infty_norm; 99 | } 100 | 101 | /** 102 | Compute the l1 norm of the vector 103 | 104 | @return the l1 norm of the vector 105 | */ 106 | double ParOptBasicVec::l1norm() { 107 | double res = 0.0; 108 | for (int i = 0; i < size; i++) { 109 | res += fabs(ParOptRealPart(x[i])); 110 | } 111 | 112 | double l1_norm = 0.0; 113 | MPI_Allreduce(&res, &l1_norm, 1, MPI_DOUBLE, MPI_SUM, comm); 114 | 115 | return l1_norm; 116 | } 117 | 118 | /** 119 | Compute the dot-product of two vectors and return the result. 120 | 121 | @param pvec the other vector in the dot product 122 | @return the dot product of the two vectors 123 | */ 124 | ParOptScalar ParOptBasicVec::dot(ParOptVec *pvec) { 125 | ParOptBasicVec *vec = dynamic_cast(pvec); 126 | 127 | ParOptScalar sum = 0.0; 128 | if (vec) { 129 | ParOptScalar res = 0.0; 130 | #ifdef PAROPT_USE_COMPLEX 131 | for (int i = 0; i < size; i++) { 132 | res += x[i] * vec->x[i]; 133 | } 134 | #else 135 | int one = 1; 136 | res = BLASddot(&size, x, &one, vec->x, &one); 137 | #endif 138 | 139 | MPI_Allreduce(&res, &sum, 1, PAROPT_MPI_TYPE, MPI_SUM, comm); 140 | } 141 | 142 | return sum; 143 | } 144 | 145 | /** 146 | Compute multiple dot-products simultaneously. This reduces the 147 | parallel communication overhead. 
148 | 149 | @param pvecs an array of vectors 150 | @param output an array of the dot product results 151 | */ 152 | void ParOptBasicVec::mdot(ParOptVec **pvecs, int nvecs, ParOptScalar *output) { 153 | for (int i = 0; i < nvecs; i++) { 154 | output[i] = 0.0; 155 | ParOptBasicVec *vec = dynamic_cast(pvecs[i]); 156 | 157 | if (vec) { 158 | #ifdef PAROPT_USE_COMPLEX 159 | for (int j = 0; j < size; j++) { 160 | output[i] += x[j] * vec->x[j]; 161 | } 162 | #else 163 | int one = 1; 164 | output[i] = BLASddot(&size, x, &one, vec->x, &one); 165 | #endif 166 | } 167 | } 168 | 169 | MPI_Allreduce(MPI_IN_PLACE, output, nvecs, PAROPT_MPI_TYPE, MPI_SUM, comm); 170 | } 171 | 172 | /** 173 | Scale the components of the vector 174 | 175 | @param alpha the scalar factor 176 | */ 177 | void ParOptBasicVec::scale(ParOptScalar alpha) { 178 | #ifdef PAROPT_USE_COMPLEX 179 | for (int i = 0; i < size; i++) { 180 | x[i] *= alpha; 181 | } 182 | #else 183 | int one = 1; 184 | BLASdscal(&size, &alpha, x, &one); 185 | #endif 186 | } 187 | 188 | /** 189 | Compute: self <- self + alpha*x 190 | */ 191 | void ParOptBasicVec::axpy(ParOptScalar alpha, ParOptVec *pvec) { 192 | ParOptBasicVec *vec = dynamic_cast(pvec); 193 | 194 | if (vec) { 195 | #ifdef PAROPT_USE_COMPLEX 196 | for (int i = 0; i < size; i++) { 197 | x[i] = x[i] + alpha * vec->x[i]; 198 | } 199 | #else 200 | int one = 1; 201 | BLASdaxpy(&size, &alpha, vec->x, &one, x, &one); 202 | #endif 203 | } 204 | } 205 | 206 | /** 207 | Retrieve the locally stored values from the array 208 | 209 | @param array pointer assigned to the memory location of the 210 | local vector components 211 | */ 212 | int ParOptBasicVec::getArray(ParOptScalar **array) { 213 | if (array) { 214 | *array = x; 215 | } 216 | return size; 217 | } 218 | -------------------------------------------------------------------------------- /examples/COPS/electron/electron.py: -------------------------------------------------------------------------------- 1 | """ 2 | ref: 
Benchmarking Optimization Software with COPS 3.0 3 | problem 2. Distribution of Electrons on a Sphere 4 | n: number of electrons 5 | dv: xi, yi, zi, i=0,1,...,n-1 6 | max sum of ((x_i - x_j)^2 + (y_i - y_j)^2 + (z_i - z_j)^2)^{-1/2} 7 | where i=0,1, ...,n-2; j=i+1,...,n-1 8 | s.t. c = x_i^2 + y_i^2 + z_i^2 - 1 = 0, i = 0,1,...,n-1 9 | """ 10 | 11 | from paropt import ParOpt 12 | import mpi4py.MPI as MPI 13 | import numpy as np 14 | import argparse 15 | 16 | 17 | class Electron(ParOpt.Problem): 18 | def __init__(self, n, epsilon): 19 | # Set the communicator pointer 20 | self.comm = MPI.COMM_WORLD 21 | self.n = n 22 | self.nvars = 3 * n 23 | self.num_sparse_constraints = n 24 | self.epsilon = epsilon 25 | 26 | rowp = [0] 27 | cols = [] 28 | for i in range(self.n): 29 | cols.extend([i, n + i, 2 * n + i]) 30 | rowp.append(len(cols)) 31 | 32 | # Initialize the base class 33 | super(Electron, self).__init__( 34 | self.comm, 35 | nvars=self.nvars, 36 | num_sparse_constraints=self.num_sparse_constraints, 37 | num_sparse_inequalities=0, 38 | rowp=rowp, 39 | cols=cols, 40 | ) 41 | 42 | return 43 | 44 | def getVarsAndBounds(self, x, lb, ub): 45 | """Set the values of the bounds""" 46 | n = self.n 47 | 48 | # x = [x_1, ..., x_n, y_1, ..., y_n, z_1, ..., z_n] 49 | np.random.seed(0) 50 | alpha = np.random.uniform(low=0.0, high=2 * np.pi, size=n) 51 | beta = np.random.uniform(low=-np.pi, high=np.pi, size=n) 52 | for i in range(n): 53 | x[i] = np.cos(beta[i]) * np.cos(alpha[i]) 54 | x[n + i] = np.cos(beta[i]) * np.sin(alpha[i]) 55 | x[2 * n + i] = np.sin(beta[i]) 56 | 57 | lb[:] = -10.0 58 | ub[:] = 10.0 59 | 60 | return 61 | 62 | def evalSparseObjCon(self, x, sparse_cons): 63 | """Evaluate the objective and constraint""" 64 | n = self.n 65 | epsilon = self.epsilon 66 | _x = x[:n] 67 | _y = x[n : 2 * n] 68 | _z = x[2 * n :] 69 | 70 | fobj = 0.0 71 | for i in range(n - 2): 72 | for j in range(i + 1, n - 1): 73 | dsq = (_x[i] - _x[j]) ** 2 + (_y[i] - _y[j]) ** 2 + (_z[i] - _z[j]) 
** 2 74 | if dsq < epsilon: 75 | dsq = epsilon 76 | fobj += dsq ** (-1 / 2) 77 | 78 | for i in range(n): 79 | sparse_cons[i] = 1.0 - (_x[i] ** 2 + _y[i] ** 2 + _z[i] ** 2) 80 | 81 | con = [] 82 | fail = 0 83 | 84 | return fail, fobj, con 85 | 86 | def evalSparseObjConGradient(self, x, g, A, data): 87 | """Evaluate the objective and constraint gradient""" 88 | n = self.n 89 | epsilon = self.epsilon 90 | _x = x[:n] 91 | _y = x[n : 2 * n] 92 | _z = x[2 * n :] 93 | 94 | g[:] = 0.0 95 | for i in range(n - 2): 96 | for j in range(i + 1, n - 1): 97 | dsq = (_x[i] - _x[j]) ** 2 + (_y[i] - _y[j]) ** 2 + (_z[i] - _z[j]) ** 2 98 | if dsq < epsilon: 99 | dsq = epsilon 100 | else: 101 | fact = dsq ** (-3 / 2) 102 | g[i] += -(_x[i] - _x[j]) * fact 103 | g[j] += (_x[i] - _x[j]) * fact 104 | g[n + i] += -(_y[i] - _y[j]) * fact 105 | g[n + j] += (_y[i] - _y[j]) * fact 106 | g[2 * n + i] += -(_z[i] - _z[j]) * fact 107 | g[2 * n + j] += (_z[i] - _z[j]) * fact 108 | 109 | for i in range(n): 110 | data[3 * i] = -2.0 * _x[i] 111 | data[3 * i + 1] = -2.0 * _y[i] 112 | data[3 * i + 2] = -2.0 * _z[i] 113 | 114 | fail = 0 115 | 116 | return fail 117 | 118 | 119 | if __name__ == "__main__": 120 | # Parse the command line arguments 121 | parser = argparse.ArgumentParser() 122 | parser.add_argument("--algorithm", type=str, default="tr") 123 | parser.add_argument("--n", type=int, default=10, help="number of electron") 124 | args = parser.parse_args() 125 | 126 | use_tr = False 127 | if args.algorithm != "ip": 128 | use_tr = True 129 | 130 | # use interior point algorithm 131 | options = { 132 | "algorithm": "ip", 133 | "norm_type": "infinity", 134 | "qn_type": "bfgs", 135 | "qn_subspace_size": 10, 136 | "starting_point_strategy": "least_squares_multipliers", 137 | "qn_update_type": "damped_update", 138 | "abs_res_tol": 1e-6, 139 | "barrier_strategy": "monotone", 140 | "armijo_constant": 1e-5, 141 | "penalty_gamma": 100.0, 142 | "max_major_iters": 500, 143 | } 144 | 145 | # use trust region 
algorithm 146 | if use_tr: 147 | options = { 148 | "algorithm": "tr", 149 | "output_level": 0, 150 | "tr_l1_tol": 1e-30, 151 | "tr_linfty_tol": 1e-30, 152 | "tr_init_size": 0.05, 153 | "tr_min_size": 1e-6, 154 | "tr_max_size": 1e3, 155 | "tr_eta": 0.25, 156 | "tr_adaptive_gamma_update": False, 157 | "tr_accept_step_strategy": "penalty_method", 158 | "tr_use_soc": False, 159 | "tr_max_iterations": 200, 160 | "max_major_iters": 100, 161 | } 162 | 163 | problem = Electron(args.n, 1e-15) 164 | problem.checkGradients() 165 | opt = ParOpt.Optimizer(problem, options) 166 | opt.optimize() 167 | -------------------------------------------------------------------------------- /examples/rosenbrock/sparse_rosenbrock.cpp: -------------------------------------------------------------------------------- 1 | #include "ParOptOptimizer.h" 2 | 3 | /* 4 | The following is a simple implementation of a scalable Rosenbrock 5 | function with constraints that can be used to test the parallel 6 | optimizer. 7 | */ 8 | 9 | class SparseRosenbrock : public ParOptSparseProblem { 10 | public: 11 | SparseRosenbrock(MPI_Comm comm, int _nvars) : ParOptSparseProblem(comm) { 12 | // Set the base class problem sizes 13 | setProblemSizes(_nvars, 2, _nvars - 1); 14 | 15 | setNumInequalities(2, _nvars - 1); 16 | 17 | // Set the non-zero pattern for the inequality constraints 18 | int *rowp = new int[nwcon + 1]; 19 | int *cols = new int[2 * nwcon]; 20 | 21 | rowp[0] = 0; 22 | for (int i = 0; i < nwcon; i++) { 23 | rowp[i + 1] = 2 * (i + 1); 24 | cols[2 * i] = i; 25 | cols[2 * i + 1] = i + 1; 26 | } 27 | setSparseJacobianData(rowp, cols); 28 | delete[] rowp; 29 | delete[] cols; 30 | } 31 | 32 | int isSparseInequality() { return 1; } 33 | 34 | //! 
Get the variables/bounds 35 | void getVarsAndBounds(ParOptVec *xvec, ParOptVec *lbvec, ParOptVec *ubvec) { 36 | ParOptScalar *x, *lb, *ub; 37 | xvec->getArray(&x); 38 | lbvec->getArray(&lb); 39 | ubvec->getArray(&ub); 40 | 41 | // Set the design variable bounds 42 | for (int i = 0; i < nvars; i++) { 43 | x[i] = -1.0; 44 | lb[i] = -2.0; 45 | ub[i] = 2.0; 46 | } 47 | } 48 | 49 | //! Evaluate the objective and constraints 50 | int evalSparseObjCon(ParOptVec *xvec, ParOptScalar *fobj, ParOptScalar *cons, 51 | ParOptVec *sparse) { 52 | ParOptScalar obj = 0.0; 53 | ParOptScalar *x; 54 | xvec->getArray(&x); 55 | 56 | for (int i = 0; i < nvars - 1; i++) { 57 | obj += ((1.0 - x[i]) * (1.0 - x[i]) + 58 | 100.0 * (x[i + 1] - x[i] * x[i]) * (x[i + 1] - x[i] * x[i])); 59 | } 60 | 61 | ParOptScalar con[2]; 62 | con[0] = con[1] = 0.0; 63 | for (int i = 0; i < nvars; i++) { 64 | con[0] -= x[i] * x[i]; 65 | } 66 | 67 | for (int i = 0; i < nvars; i += 2) { 68 | con[1] += x[i]; 69 | } 70 | 71 | MPI_Allreduce(&obj, fobj, 1, PAROPT_MPI_TYPE, MPI_SUM, comm); 72 | MPI_Allreduce(con, cons, 2, PAROPT_MPI_TYPE, MPI_SUM, comm); 73 | 74 | cons[0] += 0.25; 75 | cons[1] += 10.0; 76 | 77 | // Evaluate the sparse constraints 78 | ParOptScalar *c; 79 | sparse->getArray(&c); 80 | for (int i = 0; i < nwcon; i++) { 81 | c[i] = 1.0 - x[i] * x[i] - x[i + 1] * x[i + 1]; 82 | } 83 | 84 | return 0; 85 | } 86 | 87 | //! 
Evaluate the objective and constraint gradients 88 | int evalSparseObjConGradient(ParOptVec *xvec, ParOptVec *gvec, ParOptVec **Ac, 89 | ParOptScalar *data) { 90 | ParOptScalar *x, *g, *c; 91 | xvec->getArray(&x); 92 | gvec->getArray(&g); 93 | gvec->zeroEntries(); 94 | 95 | for (int i = 0; i < nvars - 1; i++) { 96 | g[i] += (-2.0 * (1.0 - x[i]) + 97 | 200.0 * (x[i + 1] - x[i] * x[i]) * (-2.0 * x[i])); 98 | g[i + 1] += 200.0 * (x[i + 1] - x[i] * x[i]); 99 | } 100 | 101 | Ac[0]->getArray(&c); 102 | for (int i = 0; i < nvars; i++) { 103 | c[i] = -2.0 * x[i]; 104 | } 105 | 106 | Ac[1]->getArray(&c); 107 | for (int i = 0; i < nvars; i += 2) { 108 | c[i] = 1.0; 109 | } 110 | 111 | // Compute the sparse constraint Jacobian 112 | for (int i = 0; i < nwcon; i++) { 113 | data[2 * i] = -2.0 * x[i]; 114 | data[2 * i + 1] = -2.0 * x[i + 1]; 115 | } 116 | 117 | return 0; 118 | } 119 | }; 120 | 121 | int main(int argc, char *argv[]) { 122 | MPI_Init(&argc, &argv); 123 | 124 | // Set the MPI communicator 125 | MPI_Comm comm = MPI_COMM_WORLD; 126 | 127 | // Get the rank 128 | int mpi_rank = 0; 129 | MPI_Comm_rank(comm, &mpi_rank); 130 | 131 | // Get the prefix from the input arguments 132 | int nvars = 100; 133 | const char *prefix = NULL; 134 | char buff[512]; 135 | for (int k = 0; k < argc; k++) { 136 | if (sscanf(argv[k], "prefix=%s", buff) == 1) { 137 | prefix = buff; 138 | } 139 | if (sscanf(argv[k], "nvars=%d", &nvars) == 1) { 140 | if (nvars < 100) { 141 | nvars = 100; 142 | } 143 | } 144 | } 145 | 146 | if (mpi_rank == 0) { 147 | printf("prefix = %s\n", prefix); 148 | fflush(stdout); 149 | } 150 | 151 | // Allocate the Rosenbrock function 152 | SparseRosenbrock *rosen = new SparseRosenbrock(comm, nvars); 153 | rosen->incref(); 154 | 155 | // Create the options class, and create default values 156 | ParOptOptions *options = new ParOptOptions(); 157 | ParOptOptimizer::addDefaultOptions(options); 158 | 159 | options->setOption("algorithm", "tr"); 160 | 
options->setOption("barrier_strategy", "mehrotra"); 161 | options->setOption("output_level", 0); 162 | options->setOption("qn_type", "bfgs"); 163 | options->setOption("qn_subspace_size", 10); 164 | options->setOption("abs_res_tol", 1e-7); 165 | options->setOption("output_file", "paropt.out"); 166 | options->setOption("tr_output_file", "paropt.tr"); 167 | options->setOption("mma_output_file", "paropt.mma"); 168 | options->setOption("use_line_search", 0); 169 | 170 | ParOptOptimizer *opt = new ParOptOptimizer(rosen, options); 171 | opt->incref(); 172 | 173 | // Set the checkpoint file 174 | double start = MPI_Wtime(); 175 | if (prefix) { 176 | char output[512]; 177 | snprintf(output, sizeof(output), "%s/rosenbrock_output.bin", prefix); 178 | options->setOption("ip_checkpoint_file", output); 179 | } 180 | opt->optimize(); 181 | double diff = MPI_Wtime() - start; 182 | 183 | if (mpi_rank == 0) { 184 | printf("ParOpt time: %f seconds \n", diff); 185 | } 186 | 187 | opt->decref(); 188 | rosen->decref(); 189 | 190 | MPI_Finalize(); 191 | return (0); 192 | } 193 | -------------------------------------------------------------------------------- /src/ParOptSparseMat.h: -------------------------------------------------------------------------------- 1 | #ifndef PAR_OPT_SPARSE_MAT_H 2 | #define PAR_OPT_SPARSE_MAT_H 3 | 4 | /* 5 | Forward declare the matrices 6 | */ 7 | class ParOptQuasiDefMat; 8 | class ParOptQuasiDefSparseMat; 9 | 10 | #include "ParOptProblem.h" 11 | #include "ParOptSparseCholesky.h" 12 | #include "ParOptSparseUtils.h" 13 | #include "ParOptVec.h" 14 | 15 | /* 16 | Abstract base class for the quasi-definite matrix 17 | */ 18 | class ParOptQuasiDefMat : public ParOptBase { 19 | public: 20 | virtual ~ParOptQuasiDefMat() {} 21 | 22 | /* 23 | Factor the matrix 24 | */ 25 | virtual int factor(ParOptVec *x, ParOptVec *Dinv, ParOptVec *Cdiag) = 0; 26 | 27 | /** 28 | Solve the quasi-definite system of equations 29 | 30 | [ D Aw^{T} ][ yx ] = [ bx ] 31 | [ Aw - C ][ 
-yw ] = [ 0 ] 32 | 33 | Here bx is unmodified. Note the negative sign on the yw variables. 34 | 35 | @param bx the design variable right-hand-side 36 | @param yx the design variable solution 37 | @param yw the sparse multiplier solution 38 | */ 39 | virtual void apply(ParOptVec *bx, ParOptVec *yx, ParOptVec *yw) = 0; 40 | 41 | /** 42 | Solve the quasi-definite system of equations 43 | 44 | [ D Aw^{T} ][ yx ] = [ bx ] 45 | [ Aw - C ][ -yw ] = [ bw ] 46 | 47 | In the call bx and bw must remain unmodified. Note the negative sign on the 48 | yw variables. 49 | 50 | @param bx the design variable right-hand-side 51 | @param bx the sparse multiplier right-hand-side 52 | @param yx the design variable solution 53 | @param yw the sparse multiplier solution 54 | */ 55 | virtual void apply(ParOptVec *bx, ParOptVec *bw, ParOptVec *yx, 56 | ParOptVec *yw) = 0; 57 | 58 | /* 59 | Get a description of the factorization for the print file 60 | */ 61 | virtual const char *getFactorInfo() { return NULL; } 62 | }; 63 | 64 | /* 65 | Interface for the quasi-definite matrix 66 | 67 | [ D Aw^{T} ] 68 | [ Aw -C ] 69 | 70 | The goal of this interface is to provide access to the factorization of the 71 | matrix without explicitly dictating how the constraints are stored. 72 | */ 73 | class ParOptQuasiDefBlockMat : public ParOptQuasiDefMat { 74 | public: 75 | ParOptQuasiDefBlockMat(ParOptProblem *prob0, int _nwblock); 76 | ~ParOptQuasiDefBlockMat(); 77 | 78 | /** 79 | Factor the matrix 80 | 81 | @param x The design variables 82 | @param Dinv The diagonal inverse of the D matrix (size of dvs) 83 | @param C The diagonal for multipliers (size of sparse multipliers) 84 | */ 85 | int factor(ParOptVec *x, ParOptVec *Dinv, ParOptVec *C); 86 | 87 | /** 88 | Solve the quasi-definite system of equations 89 | 90 | [ D Aw^{T} ][ yx ] = [ bx ] 91 | [ Aw - C ][ -yw ] = [ 0 ] 92 | 93 | Here bx is unmodified. Note the negative sign on the yw variables. 
94 | 95 | @param bx Design variable right-hand-side (not modified) 96 | @param yx Design variable solution output 97 | @param yw Multiplier variable solution output 98 | */ 99 | void apply(ParOptVec *bx, ParOptVec *yx, ParOptVec *yw); 100 | 101 | /** 102 | Solve the quasi-definite system of equations 103 | 104 | [ D Aw^{T} ][ yx ] = [ bx ] 105 | [ Aw - C ][ -yw ] = [ bw ] 106 | 107 | In the call bx and bw must remain unmodified. Note the negative sign on the 108 | yw variables. 109 | 110 | @param bx Design variable right-hand-side (not modified) 111 | @param bw Multiplier right-hand-side (not modified) 112 | @param yx Design variable solution output 113 | @param yw Multiplier variable solution output 114 | */ 115 | void apply(ParOptVec *bx, ParOptVec *bw, ParOptVec *yx, ParOptVec *yw); 116 | 117 | /* 118 | Get a description of the factorization for the print file 119 | */ 120 | const char *getFactorInfo(); 121 | 122 | private: 123 | /* 124 | Apply the factored Cw-matrix that is stored as a series of block-symmetric 125 | matrices. 
126 | */ 127 | int applyFactor(ParOptVec *vec); 128 | 129 | // Problem data 130 | ParOptProblem *prob; 131 | 132 | // Vectors that point to the input data 133 | ParOptVec *x, *Dinv, *C; 134 | 135 | // The data for the block-diagonal matrix 136 | int nvars; // The number of variables 137 | int nwcon; // The number of sparse constraints 138 | int nwblock; // The nuber of constraints per block 139 | ParOptScalar *Cw; // Block diagonal matrix 140 | 141 | // Information about the factorization 142 | char info[128]; 143 | }; 144 | 145 | /* 146 | Interface for a generic sparse quasi-definite matrix 147 | */ 148 | class ParOptQuasiDefSparseMat : public ParOptQuasiDefMat { 149 | public: 150 | ParOptQuasiDefSparseMat(ParOptSparseProblem *problem); 151 | ~ParOptQuasiDefSparseMat(); 152 | 153 | int factor(ParOptVec *x, ParOptVec *Dinv, ParOptVec *C); 154 | void apply(ParOptVec *bx, ParOptVec *yx, ParOptVec *yw); 155 | void apply(ParOptVec *bx, ParOptVec *bw, ParOptVec *yx, ParOptVec *yw); 156 | const char *getFactorInfo(); 157 | 158 | private: 159 | // The sparse problem 160 | ParOptSparseProblem *prob; 161 | 162 | // Sparse Cholesky factorization 163 | ParOptSparseCholesky *chol; 164 | 165 | // Vectors that point to the input data 166 | ParOptVec *Dinv; 167 | 168 | // Number of variables 169 | int nvars, nwcon; 170 | 171 | // Number of dense or nearly dense columns in A with over 50 % fill in 172 | int ndense; 173 | 174 | // Non-zero pattern of the Jacobian matrix transpose 175 | int *colp, *rows; 176 | ParOptScalar *Atvals; 177 | 178 | // The values of the Schur complement C + A * D^{-1} * A^{T} 179 | int *Kcolp, *Krows; 180 | ParOptScalar *Kvals; 181 | 182 | // Right-hand-side/solution data 183 | ParOptScalar *rhs; 184 | 185 | // Information about the factorization 186 | char info[128]; 187 | }; 188 | 189 | #endif // PAR_OPT_SPARSE_MAT_H -------------------------------------------------------------------------------- /examples/random_quadratic/random_quadratic.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import mpi4py.MPI as MPI 3 | from paropt import ParOpt 4 | import argparse 5 | import os 6 | import matplotlib.pylab as plt 7 | 8 | 9 | # Random quadratic problem class 10 | class Quadratic(ParOpt.Problem): 11 | def __init__(self, A, b, Acon, bcon): 12 | # Set the communicator pointer 13 | self.comm = MPI.COMM_WORLD 14 | self.nvars = len(b) 15 | self.ncon = 1 16 | 17 | # Record the quadratic terms 18 | self.A = A 19 | self.b = b 20 | self.Acon = Acon 21 | self.bcon = bcon 22 | 23 | # Initialize the base class 24 | super(Quadratic, self).__init__(self.comm, nvars=self.nvars, ncon=self.ncon) 25 | 26 | return 27 | 28 | def getVarsAndBounds(self, x, lb, ub): 29 | """Set the values of the bounds""" 30 | x[:] = -2.0 + np.random.uniform(size=len(x)) 31 | lb[:] = -5.0 32 | ub[:] = 5.0 33 | return 34 | 35 | def evalObjCon(self, x): 36 | """Evaluate the objective and constraint""" 37 | # Append the point to the solution history 38 | 39 | # Evaluate the objective and constraints 40 | fail = 0 41 | con = np.zeros(1, dtype=ParOpt.dtype) 42 | 43 | fobj = 0.5 * np.dot(x, np.dot(self.A, x)) + np.dot(self.b, x) 44 | con[0] = np.dot(x, self.Acon) + self.bcon 45 | return fail, fobj, con 46 | 47 | def evalObjConGradient(self, x, g, A): 48 | """Evaluate the objective and constraint gradient""" 49 | fail = 0 50 | 51 | # The objective gradient 52 | g[:] = np.dot(self.A, x) + self.b 53 | 54 | # The constraint gradient 55 | A[0][:] = self.Acon[:] 56 | 57 | return fail 58 | 59 | 60 | def create_random_problem(eigs): 61 | """ 62 | Create a random positive definite matrix with the given 63 | eigenvalues 64 | """ 65 | 66 | # The dimension of the matrix 67 | n = len(eigs) 68 | 69 | # Create a random square (n x n) matrix 70 | B = np.random.uniform(size=(n, n)) 71 | 72 | # Orthogonalize the columns of B so that Q = range(B) = R^{n} 73 | Q, s, v = np.linalg.svd(B) 74 | 75 | # Compute A = 
Q*diag(eigs)*Q^{T} 76 | A = np.dot(Q, np.dot(np.diag(eigs), Q.T)) 77 | 78 | return A 79 | 80 | 81 | def solve_problem(eigs, filename=None, use_stdout=False, use_tr=False): 82 | # Get the A matrix 83 | A = create_random_problem(eigs) 84 | 85 | # Create the other problem data 86 | b = np.random.uniform(size=len(eigs)) 87 | Acon = np.random.uniform(size=len(eigs)) 88 | bcon = np.random.uniform() 89 | 90 | problem = Quadratic(A, b, Acon, bcon) 91 | 92 | options = { 93 | "algorithm": "ip", 94 | "abs_res_tol": 1e-8, 95 | "starting_point_strategy": "affine_step", 96 | "barrier_strategy": "monotone", 97 | "start_affine_multiplier_min": 0.01, 98 | "penalty_gamma": 1000.0, 99 | "qn_subspace_size": 10, 100 | "qn_type": "bfgs", 101 | "output_file": filename, 102 | } 103 | 104 | if use_tr: 105 | options = { 106 | "algorithm": "tr", 107 | "tr_init_size": 0.05, 108 | "tr_min_size": 1e-6, 109 | "tr_max_size": 10.0, 110 | "tr_eta": 0.25, 111 | "tr_adaptive_gamma_update": True, 112 | "tr_max_iterations": 1000, 113 | "penalty_gamma": 10.0, 114 | "qn_subspace_size": 10, 115 | "qn_type": "bfgs", 116 | "abs_res_tol": 1e-8, 117 | "output_file": filename, 118 | "tr_output_file": os.path.splitext(filename)[0] + ".tr", 119 | "starting_point_strategy": "affine_step", 120 | "barrier_strategy": "monotone", 121 | "use_line_search": False, 122 | } 123 | 124 | opt = ParOpt.Optimizer(problem, options) 125 | 126 | # Set a new starting point 127 | opt.optimize() 128 | x, z, zw, zl, zu = opt.getOptimizedPoint() 129 | 130 | return 131 | 132 | 133 | # Parse the arguments 134 | parser = argparse.ArgumentParser() 135 | parser.add_argument("--algorithm", type=str, default="ip") 136 | parser.add_argument("--n", type=int, default=100, help="Dimension of the problem") 137 | parser.add_argument("--eig_min", type=float, default=1.0, help="Minimum eigenvalue") 138 | parser.add_argument("--eig_max", type=float, default=1e5, help="Minimum eigenvalue") 139 | parser.add_argument("--use_stdout", dest="use_stdout", 
action="store_true") 140 | parser.set_defaults(use_stdout=False) 141 | args = parser.parse_args() 142 | 143 | use_tr = False 144 | if args.algorithm != "ip": 145 | use_tr = True 146 | 147 | # Set the eigenvalues for the matrix 148 | n = args.n 149 | eig_min = args.eig_min 150 | eig_max = args.eig_max 151 | use_stdout = args.use_stdout 152 | 153 | print("n = ", n) 154 | print("eig_min = %g" % (eig_min)) 155 | print("eig_max = %g" % (eig_max)) 156 | print("cond = %g" % (eig_max / eig_min)) 157 | 158 | # Solve the problem with linear spacing of eigenvalues 159 | eigs_linear = np.linspace(eig_min, eig_max, n) 160 | 161 | # Solve the problem with a clustered spacing of the eigenvalues 162 | eigs_clustered = np.zeros(n) 163 | for i in range(1, n + 1): 164 | u = (1.0 * n) / (n - 1) * (1.0 / (n + 1 - i) - 1.0 / n) 165 | eigs_clustered[i - 1] = eig_min + (eig_max - eig_min) * u**0.9 166 | 167 | # Solve the two problem types 168 | solve_problem( 169 | eigs_linear, filename="opt_linear_eigs.out", use_stdout=use_stdout, use_tr=use_tr 170 | ) 171 | solve_problem( 172 | eigs_clustered, 173 | filename="opt_cluster_eigs.out", 174 | use_stdout=use_stdout, 175 | use_tr=use_tr, 176 | ) 177 | 178 | plt.plot(range(1, n + 1), eigs_linear, "-o", linewidth=2, label="linear") 179 | plt.plot(range(1, n + 1), eigs_clustered, "-s", linewidth=2, label="clustered") 180 | plt.xlabel("Index", fontsize=17) 181 | plt.ylabel("Eigenvalue", fontsize=17) 182 | plt.legend() 183 | plt.show() 184 | -------------------------------------------------------------------------------- /examples/openmdao/simple.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script is used to debug PyOptDriver() 3 | """ 4 | 5 | import numpy as np 6 | import openmdao.api as om 7 | import dymos as dm 8 | import matplotlib 9 | import matplotlib.pyplot as plt 10 | import argparse 11 | from paropt.paropt_driver import ParOptDriver 12 | 13 | 14 | class 
SimpleODE(om.ExplicitComponent): 15 | def initialize(self): 16 | self.options.declare("num_nodes", types=int) 17 | 18 | def setup(self): 19 | nn = self.options["num_nodes"] 20 | 21 | # Inputs 22 | self.add_input("m", val=1.0, desc="mass", units="kg") 23 | self.add_input("v", val=np.zeros(nn), desc="velocity", units="m/s") 24 | self.add_input("u", val=np.zeros(nn), desc="control force", units="N") 25 | self.add_output( 26 | "xdot", val=np.zeros(nn), desc="horizontal velocity", units="m/s" 27 | ) 28 | self.add_output( 29 | "vdot", val=np.zeros(nn), desc="acceleration mag.", units="m/s**2" 30 | ) 31 | self.add_output( 32 | "Jdot", 33 | val=np.zeros(nn), 34 | desc="time derivative of total control", 35 | units="N**2", 36 | ) 37 | 38 | # Setup partials 39 | arange = np.arange(self.options["num_nodes"], dtype=int) 40 | self.declare_partials(of="xdot", wrt="v", rows=arange, cols=arange) 41 | self.declare_partials(of="vdot", wrt="u", rows=arange, cols=arange) 42 | self.declare_partials(of="Jdot", wrt="u", rows=arange, cols=arange) 43 | 44 | def compute(self, inputs, outputs): 45 | u = inputs["u"] 46 | v = inputs["v"] 47 | m = inputs["m"] 48 | 49 | outputs["xdot"] = v 50 | outputs["vdot"] = u / m 51 | outputs["Jdot"] = u**2 52 | 53 | def compute_partials(self, inputs, jacobian): 54 | u = inputs["u"] 55 | m = inputs["m"] 56 | 57 | jacobian["xdot", "v"] = 1.0 58 | jacobian["vdot", "u"] = 1.0 / m 59 | jacobian["Jdot", "u"] = 2 * u 60 | 61 | 62 | # Add options 63 | parser = argparse.ArgumentParser() 64 | parser.add_argument( 65 | "--driver", 66 | default="paropt", 67 | choices=["paropt", "scipy", "pyoptsparse"], 68 | help="driver", 69 | ) 70 | args = parser.parse_args() 71 | 72 | driver = args.driver 73 | 74 | # Define the OpenMDAO problem 75 | p = om.Problem(model=om.Group()) 76 | 77 | # Define a Trajectory object 78 | traj = dm.Trajectory() 79 | 80 | p.model.add_subsystem("traj", subsys=traj) 81 | 82 | # Define a Dymos Phase object with GaussLobatto Transcription 83 | phase = 
dm.Phase( 84 | ode_class=SimpleODE, transcription=dm.GaussLobatto(num_segments=10, order=3) 85 | ) 86 | 87 | traj.add_phase(name="phase0", phase=phase) 88 | 89 | # Set the time options 90 | phase.set_time_options(fix_initial=True, fix_duration=True, duration_val=5.0, units="s") 91 | 92 | # Define state variables 93 | phase.add_state("x", fix_initial=True, fix_final=True, units="m", rate_source="xdot") 94 | phase.add_state( 95 | "J", fix_initial=True, fix_final=False, units="N*N*s", rate_source="Jdot" 96 | ) 97 | phase.add_state( 98 | "v", 99 | fix_initial=True, 100 | fix_final=True, 101 | units="m/s", 102 | rate_source="vdot", 103 | targets=["v"], 104 | ) 105 | 106 | # Define control variable 107 | phase.add_control( 108 | name="u", 109 | units="N", 110 | lower=-10.0, 111 | upper=10.0, 112 | targets=["u"], 113 | fix_initial=False, 114 | fix_final=False, 115 | ) 116 | 117 | # Minimize final time. 118 | phase.add_objective("J", loc="final") 119 | 120 | # Set the driver. 121 | if driver == "pyoptsparse": 122 | p.driver = om.pyOptSparseDriver() 123 | p.driver.options["optimizer"] = "ParOpt" 124 | p.driver.opt_settings["algorithm"] = "tr" 125 | p.driver.opt_settings["tr_max_iterations"] = 100 126 | p.driver.opt_settings["tr_max_size"] = 100.0 127 | elif driver == "paropt": 128 | p.driver = ParOptDriver() 129 | p.driver.options["algorithm"] = "tr" 130 | p.driver.options["tr_max_iterations"] = 100 131 | p.driver.options["tr_max_size"] = 100.0 132 | elif driver == "scipy": 133 | p.driver = om.ScipyOptimizeDriver() 134 | 135 | # Setup the problem 136 | p.setup(check=True) 137 | 138 | # Now that the OpenMDAO problem is setup, we can set the values of the states. 
139 | p.set_val( 140 | "traj.phase0.states:x", phase.interpolate(ys=[0, 5], nodes="state_input"), units="m" 141 | ) 142 | 143 | p.set_val( 144 | "traj.phase0.states:v", 145 | phase.interpolate(ys=[0, 0], nodes="state_input"), 146 | units="m/s", 147 | ) 148 | 149 | p.set_val( 150 | "traj.phase0.controls:u", 151 | phase.interpolate(ys=[0, 0], nodes="control_input"), 152 | units="N", 153 | ) 154 | 155 | # Run the driver to solve the problem 156 | p.run_driver() 157 | 158 | # Check the validity of our results by using scipy.integrate.solve_ivp to 159 | # integrate the solution. 160 | sim_out = traj.simulate() 161 | 162 | # Plot the results 163 | fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 4.5)) 164 | 165 | axes[0].plot( 166 | p.get_val("traj.phase0.timeseries.time"), 167 | p.get_val("traj.phase0.timeseries.states:x"), 168 | "ro", 169 | label="solution", 170 | ) 171 | 172 | axes[0].plot( 173 | sim_out.get_val("traj.phase0.timeseries.time"), 174 | sim_out.get_val("traj.phase0.timeseries.states:x"), 175 | "b-", 176 | label="simulation", 177 | ) 178 | 179 | axes[0].set_xlabel("time") 180 | axes[0].set_ylabel("x") 181 | axes[0].legend() 182 | axes[0].grid() 183 | 184 | axes[1].plot( 185 | p.get_val("traj.phase0.timeseries.time"), 186 | p.get_val("traj.phase0.timeseries.controls:u"), 187 | "ro", 188 | label="solution", 189 | ) 190 | 191 | axes[1].plot( 192 | sim_out.get_val("traj.phase0.timeseries.time"), 193 | sim_out.get_val("traj.phase0.timeseries.controls:u"), 194 | "b-", 195 | label="simulation", 196 | ) 197 | 198 | axes[1].set_xlabel("time") 199 | axes[1].set_ylabel("u") 200 | axes[1].legend() 201 | axes[1].grid() 202 | 203 | plt.show() 204 | -------------------------------------------------------------------------------- /src/ParOptSparseCholesky.h: -------------------------------------------------------------------------------- 1 | #ifndef PAR_OPT_SPARSE_CHOLESKY_H 2 | #define PAR_OPT_SPARSE_CHOLESKY_H 3 | 4 | #include "ParOptComplexStep.h" 5 | #include 
"ParOptVec.h" 6 | 7 | enum ParOptOrderingType { 8 | PAROPT_NATURAL_ORDER, 9 | PAROPT_AMD_ORDER, 10 | PAROPT_ND_ORDER, 11 | }; 12 | 13 | /* 14 | Class for the sparse Cholesky factorization. 15 | 16 | This class computes the Cholesky factorization of the matrix A such that 17 | 18 | L * L^{T} = P * A * P^{T} 19 | 20 | where P is the optional permutation matrix. 21 | 22 | This code uses a supernode/supervariable approach in which groups of columns 23 | with the same nonzero pattern are aggregated into a single block column. This 24 | enables the use of more level-3 BLAS. 25 | 26 | This is used as one method to solve the sparse systems that arise in the 27 | interior point method. 28 | */ 29 | class ParOptSparseCholesky { 30 | public: 31 | ParOptSparseCholesky(int _size, const int *Acolp, const int *Arows, 32 | ParOptOrderingType order = PAROPT_ND_ORDER, 33 | const int *_perm = NULL); 34 | ~ParOptSparseCholesky(); 35 | 36 | // Set values into the Cholesky matrix 37 | void setValues(int n, const int Acolp[], const int Arows[], 38 | const ParOptScalar Avals[]); 39 | 40 | // Factor the matrix 41 | int factor(); 42 | 43 | // Solve the factored system with the specified right-hand-side 44 | void solve(ParOptScalar *x); 45 | 46 | // Get information about the factorization 47 | void getInfo(int *_size, int *_num_snodes, int *_nnzL); 48 | 49 | private: 50 | // Build the elimination tree/forest 51 | void buildForest(const int Acolp[], const int Arows[], int parent[], 52 | int Lnz[]); 53 | 54 | // Initialize the supernodes/supervariables by detecting identical column 55 | // non-zero patterns 56 | int initSupernodes(const int parent[], const int Lnz[], int vtn[]); 57 | 58 | // Build the non-zero pattern for the Cholesky factorization 59 | void buildNonzeroPattern(const int Acolp[], const int Arows[], 60 | const int parent[], int Lnz[]); 61 | 62 | // Perform the update to the diagonal matrix 63 | void updateDiag(const int lsize, const int nlrows, const int lfirst_var, 64 | const 
int *lrows, ParOptScalar *L, const int diag_size, 65 | ParOptScalar *diag, ParOptScalar *work); 66 | 67 | // Apply the update to the work column - uses BLAS level 3 68 | void updateWorkColumn(int lsize, int nl1rows, ParOptScalar *L1, int nl2rows, 69 | ParOptScalar *L2, ParOptScalar *T); 70 | 71 | // Apply the sparse column update 72 | void updateColumn(const int lwidth, const int nlcols, const int lfirst_var, 73 | const int *lrows, int nrows, const int *arows, 74 | const ParOptScalar *A, const int *brows, ParOptScalar *B); 75 | 76 | // Perform Cholesky factorization on the diagonal 77 | int factorDiag(const int diag_size, ParOptScalar *D); 78 | 79 | // Solve L * y = x and output x = y 80 | void solveDiag(int diag_size, ParOptScalar *L, int nhrs, ParOptScalar *x); 81 | 82 | // Solve L^{T} * y = x and output x = y 83 | void solveDiagTranspose(int diag_size, ParOptScalar *L, int nhrs, 84 | ParOptScalar *x); 85 | 86 | // The following are short cut inline functions. 87 | // Get the diagonal block index 88 | inline int get_diag_index(const int i, const int j) { 89 | if (i >= j) { 90 | return j + i * (i + 1) / 2; 91 | } else { 92 | return i + j * (j + 1) / 2; 93 | } 94 | } 95 | 96 | // Given the supernode index, return the pointer to the diagonal matrix 97 | inline ParOptScalar *get_diag_pointer(const int i) { 98 | return &data[data_ptr[i]]; 99 | } 100 | 101 | // Given the supernode index, the supernode size and the index into the rows 102 | // data, return the pointer to the lower factor 103 | inline ParOptScalar *get_factor_pointer(const int i, const int size, 104 | const int index) { 105 | const int dsize = size * (size + 1) / 2; 106 | const int offset = index - colp[i]; 107 | return &data[data_ptr[i] + dsize + size * offset]; 108 | } 109 | 110 | // Given the supernode index, the supernode size and the index into the rows 111 | // data, return the pointer to the lower factor 112 | inline ParOptScalar *get_factor_pointer(const int i, const int size) { 113 | const int 
dsize = size * (size + 1) / 2; 114 |     return &data[data_ptr[i] + dsize]; 115 |   } 116 | 117 |   // The dimension of the square matrix 118 |   int size; 119 | 120 |   // Permutation and inverse permutation for the matrix. Both of these may be 121 |   // NULL. 122 |   int *perm, *iperm; 123 | 124 |   // Temporary vector for solving with the permutation arrays - only allocated 125 |   // if perm and iperm are allocated, otherwise it is NULL 126 |   ParOptScalar *temp; 127 | 128 |   // The row indices for the strict lower-diagonal entries of each super node. 129 |   // This does not contain the row indices for the supernode itself. Only 130 |   // entries below the supernode. 131 |   int *rows; 132 | 133 |   // Pointer into the row indices for the strict lower block of the 134 |   // matrix. This does not include the row indices for the supernode. 135 |   int *colp; 136 | 137 |   // Number of supernodes 138 |   int num_snodes; 139 | 140 |   // Supernode sizes - How many consecutive variables belong to this 141 |   // supernode? sum_{i=1}^{num_snodes} snode_size = size 142 |   int *snode_size; 143 | 144 |   // Given the variable index, what is the corresponding supernode? 145 |   int *var_to_snode; 146 | 147 |   // Given the supernode, what is the first variable in the node? 148 |   int *snode_to_first_var; 149 | 150 |   // Given the supernode index, a pointer into the supernode data 151 |   // This is computed as the following for k = 0 ...
num_snodes 152 | // data_ptr[k] = sum_{i = 0}^{k} snode_size[i] * ((snode_size[i] + 1)/2 + 153 | // colp[i + 1] - colp[i]) 154 | int *data_ptr; 155 | 156 | // Work_size = max(max_{i} (snode_size[i] * (colp[i+1] - colp[i])) 157 | // max_{i} snode_size[i]**2) 158 | int work_size; 159 | 160 | // The numerical data for all entries size = data_ptr[num_snodes] 161 | ParOptScalar *data; 162 | }; 163 | 164 | #endif // PAR_OPT_SPARSE_CHOLESKY_H -------------------------------------------------------------------------------- /examples/dymos/simple/simple.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import openmdao.api as om 3 | import dymos as dm 4 | import matplotlib.pyplot as plt 5 | import argparse 6 | from paropt.paropt_sparse_driver import ParOptSparseDriver 7 | 8 | 9 | class SimpleODE(om.ExplicitComponent): 10 | def initialize(self): 11 | self.options.declare("num_nodes", types=int) 12 | 13 | def setup(self): 14 | nn = self.options["num_nodes"] 15 | 16 | # Inputs 17 | self.add_input("m", val=1.0, desc="mass", units="kg") 18 | self.add_input("v", val=np.zeros(nn), desc="velocity", units="m/s") 19 | self.add_input("u", val=np.zeros(nn), desc="control force", units="N") 20 | self.add_output( 21 | "xdot", val=np.zeros(nn), desc="horizontal velocity", units="m/s" 22 | ) 23 | self.add_output( 24 | "vdot", val=np.zeros(nn), desc="acceleration mag.", units="m/s**2" 25 | ) 26 | self.add_output( 27 | "Jdot", 28 | val=np.zeros(nn), 29 | desc="time derivative of total control", 30 | units="N**2", 31 | ) 32 | 33 | # Setup partials 34 | arange = np.arange(self.options["num_nodes"], dtype=int) 35 | self.declare_partials(of="xdot", wrt="v", rows=arange, cols=arange) 36 | self.declare_partials(of="vdot", wrt="u", rows=arange, cols=arange) 37 | self.declare_partials(of="Jdot", wrt="u", rows=arange, cols=arange) 38 | 39 | def compute(self, inputs, outputs): 40 | u = inputs["u"] 41 | v = inputs["v"] 42 | m = inputs["m"] 43 
| 44 | outputs["xdot"] = v 45 | outputs["vdot"] = u / m 46 | outputs["Jdot"] = u**2 47 | 48 | def compute_partials(self, inputs, jacobian): 49 | u = inputs["u"] 50 | m = inputs["m"] 51 | 52 | jacobian["xdot", "v"] = 1.0 53 | jacobian["vdot", "u"] = 1.0 / m 54 | jacobian["Jdot", "u"] = 2 * u 55 | 56 | 57 | # Add options 58 | parser = argparse.ArgumentParser() 59 | parser.add_argument( 60 | "--optimizer", default="ParOpt", help="Optimizer name from pyOptSparse" 61 | ) 62 | parser.add_argument("--algorithm", default="tr", help="algorithm used in ParOpt") 63 | parser.add_argument("--less_force", action="store_true", default=False) 64 | parser.add_argument("--show_sparsity", action="store_true", default=False) 65 | 66 | args = parser.parse_args() 67 | 68 | optimizer = args.optimizer 69 | algorithm = args.algorithm 70 | less_force = args.less_force 71 | show_sparsity = args.show_sparsity 72 | 73 | # Define the OpenMDAO problem 74 | p = om.Problem(model=om.Group()) 75 | 76 | # Define a Trajectory object 77 | traj = dm.Trajectory() 78 | 79 | p.model.add_subsystem("traj", subsys=traj) 80 | 81 | # Define a Dymos Phase object with GaussLobatto Transcription 82 | transcript = dm.GaussLobatto(num_segments=20, order=3) 83 | phase = dm.Phase(ode_class=SimpleODE, transcription=transcript) 84 | 85 | traj.add_phase(name="phase0", phase=phase) 86 | 87 | # Set the time options 88 | phase.set_time_options(fix_initial=True, fix_duration=True, duration_val=5.0, units="s") 89 | 90 | # Define state variables 91 | phase.add_state("x", units="m", fix_initial=True, fix_final=True, rate_source="xdot") 92 | phase.add_state( 93 | "v", 94 | units="m/s", 95 | fix_initial=True, 96 | fix_final=True, 97 | rate_source="vdot", 98 | targets=["v"], 99 | ) 100 | phase.add_state( 101 | "J", units="N*N*s", fix_initial=True, fix_final=False, rate_source="Jdot" 102 | ) 103 | 104 | # Define control variable 105 | if less_force: 106 | max_force = 0.5 107 | else: 108 | max_force = 10.0 109 | 110 | 
phase.add_control( 111 | name="u", 112 | units="N", 113 | lower=-max_force, 114 | upper=max_force, 115 | targets=["u"], 116 | fix_initial=False, 117 | fix_final=False, 118 | ) 119 | 120 | # Minimize final time. 121 | phase.add_objective("J", loc="final") 122 | 123 | # Setup the problem 124 | p.setup(check=True) 125 | 126 | # Now that the OpenMDAO problem is setup, we can set the values of the states. 127 | p.set_val( 128 | "traj.phase0.states:x", phase.interpolate(ys=[0, 5], nodes="state_input"), units="m" 129 | ) 130 | 131 | p.set_val( 132 | "traj.phase0.states:v", 133 | phase.interpolate(ys=[0, 0], nodes="state_input"), 134 | units="m/s", 135 | ) 136 | 137 | p.set_val( 138 | "traj.phase0.controls:u", 139 | phase.interpolate(ys=[0, 0], nodes="control_input"), 140 | units="N", 141 | ) 142 | 143 | # Create the driver 144 | p.driver = ParOptSparseDriver() 145 | 146 | options = { 147 | "algorithm": "ip", 148 | "norm_type": "infinity", 149 | "qn_type": "bfgs", 150 | "qn_subspace_size": 10, 151 | "starting_point_strategy": "least_squares_multipliers", 152 | "qn_update_type": "damped_update", 153 | "abs_res_tol": 1e-6, 154 | "barrier_strategy": "monotone", 155 | "armijo_constant": 1e-5, 156 | "penalty_gamma": 100.0, 157 | "max_major_iters": 500, 158 | } 159 | 160 | for key in options: 161 | p.driver.options[key] = options[key] 162 | 163 | # Allow OpenMDAO to automatically determine our sparsity pattern. 164 | # Doing so can significantly speed up the execution of Dymos. 165 | p.driver.declare_coloring(show_summary=True, show_sparsity=show_sparsity) 166 | 167 | # Run the driver to solve the problem 168 | p.run_driver() 169 | 170 | # Check the validity of our results by using scipy.integrate.solve_ivp to 171 | # integrate the solution. 
172 | sim_out = traj.simulate() 173 | 174 | # Plot the results 175 | fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 4.5)) 176 | 177 | axes[0].plot( 178 | p.get_val("traj.phase0.timeseries.time"), 179 | p.get_val("traj.phase0.timeseries.states:x"), 180 | "ro", 181 | label="solution", 182 | ) 183 | 184 | axes[0].plot( 185 | sim_out.get_val("traj.phase0.timeseries.time"), 186 | sim_out.get_val("traj.phase0.timeseries.states:x"), 187 | "b-", 188 | label="simulation", 189 | ) 190 | 191 | axes[0].set_xlabel("time") 192 | axes[0].set_ylabel("x") 193 | axes[0].legend() 194 | axes[0].grid() 195 | 196 | axes[1].plot( 197 | p.get_val("traj.phase0.timeseries.time"), 198 | p.get_val("traj.phase0.timeseries.controls:u"), 199 | "ro", 200 | label="solution", 201 | ) 202 | 203 | axes[1].plot( 204 | sim_out.get_val("traj.phase0.timeseries.time"), 205 | sim_out.get_val("traj.phase0.timeseries.controls:u"), 206 | "b-", 207 | label="simulation", 208 | ) 209 | 210 | axes[1].set_xlabel("time") 211 | axes[1].set_ylabel("u") 212 | axes[1].legend() 213 | axes[1].grid() 214 | 215 | plt.show() 216 | -------------------------------------------------------------------------------- /src/ParOptMMA.h: -------------------------------------------------------------------------------- 1 | #ifndef PAR_OPT_QUASI_SEPARABLE_H 2 | #define PAR_OPT_QUASI_SEPARABLE_H 3 | 4 | #include 5 | 6 | #include "ParOptInteriorPoint.h" 7 | #include "ParOptOptions.h" 8 | #include "ParOptProblem.h" 9 | 10 | /* 11 | The following code is designed to be used to implement 12 | MMA-type methods that also include sparse constraints. 13 | There are two modes of operation: 14 | 15 | The first mode is to run the method of moving asymptotes (MMA), 16 | a sequential, separable convex approximation technique, 17 | developed by Svanberg, that is commonly used in topology 18 | optimization. This method cannot incorporate sparse constraints 19 | directly and so they are ignored. 
20 | 21 |   The second mode can be used to set up and run a convex sub-problem 22 |   where the objective is governed by the same approximation 23 |   technique used in MMA, but the constraints and sparse constraints 24 |   defined by the original problem class are linearized about 25 |   the original point. 26 | */ 27 | 28 | class ParOptMMA : public ParOptProblem { 29 |  public: 30 |   ParOptMMA(ParOptProblem *_prob, ParOptOptions *_options); 31 |   ~ParOptMMA(); 32 | 33 |   // Get the default option values 34 |   static void addDefaultOptions(ParOptOptions *options); 35 |   ParOptOptions *getOptions(); 36 | 37 |   // Optimize using MMA 38 |   void optimize(ParOptInteriorPoint *optimizer); 39 | 40 |   // Set the MMA iteration 41 |   void setIteration(int _mma_iter); 42 | 43 |   // Set the new values of the multipliers 44 |   void setMultipliers(ParOptScalar *_z, ParOptVec *_zw = NULL, 45 |                       ParOptVec *_zlvec = NULL, ParOptVec *_zuvec = NULL); 46 | 47 |   // Initialize data for the subproblem 48 |   int initializeSubProblem(ParOptVec *x); 49 | 50 |   // Compute the KKT error based on the current multiplier estimates 51 |   void computeKKTError(double *l1, double *linfty, double *infeas); 52 | 53 |   // Get the optimized point 54 |   void getOptimizedPoint(ParOptVec **x); 55 | 56 |   // Get the asymptotes 57 |   void getAsymptotes(ParOptVec **_L, ParOptVec **_U); 58 | 59 |   // Get the previous design iterations 60 |   void getDesignHistory(ParOptVec **_x1, ParOptVec **_x2); 61 | 62 |   // Set the print level 63 |   void setPrintLevel(int _print_level); 64 | 65 |   // Set parameters in the optimizer 66 |   void setAsymptoteContract(double val); 67 |   void setAsymptoteRelax(double val); 68 |   void setInitAsymptoteOffset(double val); 69 |   void setMinAsymptoteOffset(double val); 70 |   void setMaxAsymptoteOffset(double val); 71 |   void setBoundRelax(double val); 72 |   void setRegularization(double eps, double delta); 73 | 74 |   // Create the design vectors 75 |   ParOptVec *createDesignVec(); 76 |   ParOptVec *createConstraintVec(); 77 |
ParOptQuasiDefMat *createQuasiDefMat(); 78 | 79 | // Get the communicator for the problem 80 | MPI_Comm getMPIComm(); 81 | 82 | // Function to indicate the type of sparse constraints 83 | int isSparseInequality(); 84 | int useLowerBounds(); 85 | int useUpperBounds(); 86 | 87 | // Get the variables and bounds from the problem 88 | void getVarsAndBounds(ParOptVec *x, ParOptVec *lb, ParOptVec *ub); 89 | 90 | // Evaluate the objective and constraints 91 | int evalObjCon(ParOptVec *x, ParOptScalar *fobj, ParOptScalar *cons); 92 | 93 | // Evaluate the objective and constraint gradients 94 | int evalObjConGradient(ParOptVec *x, ParOptVec *g, ParOptVec **Ac); 95 | 96 | // Evaluate the product of the Hessian with a given vector 97 | int evalHvecProduct(ParOptVec *x, ParOptScalar *z, ParOptVec *zw, 98 | ParOptVec *px, ParOptVec *hvec); 99 | 100 | // Evaluate the diagonal Hessian 101 | int evalHessianDiag(ParOptVec *x, ParOptScalar *z, ParOptVec *zw, 102 | ParOptVec *hdiag); 103 | 104 | // Evaluate the constraints 105 | void evalSparseCon(ParOptVec *x, ParOptVec *out); 106 | 107 | // Compute the Jacobian-vector product out = J(x)*px 108 | void addSparseJacobian(ParOptScalar alpha, ParOptVec *x, ParOptVec *px, 109 | ParOptVec *out); 110 | 111 | // Compute the transpose Jacobian-vector product out = J(x)^{T}*pzw 112 | // ----------------------------------------------------------------- 113 | void addSparseJacobianTranspose(ParOptScalar alpha, ParOptVec *x, 114 | ParOptVec *pzw, ParOptVec *out); 115 | 116 | // Add the inner product of the constraints to the matrix such 117 | // that A += J(x)*cvec*J(x)^{T} where cvec is a diagonal matrix 118 | void addSparseInnerProduct(ParOptScalar alpha, ParOptVec *x, ParOptVec *cvec, 119 | ParOptScalar *A); 120 | 121 | // Over-write this function if you'd like to print out 122 | // something with the same frequency as the output files 123 | // ----------------------------------------------------- 124 | void writeOutput(int iter, ParOptVec *x) 
{} 125 | 126 | private: 127 | // Initialize the data 128 | void initialize(); 129 | 130 | // Set the output file (only on the root proc) 131 | void setOutputFile(const char *filename); 132 | 133 | // Print the options summary 134 | void printOptionsSummary(FILE *fp); 135 | 136 | // File pointer for the summary file - depending on the settings 137 | FILE *fp; 138 | int first_print; 139 | 140 | // Pointer to the optimization problem 141 | ParOptProblem *prob; 142 | 143 | // Options 144 | ParOptOptions *options; 145 | 146 | // Flag which controls the constraint approximation 147 | int use_true_mma; 148 | 149 | // Communicator for this problem 150 | MPI_Comm comm; 151 | 152 | // Keep track of the number of iterations 153 | int mma_iter; 154 | int subproblem_iter; 155 | 156 | int m; // The number of constraints (global) 157 | int n; // The number of design variables (local) 158 | 159 | // The design variables, and the previous two vectors 160 | ParOptVec *xvec, *x1vec, *x2vec; 161 | 162 | // The values of the multipliers 163 | ParOptVec *lbvec, *ubvec; 164 | 165 | // The objective, constraint and gradient information 166 | ParOptScalar fobj, *cons; 167 | ParOptVec *gvec, **Avecs; 168 | 169 | // The assymptotes 170 | ParOptVec *Lvec, *Uvec; 171 | 172 | // The move limits 173 | ParOptVec *alphavec, *betavec; 174 | 175 | // The coefficients for the approximation 176 | ParOptVec *p0vec, *q0vec; // The objective coefs 177 | ParOptVec **pivecs, **qivecs; // The constraint coefs 178 | 179 | // The right-hand side for the constraints in the subproblem 180 | ParOptScalar *b; 181 | 182 | // The sparse constraint vector 183 | ParOptVec *cwvec; 184 | 185 | // Additional data required for computing the KKT conditions 186 | ParOptVec *rvec; 187 | 188 | // The multipliers/constraints 189 | ParOptScalar *z; 190 | ParOptVec *zwvec; 191 | ParOptVec *zlvec, *zuvec; 192 | }; 193 | 194 | #endif // PAR_OPT_QUASI_SEPARABLE_H 195 | 
-------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 14 | # 15 | import os 16 | import sys 17 | 18 | sys.path.insert(0, os.path.abspath("../paropt")) 19 | 20 | 21 | # -- Project information ----------------------------------------------------- 22 | 23 | project = "ParOpt" 24 | copyright = "2023, Graeme Kennedy" 25 | author = "Graeme Kennedy" 26 | 27 | # The short X.Y version 28 | version = "" 29 | # The full version, including alpha/beta/rc tags 30 | release = "" 31 | 32 | 33 | # -- General configuration --------------------------------------------------- 34 | 35 | # If your documentation needs a minimal Sphinx version, state it here. 36 | # 37 | # needs_sphinx = '1.0' 38 | 39 | # Add any Sphinx extension module names here, as strings. They can be 40 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 41 | # ones. 
42 | extensions = [ 43 | "breathe", 44 | "nbsphinx", 45 | "sphinx.ext.doctest", 46 | "sphinx.ext.intersphinx", 47 | "sphinx.ext.todo", 48 | "sphinx.ext.ifconfig", 49 | "sphinx.ext.autodoc", 50 | "sphinx.ext.napoleon", 51 | "sphinx.ext.viewcode", 52 | "sphinx.ext.githubpages", 53 | "sphinx.ext.mathjax", 54 | "sphinxcontrib.bibtex", 55 | "sphinxcontrib.programoutput", 56 | ] 57 | 58 | # Breathe configuration 59 | breathe_projects = {"paropt": "../xml"} 60 | breathe_default_project = "paropt" 61 | 62 | # Add any paths that contain templates here, relative to this directory. 63 | templates_path = ["_templates"] 64 | 65 | # The suffix(es) of source filenames. 66 | # You can specify multiple suffix as a list of string: 67 | # 68 | # source_suffix = ['.rst', '.md'] 69 | source_suffix = ".rst" 70 | 71 | # The master toctree document. 72 | master_doc = "index" 73 | 74 | # The language for content autogenerated by Sphinx. Refer to documentation 75 | # for a list of supported languages. 76 | # 77 | # This is also used if you do content translation via gettext catalogs. 78 | # Usually you set "language" from the command line for these cases. 79 | language = "python" 80 | 81 | # List of patterns, relative to source directory, that match files and 82 | # directories to ignore when looking for source files. 83 | # This pattern also affects html_static_path and html_extra_path. 84 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 85 | 86 | # The name of the Pygments (syntax highlighting) style to use. 87 | # pygments_style = "default" 88 | 89 | 90 | # -- Options for HTML output ------------------------------------------------- 91 | 92 | # The theme to use for HTML and HTML Help pages. See the documentation for 93 | # a list of builtin themes. 94 | # 95 | 96 | # The theme to use for HTML and HTML Help pages. See the documentation for 97 | # a list of builtin themes. 
98 | html_theme = "sphinx_rtd_theme" 99 | 100 | # Add any paths that contain custom themes here, relative to this directory. 101 | 102 | # Theme options are theme-specific and customize the look and feel of a theme 103 | # further. For a list of options available for each theme, see the 104 | # documentation. 105 | # 106 | # html_theme_options = {"rightsidebar": "true"} 107 | 108 | bibtex_bibfiles = ["../refs.bib"] 109 | 110 | # Add any paths that contain custom static files (such as style sheets) here, 111 | # relative to this directory. They are copied after the builtin static files, 112 | # so a file named "default.css" will overwrite the builtin "default.css". 113 | html_static_path = ["_static"] 114 | 115 | html_css_files = [ 116 | "custom.css", 117 | ] 118 | 119 | # Custom sidebar templates, must be a dictionary that maps document names 120 | # to template names. 121 | # 122 | # The default sidebars (for documents that don't match any pattern) are 123 | # defined by theme itself. Builtin themes are using these templates by 124 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 125 | # 'searchbox.html']``. 126 | # 127 | # html_sidebars = {} 128 | 129 | 130 | # -- Options for HTMLHelp output --------------------------------------------- 131 | 132 | # Output file base name for HTML help builder. 133 | htmlhelp_basename = "paroptdoc" 134 | 135 | html_show_sourcelink = False 136 | 137 | 138 | # -- Options for LaTeX output ------------------------------------------------ 139 | 140 | latex_elements = { 141 | # The paper size ('letterpaper' or 'a4paper'). 142 | # 143 | # 'papersize': 'letterpaper', 144 | # The font size ('10pt', '11pt' or '12pt'). 145 | # 146 | # 'pointsize': '10pt', 147 | # Additional stuff for the LaTeX preamble. 148 | # 149 | # 'preamble': '', 150 | # Latex figure (float) alignment 151 | # 152 | # 'figure_align': 'htbp', 153 | } 154 | 155 | # Grouping the document tree into LaTeX files. 
List of tuples 156 | # (source start file, target name, title, 157 | # author, documentclass [howto, manual, or own class]). 158 | latex_documents = [ 159 | (master_doc, "paropt.tex", "paropt Documentation", "Graeme Kennedy", "manual"), 160 | ] 161 | 162 | 163 | # -- Options for manual page output ------------------------------------------ 164 | 165 | # One entry per manual page. List of tuples 166 | # (source start file, name, description, authors, manual section). 167 | man_pages = [(master_doc, "paropt", "paropt Documentation", [author], 1)] 168 | 169 | 170 | # -- Options for Texinfo output ---------------------------------------------- 171 | 172 | # Grouping the document tree into Texinfo files. List of tuples 173 | # (source start file, target name, title, author, 174 | # dir menu entry, description, category) 175 | texinfo_documents = [ 176 | ( 177 | master_doc, 178 | "paropt", 179 | "paropt Documentation", 180 | author, 181 | "paropt", 182 | "A library for parallel optimization.", 183 | "Miscellaneous", 184 | ), 185 | ] 186 | 187 | 188 | # -- Options for Epub output ------------------------------------------------- 189 | 190 | # Bibliographic Dublin Core info. 191 | epub_title = project 192 | 193 | # The unique identifier of the text. This can be a ISBN number 194 | # or the project homepage. 195 | # 196 | # epub_identifier = '' 197 | 198 | # A unique identification for the text. 199 | # 200 | # epub_uid = '' 201 | 202 | # A list of files that should not be packed into the epub file. 
203 | epub_exclude_files = ["search.html"] 204 | 205 | 206 | # -- Extension configuration ------------------------------------------------- 207 | -------------------------------------------------------------------------------- /src/ParOptOptimizer.cpp: -------------------------------------------------------------------------------- 1 | #include "ParOptOptimizer.h" 2 | 3 | #include 4 | 5 | ParOptOptimizer::ParOptOptimizer(ParOptProblem *_problem, 6 | ParOptOptions *_options) { 7 | problem = _problem; 8 | problem->incref(); 9 | 10 | options = _options; 11 | options->incref(); 12 | 13 | ip = NULL; 14 | tr = NULL; 15 | mma = NULL; 16 | subproblem = NULL; 17 | } 18 | 19 | ParOptOptimizer::~ParOptOptimizer() { 20 | problem->decref(); 21 | options->decref(); 22 | if (ip) { 23 | ip->decref(); 24 | } 25 | if (tr) { 26 | tr->decref(); 27 | } 28 | if (mma) { 29 | mma->decref(); 30 | } 31 | if (subproblem) { 32 | subproblem->decref(); 33 | } 34 | } 35 | 36 | /* 37 | Get default optimization options 38 | */ 39 | void ParOptOptimizer::addDefaultOptions(ParOptOptions *options) { 40 | const char *optimizers[3] = {"ip", "tr", "mma"}; 41 | options->addEnumOption("algorithm", "tr", 3, optimizers, 42 | "The type of optimization algorithm"); 43 | 44 | options->addStringOption("ip_checkpoint_file", NULL, 45 | "Checkpoint file for the interior point method"); 46 | 47 | ParOptInteriorPoint::addDefaultOptions(options); 48 | ParOptTrustRegion::addDefaultOptions(options); 49 | ParOptMMA::addDefaultOptions(options); 50 | } 51 | 52 | /* 53 | Get the options set into the optimizer 54 | */ 55 | ParOptOptions *ParOptOptimizer::getOptions() { return options; } 56 | 57 | /* 58 | Get the problem class 59 | */ 60 | ParOptProblem *ParOptOptimizer::getProblem() { return problem; } 61 | 62 | /* 63 | Perform the optimization 64 | */ 65 | void ParOptOptimizer::optimize() { 66 | int rank; 67 | MPI_Comm_rank(problem->getMPIComm(), &rank); 68 | 69 | // Check what type of optimization algorithm has been 
requested 70 |   int algo_type = 0; 71 |   const char *algorithm = options->getEnumOption("algorithm"); 72 | 73 |   if (strcmp(algorithm, "ip") == 0) { 74 |     algo_type = 1; 75 |   } else if (strcmp(algorithm, "tr") == 0) { 76 |     algo_type = 2; 77 |   } else if (strcmp(algorithm, "mma") == 0) { 78 |     algo_type = 3; 79 |   } else { 80 |     if (rank == 0) { 81 |       fprintf(stderr, 82 |               "ParOptOptimizer Error: Unrecognized algorithm option %s\n", 83 |               algorithm); 84 |     } 85 |     return; 86 |   } 87 | 88 |   if (algo_type == 1) { 89 |     if (tr && ip) { 90 |       tr->decref(); 91 |       tr = NULL; 92 |       ip->decref(); 93 |       ip = NULL; 94 |     } else if (mma && ip) { 95 |       mma->decref(); 96 |       mma = NULL; 97 |       ip->decref(); 98 |       ip = NULL; 99 |     } 100 | 101 |     if (!ip) { 102 |       ip = new ParOptInteriorPoint(problem, options); 103 |       ip->incref(); 104 |     } 105 | 106 |     const char *checkpoint = options->getStringOption("ip_checkpoint_file"); 107 |     ip->optimize(checkpoint); 108 |   } else if (algo_type == 2) { 109 |     if (mma && ip) { 110 |       mma->decref(); 111 |       mma = NULL; 112 |       ip->decref(); 113 |       ip = NULL; 114 |     } 115 | 116 |     // Create the trust region subproblem 117 |     const char *qn_type = options->getEnumOption("qn_type"); 118 |     const int qn_subspace_size = options->getIntOption("qn_subspace_size"); 119 | 120 |     if (!subproblem) { 121 |       ParOptCompactQuasiNewton *qn = NULL; 122 |       if (strcmp(qn_type, "bfgs") == 0 || strcmp(qn_type, "scaled_bfgs") == 0) { 123 |         ParOptLBFGS *bfgs = new ParOptLBFGS(problem, qn_subspace_size); 124 |         if (strcmp(qn_type, "scaled_bfgs") == 0) { 125 |           // This very specific type of bfgs is only used when ncon = 1 126 |           int nvars, ncon, nwcon; 127 |           problem->getProblemSizes(&nvars, &ncon, &nwcon); 128 |           if (ncon != 1) { 129 |             if (rank == 0) { 130 |               fprintf(stderr, 131 |                       "Can't use scaled_bfgs with more than one constraint!, " 132 |                       "ncon = %d\n", 133 |                       ncon); 134 |             } 135 |           } else { 136 |             ParOptScaledQuasiNewton *scaled_bfgs = 137 |                 new ParOptScaledQuasiNewton(problem, bfgs); 138 |             qn = scaled_bfgs; 139 |           }
140 | } else { 141 | qn = bfgs; 142 | } 143 | qn->incref(); 144 | 145 | const char *update_type = options->getEnumOption("qn_update_type"); 146 | if (strcmp(update_type, "skip_negative_curvature") == 0) { 147 | bfgs->setBFGSUpdateType(PAROPT_SKIP_NEGATIVE_CURVATURE); 148 | } else if (strcmp(update_type, "damped_update") == 0) { 149 | bfgs->setBFGSUpdateType(PAROPT_DAMPED_UPDATE); 150 | } 151 | } else if (strcmp(qn_type, "sr1") == 0) { 152 | qn = new ParOptLSR1(problem, qn_subspace_size); 153 | qn->incref(); 154 | } 155 | 156 | if (qn) { 157 | const char *diag_type = options->getEnumOption("qn_diag_type"); 158 | if (strcmp(diag_type, "yty_over_yts") == 0) { 159 | qn->setInitDiagonalType(PAROPT_YTY_OVER_YTS); 160 | } else if (strcmp(diag_type, "yts_over_sts") == 0) { 161 | qn->setInitDiagonalType(PAROPT_YTS_OVER_STS); 162 | } else if (strcmp(diag_type, "inner_yty_over_yts") == 0) { 163 | qn->setInitDiagonalType(PAROPT_INNER_PRODUCT_YTY_OVER_YTS); 164 | } else { 165 | qn->setInitDiagonalType(PAROPT_INNER_PRODUCT_YTS_OVER_STS); 166 | } 167 | } 168 | 169 | subproblem = new ParOptQuadraticSubproblem(problem, qn); 170 | subproblem->incref(); 171 | } 172 | 173 | if (!ip) { 174 | ip = new ParOptInteriorPoint(subproblem, options); 175 | ip->incref(); 176 | } 177 | 178 | if (!tr) { 179 | tr = new ParOptTrustRegion(subproblem, options); 180 | tr->incref(); 181 | } 182 | 183 | tr->optimize(ip); 184 | } else { // algo_type == 3 185 | // Set up the MMA optimizer 186 | if (tr && ip) { 187 | tr->decref(); 188 | tr = NULL; 189 | ip->decref(); 190 | ip = NULL; 191 | } 192 | 193 | // Create the the mma object 194 | if (!mma) { 195 | mma = new ParOptMMA(problem, options); 196 | mma->incref(); 197 | } 198 | 199 | if (!ip) { 200 | ip = new ParOptInteriorPoint(mma, options); 201 | ip->incref(); 202 | } 203 | 204 | mma->optimize(ip); 205 | } 206 | } 207 | 208 | // Get the optimized point 209 | void ParOptOptimizer::getOptimizedPoint(ParOptVec **x, ParOptScalar **z, 210 | ParOptVec **zw, 
                                        ParOptVec **zl, ParOptVec **zu) {
  // Retrieve the optimized design point and multipliers from whichever
  // driver (trust-region, MMA, or plain interior-point) was last set up.
  if (tr && ip) {
    // Trust-region: the design point lives in the TR object, while the
    // multipliers come from the interior-point subproblem solver.
    tr->getOptimizedPoint(x);
    ip->getOptimizedPoint(NULL, z, zw, zl, zu);
  } else if (mma && ip) {
    // MMA: same split between the MMA object and the interior-point solver
    mma->getOptimizedPoint(x);
    ip->getOptimizedPoint(NULL, z, zw, zl, zu);
  } else if (ip) {
    // Pure interior-point: everything comes from the one solver object
    ip->getOptimizedPoint(x, z, zw, zl, zu);
  }
}

/*
  Set the trust-region subproblem
*/
void ParOptOptimizer::setTrustRegionSubproblem(
    ParOptTrustRegionSubproblem *_subproblem) {
  // Should check here if the subproblem's problem
  // is our problem, for consistency??
  // Incref the new object before decref'ing the old one so that passing
  // in the currently-held subproblem (self-assignment) is safe.
  if (_subproblem) {
    _subproblem->incref();
  }
  if (subproblem) {
    subproblem->decref();
  }
  subproblem = _subproblem;
}

#ifndef PAROPT_QUASI_NEWTON_H
#define PAROPT_QUASI_NEWTON_H

#include "ParOptProblem.h"
#include "ParOptVec.h"

/*
  The type of BFGS update to use when the curvature condition is violated
*/
enum ParOptBFGSUpdateType {
  PAROPT_SKIP_NEGATIVE_CURVATURE,
  PAROPT_DAMPED_UPDATE
};

/*
  The type of initial diagonal approximation to use in the BFGS update
*/
enum ParOptQuasiNewtonDiagonalType {
  PAROPT_YTY_OVER_YTS,
  PAROPT_YTS_OVER_STS,
  PAROPT_INNER_PRODUCT_YTY_OVER_YTS,
  PAROPT_INNER_PRODUCT_YTS_OVER_STS
};

/**
  This is the abstract base class for compact limited-memory
  quasi-Newton update schemes.

  This class can be used to implement both limited-memory BFGS and SR1
  update schemes for quasi-Newton optimization methods.
*/
class ParOptCompactQuasiNewton : public ParOptBase {
 public:
  ParOptCompactQuasiNewton() {}
  virtual ~ParOptCompactQuasiNewton() {}

  // Set the type of initial diagonal estimate to use; no-op by default
  virtual void setInitDiagonalType(
      ParOptQuasiNewtonDiagonalType _diagonal_type) {}

  // Reset the internal data
  virtual void reset() = 0;

  // Perform the quasi-Newton update with the specified multipliers
  virtual int update(ParOptVec *x, const ParOptScalar *z, ParOptVec *zw,
                     ParOptVec *s, ParOptVec *y) = 0;

  // Update the approximation with only multiplier values - this is used
  // only for certain classes of compact Hessian approximations and does
  // not need to be implemented in general (returns 0 by default).
  virtual int update(ParOptVec *x, const ParOptScalar *z, ParOptVec *zw) {
    return 0;
  }

  // Perform a matrix-vector multiplication: y <- B*x
  virtual void mult(ParOptVec *x, ParOptVec *y) = 0;

  // Perform a matrix-vector multiplication and add the result to y:
  // y <- y + alpha*B*x
  virtual void multAdd(ParOptScalar alpha, ParOptVec *x, ParOptVec *y) = 0;

  // Get the compact representation for the limited-memory quasi-Newton method
  virtual int getCompactMat(ParOptScalar *_b0, const ParOptScalar **_d,
                            const ParOptScalar **_M, ParOptVec ***Z) = 0;

  // Get the maximum size of the compact representation
  virtual int getMaxLimitedMemorySize() = 0;
};

/**
  This class implements a limited-memory BFGS updating scheme based on
  computed differences in the step and Lagrange gradients during a
  line search.

  This is based on the paper by Byrd, Nocedal and Schnabel,
  "Representations of quasi-Newton matrices and their use in
  limited-memory methods".

  The limited-memory BFGS formula takes the following form:

  b0*I - Z*diag{d}*M^{-1}*diag{d}*Z^{T}

  Here b0 is a scalar, d is a vector whose entries are either 1.0 or
  b0, M is a matrix and Z is a rectangular matrix stored as a series of
  vectors.

  Note that this class implements a damped update when the curvature
  condition is violated.
*/
class ParOptLBFGS : public ParOptCompactQuasiNewton {
 public:
  ParOptLBFGS(ParOptProblem *prob, int _subspace_size);
  ~ParOptLBFGS();

  // Set the curvature update type
  void setBFGSUpdateType(ParOptBFGSUpdateType _hessian_update_type);
  void setInitDiagonalType(ParOptQuasiNewtonDiagonalType _diagonal_type);

  // Reset the internal data
  void reset();

  // Perform the BFGS update
  int update(ParOptVec *x, const ParOptScalar *z, ParOptVec *zw, ParOptVec *s,
             ParOptVec *y);

  // Perform a matrix-vector multiplication
  void mult(ParOptVec *x, ParOptVec *y);
  void multAdd(ParOptScalar alpha, ParOptVec *x, ParOptVec *y);

  // Get the information for the limited-memory BFGS update
  int getCompactMat(ParOptScalar *_b0, const ParOptScalar **_d,
                    const ParOptScalar **_M, ParOptVec ***Z);

  // Get the maximum size of the limited-memory BFGS
  int getMaxLimitedMemorySize();

 protected:
  // Update the coefficients
  void computeMatUpdate();

  // Store the type of curvature handling update
  ParOptBFGSUpdateType hessian_update_type;
  ParOptQuasiNewtonDiagonalType diagonal_type;

  // Set the finite-precision tolerance
  double epsilon_precision;

  // The size of the BFGS subspace
  int msub, msub_max;

  // The full list of vectors
  ParOptVec **Z;

  // Temporary data for internal usage
  ParOptVec *r;
  ParOptScalar *rz;  // rz = Z^{T}*x

  // The update S/Y vectors
  ParOptVec **S, **Y;
  ParOptScalar b0;  // The diagonal scalar

  // The M-matrix
  ParOptScalar *M, *M_factor;
  int *mfpiv;  // The pivot array for the M-factorization

  // Data for the internal storage of M/M_factor
  ParOptScalar *B, *L, *D;
  ParOptScalar *d0;  // The diagonal matrix
};

/**
  This class implements a limited-memory SR1 updating scheme based on
  computed differences in the step and Lagrange gradients during a
  line search.

  This is based on the paper by Byrd, Nocedal and Schnabel,
  "Representations of quasi-Newton matrices and their use in
  limited-memory methods".

  The limited-memory SR1 formula takes the following form:

  b0*I - Z*M^{-1}*Z^{T}

  Here b0 is a scalar, M is a matrix and Z is a rectangular matrix
  stored as a series of vectors.
*/
class ParOptLSR1 : public ParOptCompactQuasiNewton {
 public:
  ParOptLSR1(ParOptProblem *prob, int _subspace_size);
  ~ParOptLSR1();

  // Set the type of initial diagonal approximation to use
  void setInitDiagonalType(ParOptQuasiNewtonDiagonalType _diagonal_type);

  // Perform the SR1 update
  void reset();

  // Perform the SR1 update with the specified multipliers
  int update(ParOptVec *x, const ParOptScalar *z, ParOptVec *zw, ParOptVec *s,
             ParOptVec *y);

  // Perform a matrix-vector multiplication
  void mult(ParOptVec *x, ParOptVec *y);
  void multAdd(ParOptScalar alpha, ParOptVec *x, ParOptVec *y);

  // Get the information for the limited-memory SR1 update
  int getCompactMat(ParOptScalar *_b0, const ParOptScalar **_d,
                    const ParOptScalar **_M, ParOptVec ***Z);

  // Get the maximum size of the limited-memory SR1 approximation
  int getMaxLimitedMemorySize();

 protected:
  // The type of initial diagonal approximation to use
  ParOptQuasiNewtonDiagonalType diagonal_type;
| 196 | // The size of the BFGS subspace 197 | int msub, msub_max; 198 | 199 | // Set the finite-precision tolerance 200 | double epsilon_precision; 201 | 202 | // The full list of vectors 203 | ParOptVec **Z; 204 | 205 | // Temporary data for internal usage 206 | ParOptVec *r; 207 | ParOptScalar *rz; // rz = Z^{T}*x 208 | 209 | // The update S/Y vectors 210 | ParOptVec **S, **Y; 211 | ParOptScalar b0; // The diagonal scalar 212 | 213 | // The M-matrix 214 | ParOptScalar *M, *M_factor; 215 | int *mfpiv; // The pivot array for the M-factorization 216 | 217 | // Data for the internal storage of M/M_factor 218 | ParOptScalar *B, *L, *D; 219 | ParOptScalar *d0; // The diagonal matrix 220 | }; 221 | 222 | #endif // PAROPT_QUASI_NEWTON_H 223 | -------------------------------------------------------------------------------- /paropt/plot_history.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | import matplotlib.pylab as plt 3 | import numpy as np 4 | import argparse 5 | import os 6 | 7 | 8 | # This is used for multiple y axis in same plot 9 | def make_patch_spines_invisible(ax): 10 | ax.set_frame_on(True) 11 | ax.patch.set_visible(False) 12 | for sp in ax.spines.values(): 13 | sp.set_visible(False) 14 | 15 | 16 | def plot_history(filename, savefig=False): 17 | # Import ParOpt so that we can read the ParOpt output file 18 | from paropt import ParOpt 19 | 20 | colors = [ 21 | "#1f77b4", 22 | "#ff7f0e", 23 | "#2ca02c", 24 | "#d62728", 25 | "#9467bd", 26 | "#8c564b", 27 | "#e377c2", 28 | "#7f7f7f", 29 | "#bcbd22", 30 | "#17becf", 31 | ] 32 | 33 | # Try to unpack values for the interior point code 34 | header, values = ParOpt.unpack_output(filename) 35 | 36 | if len(values[0]) > 0: 37 | # You can get more stuff out of this array 38 | iteration = np.linspace(1, len(values[0]), len(values[0])) 39 | objective = values[7] 40 | opt = values[8] 41 | infeas = values[9] 42 | barrier = values[11] 43 | 44 | # Just make the 
iteration linear 45 | iteration = np.linspace(1, len(iteration), len(iteration)) 46 | 47 | # Make the subplots 48 | fig, ax1 = plt.subplots() 49 | l1 = ax1.plot(iteration, objective, color=colors[0], label="objective") 50 | ax1.set_xlabel("Iteration") 51 | ax1.set_ylabel("Function value") 52 | 53 | ax2 = ax1.twinx() 54 | l2 = ax2.semilogy(iteration, opt, color=colors[1], label="opt") 55 | l3 = ax2.semilogy(iteration, infeas, color=colors[2], label="infeas") 56 | l4 = ax2.semilogy(iteration, barrier, color=colors[3], label="barrier") 57 | ax2.set_ylabel("Optimality and Feasibility") 58 | 59 | # Manually add all the lines to the legend 60 | lns = l1 + l2 + l3 + l4 61 | labs = [l.get_label() for l in lns] 62 | ax1.legend(lns, labs, loc=0) 63 | plt.title(filename) 64 | else: 65 | # Unpack the output file 66 | header, values = ParOpt.unpack_tr_output(filename) 67 | 68 | # Try to unpack and plot secondary tr outputs 69 | header2, values2 = ParOpt.unpack_tr_2nd_output(filename) 70 | have_2nd_tr_data = len(values2[0]) > 0 71 | 72 | if len(values[0]) > 0: 73 | # You can get more stuff out of this array 74 | iteration = np.linspace(1, len(values[0]), len(values[0])) 75 | objective = values[header.index("fobj")] 76 | opt_linfty = values[header.index("linfty")] 77 | infeas = values[header.index("infes")] 78 | tr = values[header.index("tr")] 79 | avg_gamma = values[header.index("avg pen.")] 80 | avg_z = values[header.index("avg z")] 81 | 82 | # Just make the iteration linear 83 | iteration = np.linspace(1, len(iteration), len(iteration)) 84 | 85 | # Make the subplots 86 | fig, ax1 = plt.subplots() 87 | 88 | # Change fig size if we need to plot the third axis 89 | if have_2nd_tr_data: 90 | fig.subplots_adjust(right=0.75) 91 | fig.set_size_inches(7.9, 4.8) 92 | 93 | l1 = ax1.plot(iteration, objective, color=colors[0], label="objective") 94 | ax1.set_xlabel("Iteration") 95 | ax1.set_ylabel("Function value") 96 | 97 | ax2 = ax1.twinx() 98 | l2 = ax2.semilogy( 99 | iteration, 
opt_linfty, color=colors[1], label="opt-linfty" 100 | ) 101 | l3 = ax2.semilogy(iteration, avg_gamma, color=colors[2], label="avg. pen.") 102 | l4 = ax2.semilogy(iteration, avg_z, color=colors[3], label="avg z") 103 | l5 = ax2.semilogy(iteration, infeas, color=colors[4], label="infeas") 104 | l6 = ax2.semilogy(iteration, tr, color=colors[5], label="tr") 105 | ax2.set_ylabel("Optimality and Feasibility") 106 | 107 | if have_2nd_tr_data: 108 | aredf = values2[header2.index("ared(f)")] 109 | predf = values2[header2.index("pred(f)")] 110 | aredc = values2[header2.index("ared(c)")] 111 | predc = values2[header2.index("pred(c)")] 112 | rho = values[header.index("rho")] 113 | 114 | # Compute rho for function and constraint 115 | rhof = aredf / predf 116 | rhoc = aredc / predc 117 | 118 | ax3 = ax1.twinx() 119 | ax3.spines["right"].set_position(("axes", 1.2)) 120 | make_patch_spines_invisible(ax3) 121 | ax3.spines["right"].set_visible(True) 122 | l7 = ax3.plot(iteration, rhof, ":", color=colors[0], label="rho(f)") 123 | l8 = ax3.plot(iteration, rhoc, ":", color=colors[4], label="rho(c)") 124 | l9 = ax3.plot(iteration, rho, ":", color=colors[-1], label="rho") 125 | lns2 = l7 + l8 + l9 126 | ax3.set_ylabel("Model prediction ratios") 127 | ax3.set_ylim([-2.0, 2.0]) 128 | 129 | # Manually add all the lines to the legend 130 | lns = l1 + l2 + l3 + l4 + l5 + l6 131 | if have_2nd_tr_data: 132 | lns += lns2 133 | labs = [l.get_label() for l in lns] 134 | ax2.legend(lns, labs, loc="upper right") 135 | plt.title(filename) 136 | 137 | else: 138 | # Unpack the output file 139 | header, values = ParOpt.unpack_mma_output(filename) 140 | 141 | # You can get more stuff out of this array 142 | iteration = np.linspace(1, len(values[0]), len(values[0])) 143 | objective = values[2] 144 | lone = values[3] 145 | linfty = values[4] 146 | lambd = values[5] 147 | 148 | # Just make the iteration linear 149 | iteration = np.linspace(1, len(iteration), len(iteration)) 150 | 151 | # Make the subplots 
152 | fig, ax1 = plt.subplots() 153 | l1 = ax1.plot(iteration, objective, color=colors[0], label="objective") 154 | ax1.set_xlabel("Iteration") 155 | ax1.set_ylabel("Function value") 156 | 157 | ax2 = ax1.twinx() 158 | l2 = ax2.semilogy(iteration, lone, color=colors[1], label="l1-opt") 159 | l3 = ax2.semilogy(iteration, linfty, color=colors[3], label="linf-opt") 160 | l4 = ax2.semilogy(iteration, lambd, color=colors[2], label="l1-lambda") 161 | ax2.set_ylabel("Optimality error") 162 | 163 | # Manually add all the lines to the legend 164 | lns = l1 + l2 + l3 + l4 165 | labs = [l.get_label() for l in lns] 166 | ax1.legend(lns, labs, loc=0) 167 | plt.title(filename) 168 | 169 | if savefig: 170 | fname = os.path.splitext(filename)[0] # Delete suffix 171 | fname += "_history" 172 | plt.savefig(fname + ".pdf") 173 | plt.close() 174 | else: 175 | plt.show() 176 | 177 | 178 | if __name__ == "__main__": 179 | # Set up parser 180 | p = argparse.ArgumentParser("Plot values from a paropt output file") 181 | p.add_argument( 182 | "filename", metavar="paropt.out", type=str, help="ParOpt output file name" 183 | ) 184 | p.add_argument("--savefig", action="store_true") 185 | args = p.parse_args() 186 | 187 | # call plot_history 188 | plot_history(args.filename, args.savefig) 189 | -------------------------------------------------------------------------------- /src/ParOptCompactEigenvalueApprox.h: -------------------------------------------------------------------------------- 1 | #ifndef PAROPT_COMPACT_EIGENVALUE_APPROX_H 2 | #define PAROPT_COMPACT_EIGENVALUE_APPROX_H 3 | 4 | #include "ParOptQuasiNewton.h" 5 | #include "ParOptTrustRegion.h" 6 | 7 | class ParOptCompactEigenApprox : public ParOptBase { 8 | public: 9 | ParOptCompactEigenApprox(ParOptProblem *problem, int _N); 10 | ~ParOptCompactEigenApprox(); 11 | 12 | void multAdd(ParOptScalar alpha, ParOptVec *x, ParOptVec *y); 13 | void getApproximation(ParOptScalar **_c0, ParOptVec **_g0, int *_N, 14 | ParOptScalar **_M, 
                        ParOptScalar **_Minv,
                        ParOptVec ***_hvecs);
  ParOptScalar evalApproximation(ParOptVec *s, ParOptVec *t);
  void evalApproximationGradient(ParOptVec *s, ParOptVec *grad);

 private:
  // The constraint value and gradient
  ParOptScalar c0;
  ParOptVec *g0;

  // The Hessian approximation of the constraint
  int N;
  ParOptScalar *M;
  ParOptScalar *Minv;
  ParOptVec **hvecs;

  // Temporary vector for matrix-vector products
  ParOptScalar *tmp;
};

class ParOptEigenQuasiNewton : public ParOptCompactQuasiNewton {
 public:
  ParOptEigenQuasiNewton(ParOptCompactQuasiNewton *_qn,
                         ParOptCompactEigenApprox *_eigh, int _index = 0);
  ~ParOptEigenQuasiNewton();

  // Set whether or not to use the terms from the objective
  void setUseQuasiNewtonObjective(int truth);

  // Reset the internal data
  void reset();

  // In this case, the quasi-Newton update is not performed here.
  // The quasi-Newton update will be performed directly on the
  // quasi-Newton object itself.
  int update(ParOptVec *x, const ParOptScalar *z, ParOptVec *zw, ParOptVec *s,
             ParOptVec *y);
  int update(ParOptVec *x, const ParOptScalar *z, ParOptVec *zw);

  // Perform a matrix-vector multiplication
  void mult(ParOptVec *x, ParOptVec *y);
  void multAdd(ParOptScalar alpha, ParOptVec *x, ParOptVec *y);

  // Get the compact representation of the quasi-Newton method
  int getCompactMat(ParOptScalar *_b0, const ParOptScalar **_d,
                    const ParOptScalar **_M, ParOptVec ***Z);

  // Get the maximum size of the limited-memory BFGS
  int getMaxLimitedMemorySize();

  // Access the underlying approximation objects
  ParOptCompactQuasiNewton *getCompactQuasiNewton();
  ParOptCompactEigenApprox *getCompactEigenApprox();
  int getMultiplierIndex();

 private:
  // Flag to indicate whether to include the quasi-Newton terms
  // or just the constraint approximation in the compact approx.
  int use_quasi_newton_objective;

  // The two contributions to the Hessian of the Lagrangian
  int index;
  ParOptScalar z0;
  ParOptCompactQuasiNewton *qn;
  ParOptCompactEigenApprox *eigh;

  // Objects to store the vectors
  int max_vecs;
  ParOptScalar *M, *d;
  ParOptVec **Z;
};

class ParOptEigenSubproblem : public ParOptTrustRegionSubproblem {
 public:
  ParOptEigenSubproblem(ParOptProblem *_problem, ParOptEigenQuasiNewton *_qn);
  ~ParOptEigenSubproblem();

  // Set the update function for the eigenvalue approximation
  void setEigenModelUpdate(void *data,
                           void (*update)(void *, ParOptVec *,
                                          ParOptCompactEigenApprox *));

  // Implementation for the trust-region specific functions
  ParOptCompactQuasiNewton *getQuasiNewton();
  void initModelAndBounds(double tr_size);
  void setTrustRegionBounds(double tr_size);
  int evalTrialStepAndUpdate(int update_flag, ParOptVec *step, ParOptScalar *z,
                             ParOptVec *zw, ParOptScalar *fobj,
                             ParOptScalar *cons);
  int acceptTrialStep(ParOptVec *step, ParOptScalar *z, ParOptVec *zw);
  void rejectTrialStep();
  int getQuasiNewtonUpdateType();

  // Create the design vectors
  ParOptVec *createDesignVec();
  ParOptVec *createConstraintVec();
  ParOptQuasiDefMat *createQuasiDefMat();

  // Get the communicator for the problem
  MPI_Comm getMPIComm();

  // Functions to indicate the type of sparse constraints
  int isDenseInequality();
  int isSparseInequality();
  int useLowerBounds();
  int useUpperBounds();

  // Get the variables and bounds from the problem
  void getVarsAndBounds(ParOptVec *x, ParOptVec *lb, ParOptVec *ub);

  // Evaluate the objective and constraints
  int evalObjCon(ParOptVec *x, ParOptScalar *fobj, ParOptScalar *cons);

  // Evaluate the objective and constraint gradients
  int evalObjConGradient(ParOptVec *x, ParOptVec *g, ParOptVec **Ac);

  // Hessian-vector products are not used by this subproblem (returns 0)
  int evalHvecProduct(ParOptVec *x, ParOptScalar *z, ParOptVec *zw,
                      ParOptVec *px, ParOptVec *hvec) {
    return 0;
  }

  // The diagonal Hessian is not used by this subproblem (returns 0)
  int evalHessianDiag(ParOptVec *x, ParOptScalar *z, ParOptVec *zw,
                      ParOptVec *hdiag) {
    return 0;
  }

  // Evaluate the sparse constraints
  void evalSparseCon(ParOptVec *x, ParOptVec *out);

  // Compute the Jacobian-vector product out = J(x)*px
  void addSparseJacobian(ParOptScalar alpha, ParOptVec *x, ParOptVec *px,
                         ParOptVec *out);

  // Compute the transpose Jacobian-vector product out = J(x)^{T}*pzw
  void addSparseJacobianTranspose(ParOptScalar alpha, ParOptVec *x,
                                  ParOptVec *pzw, ParOptVec *out);

  // Add the inner product of the constraints to the matrix such
  // that A += J(x)*cvec*J(x)^{T} where cvec is a diagonal matrix
  void addSparseInnerProduct(ParOptScalar alpha, ParOptVec *x, ParOptVec *cvec,
                             ParOptScalar *A);

  // Override this function if you'd like to print out
  // something with the same frequency as the output files
  void writeOutput(int iter, ParOptVec *x);

  int getLinearModel(ParOptVec **_xk = NULL, ParOptScalar *_fk = NULL,
                     ParOptVec **_gk = NULL, const ParOptScalar **_ck = NULL,
                     ParOptVec ***_Ak = NULL, ParOptVec **_lb = NULL,
                     ParOptVec **_ub = NULL);

 private:
  // Callback data/function used to update the eigenvalue model
  void *data;
  void (*updateEigenModel)(void *, ParOptVec *, ParOptCompactEigenApprox *);

  // Pointer to the optimization problem
  ParOptProblem *prob;

  // Set the quadratic model parameters for this problem
  ParOptEigenQuasiNewton *approx;
  int qn_update_type;

  int n;  // The number of design variables (local)
  int m;  // The number of dense constraints (global)

  // Lower/upper bounds for the original problem
  ParOptVec *lb, *ub;

  // Lower/upper bounds for the trust region problem (these lie within
  // the problem lower/upper bounds)
  ParOptVec *lk, *uk;

  // Current design point
  ParOptVec *xk;

  // Current objective and constraint values and gradients
  ParOptScalar fk, *ck;
  ParOptVec *gk;
  ParOptVec **Ak;

  // Temporary objective/constraint values and their gradients
  ParOptScalar ft, *ct;
  ParOptVec *gt;
  ParOptVec **At;

  // Temporary vectors
  ParOptVec *t;
  ParOptVec *xtemp;
};

#endif  // PAROPT_COMPACT_EIGENVALUE_APPROX_H