├── .github
│   └── workflows
│       ├── execute_notebook.yaml
│       ├── pypi_release.yaml
│       └── tests.yaml
├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── PEPit
│   ├── __init__.py
│   ├── block_partition.py
│   ├── constraint.py
│   ├── examples
│   │   ├── __init__.py
│   │   ├── adaptive_methods
│   │   │   ├── __init__.py
│   │   │   ├── polyak_steps_in_distance_to_optimum.py
│   │   │   └── polyak_steps_in_function_value.py
│   │   ├── composite_convex_minimization
│   │   │   ├── __init__.py
│   │   │   ├── accelerated_douglas_rachford_splitting.py
│   │   │   ├── accelerated_proximal_gradient.py
│   │   │   ├── bregman_proximal_point.py
│   │   │   ├── douglas_rachford_splitting.py
│   │   │   ├── douglas_rachford_splitting_contraction.py
│   │   │   ├── frank_wolfe.py
│   │   │   ├── improved_interior_algorithm.py
│   │   │   ├── no_lips_in_bregman_divergence.py
│   │   │   ├── no_lips_in_function_value.py
│   │   │   ├── proximal_gradient.py
│   │   │   ├── proximal_gradient_quadratics.py
│   │   │   └── three_operator_splitting.py
│   │   ├── continuous_time_models
│   │   │   ├── __init__.py
│   │   │   ├── accelerated_gradient_flow_convex.py
│   │   │   ├── accelerated_gradient_flow_strongly_convex.py
│   │   │   ├── gradient_flow_convex.py
│   │   │   └── gradient_flow_strongly_convex.py
│   │   ├── example_template.py
│   │   ├── fixed_point_problems
│   │   │   ├── __init__.py
│   │   │   ├── halpern_iteration.py
│   │   │   ├── inconsistent_halpern_iteration.py
│   │   │   ├── krasnoselskii_mann_constant_step_sizes.py
│   │   │   ├── krasnoselskii_mann_increasing_step_sizes.py
│   │   │   └── optimal_contractive_halpern_iteration.py
│   │   ├── inexact_proximal_methods
│   │   │   ├── __init__.py
│   │   │   ├── accelerated_inexact_forward_backward.py
│   │   │   ├── partially_inexact_douglas_rachford_splitting.py
│   │   │   └── relatively_inexact_proximal_point_algorithm.py
│   │   ├── low_dimensional_worst_cases_scenarios
│   │   │   ├── __init__.py
│   │   │   ├── alternate_projections.py
│   │   │   ├── averaged_projections.py
│   │   │   ├── dykstra.py
│   │   │   ├── frank_wolfe.py
│   │   │   ├── gradient_descent.py
│   │   │   ├── halpern_iteration.py
│   │   │   ├── inexact_gradient.py
│   │   │   ├── optimized_gradient.py
│   │   │   └── proximal_point.py
│   │   ├── monotone_inclusions_variational_inequalities
│   │   │   ├── __init__.py
│   │   │   ├── accelerated_proximal_point.py
│   │   │   ├── douglas_rachford_splitting.py
│   │   │   ├── optimal_strongly_monotone_proximal_point.py
│   │   │   ├── optimistic_gradient.py
│   │   │   ├── past_extragradient.py
│   │   │   ├── proximal_point.py
│   │   │   └── three_operator_splitting.py
│   │   ├── nonconvex_optimization
│   │   │   ├── __init__.py
│   │   │   ├── gradient_descent.py
│   │   │   ├── no_lips_1.py
│   │   │   └── no_lips_2.py
│   │   ├── potential_functions
│   │   │   ├── __init__.py
│   │   │   ├── accelerated_gradient_method.py
│   │   │   ├── gradient_descent_lyapunov_1.py
│   │   │   └── gradient_descent_lyapunov_2.py
│   │   ├── stochastic_and_randomized_convex_minimization
│   │   │   ├── __init__.py
│   │   │   ├── point_saga.py
│   │   │   ├── randomized_coordinate_descent_smooth_convex.py
│   │   │   ├── randomized_coordinate_descent_smooth_strongly_convex.py
│   │   │   ├── saga.py
│   │   │   ├── sgd.py
│   │   │   └── sgd_overparametrized.py
│   │   ├── tutorials
│   │   │   ├── __init__.py
│   │   │   └── gradient_descent_contraction.py
│   │   └── unconstrained_convex_minimization
│   │       ├── __init__.py
│   │       ├── accelerated_gradient_convex.py
│   │       ├── accelerated_gradient_strongly_convex.py
│   │       ├── accelerated_proximal_point.py
│   │       ├── conjugate_gradient.py
│   │       ├── conjugate_gradient_qg_convex.py
│   │       ├── cyclic_coordinate_descent.py
│   │       ├── epsilon_subgradient_method.py
│   │       ├── gradient_descent.py
│   │       ├── gradient_descent_lc.py
│   │       ├── gradient_descent_qg_convex.py
│   │       ├── gradient_descent_qg_convex_decreasing.py
│   │       ├── gradient_descent_quadratics.py
│   │       ├── gradient_descent_silver_stepsize_convex.py
│   │       ├── gradient_descent_silver_stepsize_strongly_convex.py
│   │       ├── gradient_exact_line_search.py
│   │       ├── heavy_ball_momentum.py
│   │       ├── heavy_ball_momentum_qg_convex.py
│   │       ├── inexact_accelerated_gradient.py
│   │       ├── inexact_gradient_descent.py
│   │       ├── inexact_gradient_exact_line_search.py
│   │       ├── information_theoretic_exact_method.py
│   │       ├── optimized_gradient.py
│   │       ├── optimized_gradient_for_gradient.py
│   │       ├── proximal_point.py
│   │       ├── robust_momentum.py
│   │       ├── subgradient_method.py
│   │       ├── subgradient_method_rsi_eb.py
│   │       └── triple_momentum.py
│   ├── expression.py
│   ├── function.py
│   ├── functions
│   │   ├── __init__.py
│   │   ├── block_smooth_convex_function.py
│   │   ├── convex_function.py
│   │   ├── convex_indicator.py
│   │   ├── convex_lipschitz_function.py
│   │   ├── convex_qg_function.py
│   │   ├── convex_support_function.py
│   │   ├── rsi_eb_function.py
│   │   ├── smooth_convex_function.py
│   │   ├── smooth_convex_lipschitz_function.py
│   │   ├── smooth_function.py
│   │   ├── smooth_strongly_convex_function.py
│   │   ├── smooth_strongly_convex_quadratic_function.py
│   │   └── strongly_convex_function.py
│   ├── operators
│   │   ├── __init__.py
│   │   ├── cocoercive.py
│   │   ├── cocoercive_strongly_monotone.py
│   │   ├── linear.py
│   │   ├── lipschitz.py
│   │   ├── lipschitz_strongly_monotone.py
│   │   ├── monotone.py
│   │   ├── negatively_comonotone.py
│   │   ├── nonexpansive.py
│   │   ├── skew_symmetric_linear.py
│   │   ├── strongly_monotone.py
│   │   └── symmetric_linear.py
│   ├── pep.py
│   ├── point.py
│   ├── primitive_steps
│   │   ├── __init__.py
│   │   ├── bregman_gradient_step.py
│   │   ├── bregman_proximal_step.py
│   │   ├── epsilon_subgradient_step.py
│   │   ├── exact_linesearch_step.py
│   │   ├── inexact_gradient_step.py
│   │   ├── inexact_proximal_step.py
│   │   ├── linear_optimization_step.py
│   │   └── proximal_step.py
│   ├── psd_matrix.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── dict_operations.py
│   │   └── expressions_to_matrices.py
│   ├── wrapper.py
│   └── wrappers
│       ├── __init__.py
│       ├── cvxpy_wrapper.py
│       └── mosek_wrapper.py
├── README.md
├── codecov.yaml
├── docs
│   ├── Makefile
│   ├── make.bat
│   ├── requirements.txt
│   └── source
│       ├── api.rst
│       ├── api
│       │   ├── functions.rst
│       │   ├── functions_and_operators.rst
│       │   ├── main_modules.rst
│       │   ├── operators.rst
│       │   ├── steps.rst
│       │   ├── tools.rst
│       │   └── wrappers.rst
│       ├── conf.py
│       ├── contributing.rst
│       ├── examples.rst
│       ├── examples
│       │   ├── a.rst
│       │   ├── b.rst
│       │   ├── c.rst
│       │   ├── d.rst
│       │   ├── e.rst
│       │   ├── f.rst
│       │   ├── g.rst
│       │   ├── h.rst
│       │   ├── i.rst
│       │   ├── j.rst
│       │   ├── k.rst
│       │   └── l.rst
│       ├── index.rst
│       ├── quickstart.rst
│       ├── whatsnew.rst
│       └── whatsnew
│           ├── 0.1.0.rst
│           ├── 0.2.0.rst
│           ├── 0.2.1.rst
│           ├── 0.3.2.rst
│           └── 0.3.3.rst
├── pyproject.toml
├── requirements.txt
├── ressources
│   └── demo
│       └── PEPit_demo.ipynb
├── setup.cfg
├── setup.py
└── tests
    ├── __init__.py
    ├── additional_complexified_examples_tests
    │   ├── __init__.py
    │   ├── gradient_descent_blocks.py
    │   ├── gradient_descent_useless_blocks.py
    │   ├── gradient_exact_line_search.py
    │   ├── inexact_gradient_exact_line_search.py
    │   ├── inexact_gradient_exact_line_search2.py
    │   ├── inexact_gradient_exact_line_search3.py
    │   ├── proximal_gradient.py
    │   ├── proximal_gradient_useless_partition.py
    │   ├── proximal_point.py
    │   ├── proximal_point_LMI.py
    │   ├── proximal_point_useless_partition.py
    │   ├── randomized_coordinate_descent_smooth_convex.py
    │   └── randomized_coordinate_descent_smooth_strongly_convex.py
    ├── test_block_partition.py
    ├── test_block_smooth_convex_functions.py
    ├── test_constraints.py
    ├── test_dict_operations.py
    ├── test_examples.py
    ├── test_expression.py
    ├── test_expression_to_matrices.py
    ├── test_function.py
    ├── test_functions_and_operators.py
    ├── test_pep.py
    ├── test_point.py
    ├── test_psd_matrix.py
    ├── test_uselessly_complexified_examples.py
    └── test_wrappers.py
--------------------------------------------------------------------------------
/.github/workflows/execute_notebook.yaml:
--------------------------------------------------------------------------------
name: Execute notebook

on:
  workflow_dispatch:
  pull_request:
    branches:
      - 'master'
      - 'develop'

  push:
    branches:
      - 'master'
      - 'develop'
  create:
    branches:
      - 'master'
    tags:
      - '**'

jobs:
  linux:

    runs-on: ubuntu-latest
    if: "!contains(github.event.head_commit.message, 'no ci')"
    strategy:
      max-parallel: 5
      matrix:
        os: [ubuntu-latest]
        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install lxml_html_clean
          pip install -r requirements.txt
          pip install mosek
      - name: Update version in setup.py
        run: >-
          sed -i "s/{{VERSION_PLACEHOLDER}}/0.0.0/g" setup.py
      - name: Install PEPit
        run: |
          pip install -e .
      - uses: yaananth/run-notebook@v2
        env:
          MOSEKLM_LICENSE_FILE: ${{ secrets.MSK_LICENSE }}
          RUNNER: ${{ toJson(runner) }}
          SECRETS: ${{ toJson(secrets) }}
          GITHUB: ${{ toJson(github) }}
        with:
          notebook: "ressources/demo/PEPit_demo.ipynb"
          isReport: False
          poll: True

--------------------------------------------------------------------------------
/.github/workflows/pypi_release.yaml:
--------------------------------------------------------------------------------
name: Publish Python 🐍 distributions 📦 to PyPI

on:
  push:
    tags:
      - '*'

jobs:
  build-n-publish:
    name: Build and publish Python 🐍 distributions 📦 to PyPI
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Set up Python 3.10
        uses: actions/setup-python@v3
        with:
          python-version: '3.10'
      - name: Install pypa/setuptools
        run: >-
          python -m
          pip install wheel
      - name: Extract tag name
        id: tag
        run: echo ::set-output name=TAG_NAME::$(echo $GITHUB_REF | cut -d / -f 3)
      - name: Update version in setup.py
        run: >-
          sed -i "s/{{VERSION_PLACEHOLDER}}/${{ steps.tag.outputs.TAG_NAME }}/g" setup.py
      - name: Build a binary wheel
        run: >-
          python setup.py sdist bdist_wheel
      - name: Publish distribution 📦 to PyPI
        uses: pypa/gh-action-pypi-publish@master
        with:
          password: ${{ secrets.PYPI_API_TOKEN }}
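
Editor's note: all three workflows rewrite `{{VERSION_PLACEHOLDER}}` in `setup.py` with `sed` before building or installing, so `setup.py` must carry that literal string in its `version` field. Below is a minimal sketch of the corresponding `setup()` call; only the placeholder is implied by the workflows, and every other field is an illustrative assumption (the actual `setup.py` is not included in this dump).

```python
# Hypothetical sketch of the setup() call targeted by the CI's sed command.
# Only version="{{VERSION_PLACEHOLDER}}" is implied by the workflows above;
# the remaining fields are illustrative assumptions.
from setuptools import setup, find_packages

setup(
    name="PEPit",
    version="{{VERSION_PLACEHOLDER}}",  # replaced with the git tag (or 0.0.0 in CI)
    packages=find_packages(),
)
```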
--------------------------------------------------------------------------------
/.github/workflows/tests.yaml:
--------------------------------------------------------------------------------
name: Tests

on:
  workflow_dispatch:
  pull_request:
    branches:
      - 'master'
      - 'develop'

  push:
    branches:
      - 'master'
      - 'develop'

  create:
    branches:
      - 'master'
    tags:
      - '**'


jobs:
  linux:

    runs-on: ubuntu-latest
    if: "!contains(github.event.head_commit.message, 'no ci')"
    strategy:
      max-parallel: 5
      matrix:
        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install coverage
      - name: Update version in setup.py
        run: >-
          sed -i "s/{{VERSION_PLACEHOLDER}}/0.0.0/g" setup.py
      - name: Install PEPit
        run: |
          pip install -e .
      - name: Install MOSEK
        run: |
          pip install mosek
      - name: Setup MOSEK license, run tests and generate report
        env:
          MOSEKLM_LICENSE_FILE: ${{ secrets.MSK_LICENSE }}
        run: |
          coverage run -m unittest tests/test_*
      - name: Upload Coverage to Codecov
        uses: codecov/codecov-action@v3


  linux_no_mosek:

    runs-on: ubuntu-latest
    if: "!contains(github.event.head_commit.message, 'no ci')"
    strategy:
      max-parallel: 5
      matrix:
        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install coverage
      - name: Update version in setup.py
        run: >-
          sed -i "s/{{VERSION_PLACEHOLDER}}/0.0.0/g" setup.py
      - name: Install PEPit
        run: |
          pip install -e .
      - name: Run tests and generate report
        run: |
          coverage run -m unittest tests/test_*
      - name: Upload Coverage to Codecov
        uses: codecov/codecov-action@v3


  linux_no_mosek_license:

    runs-on: ubuntu-latest
    if: "!contains(github.event.head_commit.message, 'no ci')"
    strategy:
      max-parallel: 5
      matrix:
        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install coverage
      - name: Update version in setup.py
        run: >-
          sed -i "s/{{VERSION_PLACEHOLDER}}/0.0.0/g" setup.py
      - name: Install PEPit
        run: |
          pip install -e .
      - name: Setup MOSEK
        run: |
          pip install mosek
      - name: Run tests and generate report
        run: |
          coverage run -m unittest tests/test_*
      - name: Upload Coverage to Codecov
        uses: codecov/codecov-action@v3

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# intellij configs
.idea/

# MacOS files
*.DS_Store

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# Documentation
docs/autodocgen-output
docs/build

--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
# Read the Docs configuration file for Sphinx projects
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Set the OS, Python version and other tools you might need
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
    # You can also specify other tool versions:
    # nodejs: "20"
    # rust: "1.70"
    # golang: "1.20"

# Build documentation in the "docs/" directory with Sphinx
sphinx:
  configuration: docs/source/conf.py
  # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs
  # builder: "dirhtml"
  # Fail on all warnings to avoid broken references
  # fail_on_warning: true

# Optionally build your docs in additional formats such as PDF and ePub
#formats:
#  - pdf
#  - epub

# Optional but recommended, declare the Python requirements required
# to build your documentation
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
  install:
    - requirements: docs/requirements.txt
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) [2021] [Baptiste Goujaud, Celine Moucer, Julien Hendrickx, François Glineur, Adrien Taylor, Aymeric Dieuleveut]

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/PEPit/__init__.py:
--------------------------------------------------------------------------------
from .block_partition import BlockPartition
from .constraint import Constraint
from .expression import Expression, null_expression
from .function import Function
from .psd_matrix import PSDMatrix
from .wrapper import Wrapper
from .pep import PEP
from .point import Point, null_point

__all__ = ['block_partition', 'BlockPartition',
           'examples',
           'functions',
           'operators',
           'primitive_steps',
           'tools',
           'wrappers',
           'constraint', 'Constraint',
           'expression', 'Expression', 'null_expression',
           'function', 'Function',
           'psd_matrix', 'PSDMatrix',
           'pep', 'PEP',
           'point', 'Point', 'null_point',
           'wrapper', 'Wrapper',
           ]
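
Editor's note: the top-level exports above are all one needs for a first worst-case computation. The following is a minimal sketch, mirroring the package's gradient-descent tutorial, that bounds f(x_1) - f(x_*) after one gradient step on an L-smooth convex function; the step size and initial radius are illustrative choices.

```python
from PEPit import PEP
from PEPit.functions import SmoothConvexFunction

# Instantiate a PEP and declare an L-smooth convex function.
problem = PEP()
L = 1.
func = problem.declare_function(SmoothConvexFunction, L=L)

# An optimal point, and an initial point at distance at most 1 from it.
xs = func.stationary_point()
x0 = problem.set_initial_point()
problem.set_initial_condition((x0 - xs) ** 2 <= 1)

# One step of gradient descent with step size 1/L.
x1 = x0 - (1 / L) * func.gradient(x0)

# Worst-case value of f(x1) - f(xs) over the declared class.
problem.set_performance_metric(func(x1) - func(xs))
tau = problem.solve(verbose=0)
```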
--------------------------------------------------------------------------------
/PEPit/examples/__init__.py:
--------------------------------------------------------------------------------
__all__ = ['unconstrained_convex_minimization',
           'composite_convex_minimization',
           'nonconvex_optimization',
           'stochastic_and_randomized_convex_minimization',
           'monotone_inclusions_variational_inequalities',
           'fixed_point_problems',
           'potential_functions',
           'inexact_proximal_methods',
           'adaptive_methods',
           'low_dimensional_worst_cases_scenarios',
           'tutorials',
           'continuous_time_models',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/adaptive_methods/__init__.py:
--------------------------------------------------------------------------------
from .polyak_steps_in_distance_to_optimum import wc_polyak_steps_in_distance_to_optimum
from .polyak_steps_in_function_value import wc_polyak_steps_in_function_value

__all__ = ['polyak_steps_in_distance_to_optimum', 'wc_polyak_steps_in_distance_to_optimum',
           'polyak_steps_in_function_value', 'wc_polyak_steps_in_function_value',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/composite_convex_minimization/__init__.py:
--------------------------------------------------------------------------------
from .accelerated_douglas_rachford_splitting import wc_accelerated_douglas_rachford_splitting
from .accelerated_proximal_gradient import wc_accelerated_proximal_gradient
from .bregman_proximal_point import wc_bregman_proximal_point
from .douglas_rachford_splitting import wc_douglas_rachford_splitting
from .douglas_rachford_splitting_contraction import wc_douglas_rachford_splitting_contraction
from .frank_wolfe import wc_frank_wolfe
from .improved_interior_algorithm import wc_improved_interior_algorithm
from .no_lips_in_function_value import wc_no_lips_in_function_value
from .no_lips_in_bregman_divergence import wc_no_lips_in_bregman_divergence
from .proximal_gradient import wc_proximal_gradient
from .proximal_gradient_quadratics import wc_proximal_gradient_quadratics
from .three_operator_splitting import wc_three_operator_splitting

__all__ = ['accelerated_douglas_rachford_splitting', 'wc_accelerated_douglas_rachford_splitting',
           'accelerated_proximal_gradient', 'wc_accelerated_proximal_gradient',
           'bregman_proximal_point', 'wc_bregman_proximal_point',
           'douglas_rachford_splitting', 'wc_douglas_rachford_splitting',
           'douglas_rachford_splitting_contraction', 'wc_douglas_rachford_splitting_contraction',
           'frank_wolfe', 'wc_frank_wolfe',
           'improved_interior_algorithm', 'wc_improved_interior_algorithm',
           'no_lips_in_function_value', 'wc_no_lips_in_function_value',
           'no_lips_in_bregman_divergence', 'wc_no_lips_in_bregman_divergence',
           'proximal_gradient', 'wc_proximal_gradient',
           'proximal_gradient_quadratics', 'wc_proximal_gradient_quadratics',
           'three_operator_splitting', 'wc_three_operator_splitting',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/continuous_time_models/__init__.py:
--------------------------------------------------------------------------------
from .accelerated_gradient_flow_convex import wc_accelerated_gradient_flow_convex
from .accelerated_gradient_flow_strongly_convex import wc_accelerated_gradient_flow_strongly_convex
from .gradient_flow_convex import wc_gradient_flow_convex
from .gradient_flow_strongly_convex import wc_gradient_flow_strongly_convex

__all__ = ['accelerated_gradient_flow_convex', 'wc_accelerated_gradient_flow_convex',
           'accelerated_gradient_flow_strongly_convex', 'wc_accelerated_gradient_flow_strongly_convex',
           'gradient_flow_convex', 'wc_gradient_flow_convex',
           'gradient_flow_strongly_convex', 'wc_gradient_flow_strongly_convex',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/fixed_point_problems/__init__.py:
--------------------------------------------------------------------------------
from .halpern_iteration import wc_halpern_iteration
from .krasnoselskii_mann_constant_step_sizes import wc_krasnoselskii_mann_constant_step_sizes
from .krasnoselskii_mann_increasing_step_sizes import wc_krasnoselskii_mann_increasing_step_sizes
from .inconsistent_halpern_iteration import wc_inconsistent_halpern_iteration
from .optimal_contractive_halpern_iteration import wc_optimal_contractive_halpern_iteration

__all__ = ['halpern_iteration', 'wc_halpern_iteration',
           'krasnoselskii_mann_constant_step_sizes', 'wc_krasnoselskii_mann_constant_step_sizes',
           'krasnoselskii_mann_increasing_step_sizes', 'wc_krasnoselskii_mann_increasing_step_sizes',
           'inconsistent_halpern_iteration', 'wc_inconsistent_halpern_iteration',
           'optimal_contractive_halpern_iteration', 'wc_optimal_contractive_halpern_iteration',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/inexact_proximal_methods/__init__.py:
--------------------------------------------------------------------------------
from .accelerated_inexact_forward_backward import wc_accelerated_inexact_forward_backward
from .partially_inexact_douglas_rachford_splitting import wc_partially_inexact_douglas_rachford_splitting
from .relatively_inexact_proximal_point_algorithm import wc_relatively_inexact_proximal_point_algorithm

__all__ = ['accelerated_inexact_forward_backward', 'wc_accelerated_inexact_forward_backward',
           'partially_inexact_douglas_rachford_splitting', 'wc_partially_inexact_douglas_rachford_splitting',
           'relatively_inexact_proximal_point_algorithm', 'wc_relatively_inexact_proximal_point_algorithm',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/low_dimensional_worst_cases_scenarios/__init__.py:
--------------------------------------------------------------------------------
from .alternate_projections import wc_alternate_projections
from .averaged_projections import wc_averaged_projections
from .dykstra import wc_dykstra
from .frank_wolfe import wc_frank_wolfe
from .gradient_descent import wc_gradient_descent
from .halpern_iteration import wc_halpern_iteration
from .inexact_gradient import wc_inexact_gradient
from .optimized_gradient import wc_optimized_gradient
from .proximal_point import wc_proximal_point

__all__ = ['alternate_projections', 'wc_alternate_projections',
           'averaged_projections', 'wc_averaged_projections',
           'dykstra', 'wc_dykstra',
           'frank_wolfe', 'wc_frank_wolfe',
           'gradient_descent', 'wc_gradient_descent',
           'halpern_iteration', 'wc_halpern_iteration',
           'inexact_gradient', 'wc_inexact_gradient',
           'optimized_gradient', 'wc_optimized_gradient',
           'proximal_point', 'wc_proximal_point',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/monotone_inclusions_variational_inequalities/__init__.py:
--------------------------------------------------------------------------------
from .accelerated_proximal_point import wc_accelerated_proximal_point
from .douglas_rachford_splitting import wc_douglas_rachford_splitting
from .optimal_strongly_monotone_proximal_point import wc_optimal_strongly_monotone_proximal_point
from .optimistic_gradient import wc_optimistic_gradient
from .past_extragradient import wc_past_extragradient
from .proximal_point import wc_proximal_point
from .three_operator_splitting import wc_three_operator_splitting

__all__ = ['accelerated_proximal_point', 'wc_accelerated_proximal_point',
           'douglas_rachford_splitting', 'wc_douglas_rachford_splitting',
           'optimal_strongly_monotone_proximal_point', 'wc_optimal_strongly_monotone_proximal_point',
           'optimistic_gradient', 'wc_optimistic_gradient',
           'past_extragradient', 'wc_past_extragradient',
           'proximal_point', 'wc_proximal_point',
           'three_operator_splitting', 'wc_three_operator_splitting',
           ]
--------------------------------------------------------------------------------
/PEPit/examples/nonconvex_optimization/__init__.py:
--------------------------------------------------------------------------------
from .gradient_descent import wc_gradient_descent
from .no_lips_1 import wc_no_lips_1
from .no_lips_2 import wc_no_lips_2

__all__ = ['gradient_descent', 'wc_gradient_descent',
           'no_lips_1', 'wc_no_lips_1',
           'no_lips_2', 'wc_no_lips_2',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/potential_functions/__init__.py:
--------------------------------------------------------------------------------
from .accelerated_gradient_method import wc_accelerated_gradient_method
from .gradient_descent_lyapunov_1 import wc_gradient_descent_lyapunov_1
from .gradient_descent_lyapunov_2 import wc_gradient_descent_lyapunov_2

__all__ = ['accelerated_gradient_method', 'wc_accelerated_gradient_method',
           'gradient_descent_lyapunov_1', 'wc_gradient_descent_lyapunov_1',
           'gradient_descent_lyapunov_2', 'wc_gradient_descent_lyapunov_2',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/stochastic_and_randomized_convex_minimization/__init__.py:
--------------------------------------------------------------------------------
from .point_saga import wc_point_saga
from .randomized_coordinate_descent_smooth_convex import wc_randomized_coordinate_descent_smooth_convex
from .randomized_coordinate_descent_smooth_strongly_convex import wc_randomized_coordinate_descent_smooth_strongly_convex
from .saga import wc_saga
from .sgd import wc_sgd
from .sgd_overparametrized import wc_sgd_overparametrized

__all__ = ['point_saga', 'wc_point_saga',
           'randomized_coordinate_descent_smooth_convex', 'wc_randomized_coordinate_descent_smooth_convex',
           'randomized_coordinate_descent_smooth_strongly_convex', 'wc_randomized_coordinate_descent_smooth_strongly_convex',
           'saga', 'wc_saga',
           'sgd', 'wc_sgd',
           'sgd_overparametrized', 'wc_sgd_overparametrized',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/tutorials/__init__.py:
--------------------------------------------------------------------------------
from .gradient_descent_contraction import wc_gradient_descent_contraction

__all__ = ['gradient_descent_contraction', 'wc_gradient_descent_contraction',
           ]

--------------------------------------------------------------------------------
/PEPit/examples/unconstrained_convex_minimization/__init__.py:
--------------------------------------------------------------------------------
from .accelerated_gradient_convex import wc_accelerated_gradient_convex
from .accelerated_gradient_strongly_convex import wc_accelerated_gradient_strongly_convex
from .accelerated_proximal_point import wc_accelerated_proximal_point
from .conjugate_gradient import wc_conjugate_gradient
from .conjugate_gradient_qg_convex import wc_conjugate_gradient_qg_convex
from .cyclic_coordinate_descent import wc_cyclic_coordinate_descent
from .epsilon_subgradient_method import wc_epsilon_subgradient_method
from .gradient_descent import wc_gradient_descent
from .gradient_descent_lc import wc_gradient_descent_lc
from .gradient_descent_qg_convex import wc_gradient_descent_qg_convex
from .gradient_descent_qg_convex_decreasing import wc_gradient_descent_qg_convex_decreasing
from .gradient_descent_quadratics import wc_gradient_descent_quadratics
from .gradient_exact_line_search import wc_gradient_exact_line_search
from .gradient_descent_silver_stepsize_convex import wc_gradient_descent_silver_stepsize_convex
from .gradient_descent_silver_stepsize_strongly_convex import wc_gradient_descent_silver_stepsize_strongly_convex
from .heavy_ball_momentum import wc_heavy_ball_momentum
from .heavy_ball_momentum_qg_convex import wc_heavy_ball_momentum_qg_convex
from .inexact_accelerated_gradient import wc_inexact_accelerated_gradient
from .inexact_gradient_descent import wc_inexact_gradient_descent
from .inexact_gradient_exact_line_search import wc_inexact_gradient_exact_line_search
from .information_theoretic_exact_method import wc_information_theoretic
from .optimized_gradient import wc_optimized_gradient
from .optimized_gradient_for_gradient import wc_optimized_gradient_for_gradient
from .proximal_point import wc_proximal_point
from .robust_momentum import wc_robust_momentum
from .subgradient_method import wc_subgradient_method
from .subgradient_method_rsi_eb import wc_subgradient_method_rsi_eb
from .triple_momentum import wc_triple_momentum

__all__ = ['accelerated_gradient_convex', 'wc_accelerated_gradient_convex',
           'accelerated_gradient_strongly_convex', 'wc_accelerated_gradient_strongly_convex',
           'accelerated_proximal_point', 'wc_accelerated_proximal_point',
           'conjugate_gradient', 'wc_conjugate_gradient',
           'conjugate_gradient_qg_convex', 'wc_conjugate_gradient_qg_convex',
           'cyclic_coordinate_descent', 'wc_cyclic_coordinate_descent',
           'epsilon_subgradient_method', 'wc_epsilon_subgradient_method',
           'gradient_descent', 'wc_gradient_descent',
           'gradient_descent_lc', 'wc_gradient_descent_lc',
           'gradient_descent_qg_convex', 'wc_gradient_descent_qg_convex',
           'gradient_descent_qg_convex_decreasing', 'wc_gradient_descent_qg_convex_decreasing',
           'gradient_descent_quadratics', 'wc_gradient_descent_quadratics',
           'gradient_exact_line_search', 'wc_gradient_exact_line_search',
           'heavy_ball_momentum', 'wc_heavy_ball_momentum',
           'heavy_ball_momentum_qg_convex', 'wc_heavy_ball_momentum_qg_convex',
           'inexact_accelerated_gradient', 'wc_inexact_accelerated_gradient',
           'inexact_gradient_descent', 'wc_inexact_gradient_descent',
           'inexact_gradient_exact_line_search', 'wc_inexact_gradient_exact_line_search',
           'information_theoretic_exact_method', 'wc_information_theoretic',
           'optimized_gradient', 'wc_optimized_gradient',
           'optimized_gradient_for_gradient', 'wc_optimized_gradient_for_gradient',
           'proximal_point', 'wc_proximal_point',
           'robust_momentum', 'wc_robust_momentum',
           'subgradient_method', 'wc_subgradient_method',
           'subgradient_method_rsi_eb', 'wc_subgradient_method_rsi_eb',
           'triple_momentum', 'wc_triple_momentum',
           ]
--------------------------------------------------------------------------------
/PEPit/functions/__init__.py:
--------------------------------------------------------------------------------
from .block_smooth_convex_function import BlockSmoothConvexFunction
from .convex_function import ConvexFunction
from .convex_indicator import ConvexIndicatorFunction
from .convex_lipschitz_function import ConvexLipschitzFunction
from .convex_qg_function import ConvexQGFunction
from .convex_support_function import ConvexSupportFunction
from .rsi_eb_function import RsiEbFunction
from .smooth_convex_function import SmoothConvexFunction
from .smooth_convex_lipschitz_function import SmoothConvexLipschitzFunction
from .smooth_function import SmoothFunction
from .smooth_strongly_convex_function import SmoothStronglyConvexFunction
from .smooth_strongly_convex_quadratic_function import SmoothStronglyConvexQuadraticFunction
from .strongly_convex_function import StronglyConvexFunction

__all__ = ['block_smooth_convex_function', 'BlockSmoothConvexFunction',
           'convex_function', 'ConvexFunction',
           'convex_indicator', 'ConvexIndicatorFunction',
           'convex_lipschitz_function', 'ConvexLipschitzFunction',
           'convex_qg_function', 'ConvexQGFunction',
           'convex_support_function', 'ConvexSupportFunction',
           'rsi_eb_function', 'RsiEbFunction',
           'smooth_convex_function', 'SmoothConvexFunction',
           'smooth_convex_lipschitz_function', 'SmoothConvexLipschitzFunction',
           'smooth_function', 'SmoothFunction',
           'smooth_strongly_convex_function', 'SmoothStronglyConvexFunction',
           'smooth_strongly_convex_quadratic_function', 'SmoothStronglyConvexQuadraticFunction',
           'strongly_convex_function', 'StronglyConvexFunction',
           ]
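
Editor's note: instances of these classes support linear combinations, which is how PEPit models composite objectives. Below is a hedged sketch of one proximal-gradient step on F = f1 + f2, in the spirit of the `composite_convex_minimization` examples above; all parameter values are illustrative, not the tuned choices of those examples.

```python
from PEPit import PEP
from PEPit.functions import SmoothStronglyConvexFunction, ConvexFunction
from PEPit.primitive_steps import proximal_step

problem = PEP()
f1 = problem.declare_function(SmoothStronglyConvexFunction, mu=.1, L=1.)
f2 = problem.declare_function(ConvexFunction)
F = f1 + f2  # composite objective, a linear combination of leaf functions

xs = F.stationary_point()
x0 = problem.set_initial_point()
problem.set_initial_condition((x0 - xs) ** 2 <= 1)

# One proximal-gradient step: forward (gradient) step on f1, backward (prox) step on f2.
gamma = 1.
x1, _, _ = proximal_step(x0 - gamma * f1.gradient(x0), f2, gamma)

# Worst-case distance contraction after one step.
problem.set_performance_metric((x1 - xs) ** 2)
tau = problem.solve(verbose=0)
```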
--------------------------------------------------------------------------------
/PEPit/functions/convex_function.py:
--------------------------------------------------------------------------------
from PEPit.function import Function


class ConvexFunction(Function):
    """
    The :class:`ConvexFunction` class overwrites the `add_class_constraints` method of :class:`Function`,
    implementing the interpolation constraints of the class of convex, closed and proper (CCP) functions (i.e., convex
    functions whose epigraphs are non-empty closed sets).

    General CCP functions are not characterized by any parameter, hence can be instantiated as

    Example:
        >>> from PEPit import PEP
        >>> from PEPit.functions import ConvexFunction
        >>> problem = PEP()
        >>> func = problem.declare_function(function_class=ConvexFunction)

    """

    def __init__(self,
                 is_leaf=True,
                 decomposition_dict=None,
                 reuse_gradient=False,
                 name=None):
        """

        Args:
            is_leaf (bool): True if self is defined from scratch.
                            False if self is defined as linear combination of leaf.
            decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects.
                                       Keys are :class:`Function` objects and values are their associated coefficients.
            reuse_gradient (bool): If True, the same subgradient is returned
                                   when one requires it several times on the same :class:`Point`.
                                   If False, a new subgradient is computed each time one is required.
            name (str): name of the object. None by default. Can be updated later through the method `set_name`.

        """
        super().__init__(is_leaf=is_leaf,
                         decomposition_dict=decomposition_dict,
                         reuse_gradient=reuse_gradient,
                         name=name,
                         )

    @staticmethod
    def set_convexity_constraint_i_j(xi, gi, fi,
                                     xj, gj, fj,
                                     ):
        """
        Formulates the list of interpolation constraints for self (CCP function).
        """
        # Interpolation conditions of convex functions class
        constraint = (fi - fj >= gj * (xi - xj))

        return constraint

    def add_class_constraints(self):
        """
        Add the convexity constraints.
        """

        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points,
                                                      list_of_points_2=self.list_of_points,
                                                      constraint_name="convexity",
                                                      set_class_constraint_i_j=self.set_convexity_constraint_i_j,
                                                      )
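
Editor's note: with no parameter to exploit, a natural demonstration of `ConvexFunction` is the proximal point method, as in `examples/unconstrained_convex_minimization/proximal_point.py`. A minimal sketch follows; step size and iteration count are illustrative.

```python
from PEPit import PEP
from PEPit.functions import ConvexFunction
from PEPit.primitive_steps import proximal_step

problem = PEP()
func = problem.declare_function(ConvexFunction)
xs = func.stationary_point()
x0 = problem.set_initial_point()
problem.set_initial_condition((x0 - xs) ** 2 <= 1)

gamma, n = 1., 3  # illustrative step size and iteration count
x = x0
for _ in range(n):
    # proximal_step returns the new point, a subgradient there, and the function value.
    x, _, fx = proximal_step(x, func, gamma)

problem.set_performance_metric(fx - func(xs))
tau = problem.solve(verbose=0)  # expected to match ||x0 - xs||^2 / (4 * n * gamma)
```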
--------------------------------------------------------------------------------
/PEPit/functions/convex_lipschitz_function.py:
--------------------------------------------------------------------------------
from PEPit.function import Function


class ConvexLipschitzFunction(Function):
    """
    The :class:`ConvexLipschitzFunction` class overwrites the `add_class_constraints` method of :class:`Function`,
    implementing the interpolation constraints of the class of convex closed proper (CCP)
    Lipschitz continuous functions.

    Attributes:
        M (float): Lipschitz parameter

    CCP Lipschitz continuous functions are characterized by a parameter `M`, hence can be instantiated as

    Example:
        >>> from PEPit import PEP
        >>> from PEPit.functions import ConvexLipschitzFunction
        >>> problem = PEP()
        >>> func = problem.declare_function(function_class=ConvexLipschitzFunction, M=1.)

    References:

        `[1] A. Taylor, J. Hendrickx, F. Glineur (2017).
        Exact worst-case performance of first-order methods for composite convex optimization.
        SIAM Journal on Optimization, 27(3):1283–1313.
        `_

    """

    def __init__(self,
                 M,
                 is_leaf=True,
                 decomposition_dict=None,
                 reuse_gradient=False,
                 name=None):
        """

        Args:
            M (float): The Lipschitz continuity parameter of self.
            is_leaf (bool): True if self is defined from scratch.
                            False if self is defined as linear combination of leaf.
            decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects.
                                       Keys are :class:`Function` objects and values are their associated coefficients.
            reuse_gradient (bool): If True, the same subgradient is returned
                                   when one requires it several times on the same :class:`Point`.
                                   If False, a new subgradient is computed each time one is required.
            name (str): name of the object. None by default. Can be updated later through the method `set_name`.

        """
        # Inherit directly from Function.
        super().__init__(is_leaf=is_leaf,
                         decomposition_dict=decomposition_dict,
                         reuse_gradient=reuse_gradient,
                         name=name,
                         )

        # param M
        self.M = M

    def set_lipschitz_continuity_constraint_i(self, xi, gi, fi):
        """
        Formulates the Lipschitz continuity constraint by bounding the gradients.

        """
        # Lipschitz condition on the function (bounded gradient)
        constraint = (gi ** 2 <= self.M ** 2)

        return constraint

    @staticmethod
    def set_convexity_constraint_i_j(xi, gi, fi,
                                     xj, gj, fj,
                                     ):
        """
        Formulates the list of interpolation constraints for self (CCP function).
        """
        # Interpolation conditions of convex functions class
        constraint = (fi - fj >= gj * (xi - xj))

        return constraint

    def add_class_constraints(self):
        """
        Formulates the list of interpolation constraints for self (CCP Lipschitz continuous function),
        see [1, Theorem 3.5].
        """

        self.add_constraints_from_one_list_of_points(list_of_points=self.list_of_points,
                                                     constraint_name="lipschitz_continuity",
                                                     set_class_constraint_i=self.set_lipschitz_continuity_constraint_i,
                                                     )

        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points,
                                                      list_of_points_2=self.list_of_points,
                                                      constraint_name="convexity",
                                                      set_class_constraint_i_j=self.set_convexity_constraint_i_j,
                                                      )
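
Editor's note: the canonical use of this class is the subgradient method, as in `examples/unconstrained_convex_minimization/subgradient_method.py`. A hedged sketch follows; the constant step size is an illustrative choice, and it relies on PEPit's multiple-metric semantics (when several performance metrics are declared, the worst-case is measured on the minimum of them, i.e. here the best function value among the iterates), as used in that example.

```python
from PEPit import PEP
from PEPit.functions import ConvexLipschitzFunction

problem = PEP()
M, n = 2., 4  # illustrative parameters
func = problem.declare_function(ConvexLipschitzFunction, M=M)
xs = func.stationary_point()
fs = func(xs)
x0 = problem.set_initial_point()
problem.set_initial_condition((x0 - xs) ** 2 <= 1)

x = x0
gamma = 1 / (M * (n + 1) ** .5)  # illustrative constant step size
for _ in range(n + 1):
    g, f = func.oracle(x)  # a subgradient and the function value at x
    # One metric per iterate: PEPit bounds the minimum over declared metrics.
    problem.set_performance_metric(f - fs)
    x = x - gamma * g

tau = problem.solve(verbose=0)
```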
--------------------------------------------------------------------------------
/PEPit/functions/convex_qg_function.py:
--------------------------------------------------------------------------------
from PEPit.function import Function


class ConvexQGFunction(Function):
    """
    The :class:`ConvexQGFunction` class overwrites the `add_class_constraints` method of :class:`Function`,
    implementing the interpolation constraints of the class of quadratically upper bounded (:math:`\\text{QG}^+` [1]),
    i.e. :math:`\\forall x, f(x) - f_\\star \\leqslant \\frac{L}{2} \\|x-x_\\star\\|^2`, and convex functions.

    Attributes:
        L (float): The quadratic upper bound parameter

    General quadratically upper bounded (:math:`\\text{QG}^+`) convex functions are characterized
    by the quadratic growth parameter `L`, hence can be instantiated as

    Example:
        >>> from PEPit import PEP
        >>> from PEPit.functions import ConvexQGFunction
        >>> problem = PEP()
        >>> func = problem.declare_function(function_class=ConvexQGFunction, L=1)

    References:

        `[1] B. Goujaud, A. Taylor, A. Dieuleveut (2022).
        Optimal first-order methods for convex functions with a quadratic upper bound.
        `_

    """

    def __init__(self,
                 L,
                 is_leaf=True,
                 decomposition_dict=None,
                 reuse_gradient=False,
                 name=None):
        """

        Args:
            L (float): The quadratic upper bound parameter.
            is_leaf (bool): True if self is defined from scratch.
                            False if self is defined as linear combination of leaf.
            decomposition_dict (dict): decomposition of self as linear combination of leaf :class:`Function` objects.
                                       Keys are :class:`Function` objects and values are their associated coefficients.
            reuse_gradient (bool): If True, the same subgradient is returned
                                   when one requires it several times on the same :class:`Point`.
                                   If False, a new subgradient is computed each time one is required.
            name (str): name of the object. None by default. Can be updated later through the method `set_name`.

        """
        super().__init__(is_leaf=is_leaf,
                         decomposition_dict=decomposition_dict,
                         reuse_gradient=reuse_gradient,
                         name=name,
                         )

        # Store L
        self.L = L

    @staticmethod
    def set_convexity_constraint_i_j(xi, gi, fi,
                                     xj, gj, fj,
                                     ):
        """
        Formulates the list of interpolation constraints for self (CCP function).
        """
        # Interpolation conditions of convex functions class
        constraint = (fi - fj >= gj * (xi - xj))

        return constraint

    def set_qg_convexity_constraint_i_j(self,
                                        xi, gi, fi,
                                        xj, gj, fj,
                                        ):
        """
        Formulates the list of interpolation constraints for self (qg convex function).
        """
        # Interpolation conditions of QG convex functions class
        constraint = (fi - fj >= gj * (xi - xj) + 1 / (2 * self.L) * gj ** 2)

        return constraint

    def add_class_constraints(self):
        """
        Formulates the list of interpolation constraints for self (quadratically maximally growing convex function);
        see [1, Theorem 2.6].
        """
        if self.list_of_stationary_points == list():
            self.stationary_point()

        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_stationary_points,
                                                      list_of_points_2=self.list_of_points,
                                                      constraint_name="qg_convexity",
                                                      set_class_constraint_i_j=self.set_qg_convexity_constraint_i_j,
                                                      )

        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points,
                                                      list_of_points_2=self.list_of_points,
                                                      constraint_name="convexity",
                                                      set_class_constraint_i_j=self.set_convexity_constraint_i_j,
                                                      )
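
Editor's note: a minimal sketch of how this class is consumed, with one gradient step and an illustrative step size; see `examples/unconstrained_convex_minimization/gradient_descent_qg_convex.py` for the tuned step-size schedules and the corresponding tight rates from [1].

```python
from PEPit import PEP
from PEPit.functions import ConvexQGFunction

problem = PEP()
L = 1.
func = problem.declare_function(ConvexQGFunction, L=L)
xs = func.stationary_point()
x0 = problem.set_initial_point()
problem.set_initial_condition((x0 - xs) ** 2 <= 1)

# One gradient step; 1/L is an illustrative (not necessarily optimal) choice here.
x1 = x0 - (1 / L) * func.gradient(x0)
problem.set_performance_metric(func(x1) - func(xs))
tau = problem.solve(verbose=0)
```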
--------------------------------------------------------------------------------
/PEPit/functions/convex_support_function.py:
--------------------------------------------------------------------------------
import numpy as np

from PEPit.function import Function


class ConvexSupportFunction(Function):
    """
    The :class:`ConvexSupportFunction` class overwrites the `add_class_constraints` method of :class:`Function`,
    implementing interpolation constraints for the class of closed convex support functions.

    Attributes:
        M (float): upper bound on the Lipschitz constant

    Convex support functions are characterized by a parameter `M`, hence can be instantiated as

    Example:
        >>> from PEPit import PEP
        >>> from PEPit.functions import ConvexSupportFunction
        >>> problem = PEP()
        >>> func = problem.declare_function(function_class=ConvexSupportFunction, M=1)

    References:

        `[1] A. Taylor, J. Hendrickx, F. Glineur (2017).
        Exact worst-case performance of first-order methods for composite convex optimization.
        SIAM Journal on Optimization, 27(3):1283–1313.
        `_

    """

    def __init__(self,
                 M=np.inf,
                 is_leaf=True,
                 decomposition_dict=None,
                 reuse_gradient=False,
                 name=None):
        """

        Args:
            M (float): Lipschitz constant of self. Default value set to infinity.
            is_leaf (bool): True if self is defined from scratch.
                            False if self is defined as linear combination of leaf :class:`Function` objects.
            decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects.
                                       Keys are :class:`Function` objects and values are their associated coefficients.
            reuse_gradient (bool): If True, the same subgradient is returned
                                   when one requires it several times on the same :class:`Point`.
                                   If False, a new subgradient is computed each time one is required.
            name (str): name of the object. None by default. Can be updated later through the method `set_name`.

        """
        super().__init__(is_leaf=is_leaf,
                         decomposition_dict=decomposition_dict,
                         reuse_gradient=reuse_gradient,
                         name=name,
                         )

        # Store the Lipschitz constant in an attribute
        self.M = M

    @staticmethod
    def set_fenchel_value_constraint_i(xi, gi, fi):
        """
        Set the value of the Fenchel transform to 0.

        """
        # Set constraint
        constraint = (gi * xi - fi == 0)

        return constraint

    def set_lipschitz_continuity_constraint_i(self, xi, gi, fi):
        """
        Set Lipschitz continuity constraint so that its Fenchel transform has a bounded support.

        """
        # Set constraint
        constraint = (gi ** 2 <= self.M ** 2)

        return constraint

    @staticmethod
    def set_convexity_constraint_i_j(xi, gi, fi,
                                     xj, gj, fj,
                                     ):
        """
        Formulates the list of interpolation constraints for self (CCP function).
        """
        # Interpolation conditions of convex functions class
        constraint = (xj * (gi - gj) <= 0)

        return constraint

    def add_class_constraints(self):
        """
        Formulates the list of interpolation constraints for self (closed convex support function),
        see [1, Corollary 3.7].
        """

        self.add_constraints_from_one_list_of_points(list_of_points=self.list_of_points,
                                                     constraint_name="fenchel_value",
                                                     set_class_constraint_i=self.set_fenchel_value_constraint_i,
                                                     )

        if self.M != np.inf:
            self.add_constraints_from_one_list_of_points(list_of_points=self.list_of_points,
                                                         constraint_name="lipschitz_continuity",
                                                         set_class_constraint_i=self.set_lipschitz_continuity_constraint_i,
                                                         )

        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points,
                                                      list_of_points_2=self.list_of_points,
                                                      constraint_name="convexity",
                                                      set_class_constraint_i_j=self.set_convexity_constraint_i_j,
                                                      )

--------------------------------------------------------------------------------
/PEPit/functions/rsi_eb_function.py:
--------------------------------------------------------------------------------
from PEPit.function import Function


class RsiEbFunction(Function):
    """
    The :class:`RsiEbFunction` class overwrites the `add_class_constraints` method
    of :class:`Function`, implementing the interpolation constraints of the class of functions verifying
    the "lower" restricted secant inequality (:math:`\\text{RSI}^-`) and the "upper" error bound (:math:`\\text{EB}^+`).

    Attributes:
        mu (float): Restricted secant inequality parameter
        L (float): Error bound parameter

    :math:`\\text{RSI}^-` and :math:`\\text{EB}^+` functions are characterized by parameters :math:`\\mu` and `L`,
    hence can be instantiated as

    Example:
        >>> from PEPit import PEP
        >>> from PEPit.functions import RsiEbFunction
        >>> problem = PEP()
        >>> h = problem.declare_function(function_class=RsiEbFunction, mu=.1, L=1)

    References:

        A definition of the class of :math:`\\text{RSI}^-` and :math:`\\text{EB}^+` functions can be found in [1].

        `[1] C. Guille-Escuret, B. Goujaud, A. Ibrahim, I. Mitliagkas (2022).
        Gradient Descent Is Optimal Under Lower Restricted Secant Inequality And Upper Error Bound.
        arXiv 2203.00342.
        `_

    """

    def __init__(self,
                 mu,
                 L,
                 is_leaf=True,
                 decomposition_dict=None,
                 reuse_gradient=False,
                 name=None):
        """

        Args:
            mu (float): The restricted secant inequality parameter.
            L (float): The upper error bound parameter.
            is_leaf (bool): True if self is defined from scratch.
                            False if self is defined as linear combination of leaf :class:`Function` objects.
            decomposition_dict (dict): decomposition of self as linear combination of leaf :class:`Function` objects.
                                       Keys are :class:`Function` objects and values are their associated coefficients.
            reuse_gradient (bool): If True, the same subgradient is returned
                                   when one requires it several times on the same :class:`Point`.
                                   If False, a new subgradient is computed each time one is required.
            name (str): name of the object. None by default. Can be updated later through the method `set_name`.

        """
        super().__init__(is_leaf=is_leaf,
                         decomposition_dict=decomposition_dict,
                         reuse_gradient=reuse_gradient,
                         name=name,
                         )
        # Store mu and L
        self.mu = mu
        self.L = L

    def set_rsi_constraints_i_j(self,
                                xi, gi, fi,
                                xj, gj, fj,
                                ):
        """
        Set RSI constraints.

        """
        # Interpolation conditions of RSI function class
        constraint = ((gi - gj) * (xi - xj) - self.mu * (xi - xj) ** 2 >= 0)

        return constraint

    def set_eb_constraints_i_j(self,
                               xi, gi, fi,
                               xj, gj, fj,
                               ):
        """
        Set EB constraints.

        """
        # Interpolation conditions of EB function class
        constraint = ((gi - gj) ** 2 - self.L ** 2 * (xi - xj) ** 2 <= 0)

        return constraint

    def add_class_constraints(self):
        """
        Formulates the list of necessary conditions for interpolation of self, see [1, Theorem 1].
        """
        if self.list_of_stationary_points == list():
            self.stationary_point()

        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_stationary_points,
                                                      list_of_points_2=self.list_of_points,
                                                      constraint_name="rsi",
                                                      set_class_constraint_i_j=self.set_rsi_constraints_i_j,
                                                      )

        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_stationary_points,
                                                      list_of_points_2=self.list_of_points,
                                                      constraint_name="eb",
                                                      set_class_constraint_i_j=self.set_eb_constraints_i_j,
                                                      )
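
Editor's note: a hedged sketch of one gradient step under :math:`\text{RSI}^-`/:math:`\text{EB}^+`, in the spirit of `examples/unconstrained_convex_minimization/subgradient_method_rsi_eb.py`; the step size mu/L**2 and the expected contraction factor are taken from the analysis in [1], and the remaining values are illustrative.

```python
from PEPit import PEP
from PEPit.functions import RsiEbFunction

problem = PEP()
mu, L = .1, 1.
func = problem.declare_function(RsiEbFunction, mu=mu, L=L)
xs = func.stationary_point()
x0 = problem.set_initial_point()
problem.set_initial_condition((x0 - xs) ** 2 <= 1)

gamma = mu / L ** 2  # step size analyzed in [1] (Guille-Escuret et al., 2022)
x1 = x0 - gamma * func.gradient(x0)
problem.set_performance_metric((x1 - xs) ** 2)
tau = problem.solve(verbose=0)  # expected contraction factor 1 - mu ** 2 / L ** 2
```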
--------------------------------------------------------------------------------
/PEPit/functions/smooth_convex_function.py:
--------------------------------------------------------------------------------
import numpy as np
from PEPit.function import Function


class SmoothConvexFunction(Function):
    """
    The :class:`SmoothConvexFunction` class overwrites the `add_class_constraints` method of :class:`Function`,
    by implementing interpolation constraints of the class of smooth convex functions.

    Attributes:
        L (float): smoothness parameter

    Smooth convex functions are characterized by the smoothness parameter `L`, hence can be instantiated as

    Example:
        >>> from PEPit import PEP
        >>> from PEPit.functions import SmoothConvexFunction
        >>> problem = PEP()
        >>> func = problem.declare_function(function_class=SmoothConvexFunction, L=1.)

    References:

        `[1] A. Taylor, J. Hendrickx, F. Glineur (2017).
        Smooth strongly convex interpolation and exact worst-case performance of first-order methods.
        Mathematical Programming, 161(1-2), 307-345.
        `_

    """

    def __init__(self,
                 L,
                 is_leaf=True,
                 decomposition_dict=None,
                 reuse_gradient=True,
                 name=None):
        """

        Args:
            L (float): The smoothness parameter.
            is_leaf (bool): True if self is defined from scratch.
                            False if self is defined as linear combination of leaf.
            decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects.
                                       Keys are :class:`Function` objects and values are their associated coefficients.
            reuse_gradient (bool): If True, the same subgradient is returned
                                   when one requires it several times on the same :class:`Point`.
                                   If False, a new subgradient is computed each time one is required.
            name (str): name of the object. None by default. Can be updated later through the method `set_name`.

        Note:
            Smooth convex functions are necessarily differentiable, hence `reuse_gradient` is set to True.

        """
        super().__init__(is_leaf=is_leaf,
                         decomposition_dict=decomposition_dict,
                         reuse_gradient=True,
                         name=name,
                         )

        # Store L
        self.L = L

        if self.L == np.inf:
            print("\033[96m(PEPit) The class of smooth convex functions is necessarily differentiable.\n"
                  "To instantiate a convex function, please avoid using the class SmoothConvexFunction with \n"
                  "L == np.inf. Instead, please use the class ConvexFunction (which accounts for the fact \n"
                  "that there might be several subgradients at the same point).\033[0m")

    def set_smoothness_convexity_constraint_i_j(self,
                                                xi, gi, fi,
                                                xj, gj, fj,
                                                ):
        """
        Formulates the list of interpolation constraints for self (smooth convex function).
        """
        # Interpolation conditions of smooth convex functions class
        constraint = (fi - fj >= gj * (xi - xj) + 1 / (2 * self.L) * (gi - gj) ** 2)

        return constraint

    def add_class_constraints(self):
        """
        Add class constraints.
        """
        # Add Smoothness convexity interpolation constraints
        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points,
                                                      list_of_points_2=self.list_of_points,
                                                      constraint_name="smoothness_convexity",
                                                      set_class_constraint_i_j=self.set_smoothness_convexity_constraint_i_j,
                                                      )
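
Editor's note: this class powers PEPit's flagship example, gradient descent on an L-smooth convex function (`examples/unconstrained_convex_minimization/gradient_descent.py`). A minimal sketch with step size 1/L, whose worst case is known to be tight at L/(4n+2) for a unit initial radius:

```python
from PEPit import PEP
from PEPit.functions import SmoothConvexFunction

problem = PEP()
L, n = 1., 5
func = problem.declare_function(SmoothConvexFunction, L=L)
xs = func.stationary_point()
x0 = problem.set_initial_point()
problem.set_initial_condition((x0 - xs) ** 2 <= 1)

x = x0
for _ in range(n):
    x = x - (1 / L) * func.gradient(x)

problem.set_performance_metric(func(x) - func(xs))
tau = problem.solve(verbose=0)  # tight worst-case: L / (4 * n + 2)
```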
43 | Keys are :class:`Function` objects and values are their associated coefficients. 44 | reuse_gradient (bool): If True, the same subgradient is returned 45 | when one requires it several times on the same :class:`Point`. 46 | If False, a new subgradient is computed each time one is required. 47 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 48 | 49 | Note: 50 | Smooth functions are necessarily differentiable, hence `reuse_gradient` is set to True. 51 | 52 | """ 53 | super().__init__(is_leaf=is_leaf, 54 | decomposition_dict=decomposition_dict, 55 | reuse_gradient=True, 56 | name=name, 57 | ) 58 | 59 | # Store L 60 | self.L = L 61 | 62 | if self.L == np.inf: 63 | print("\033[96m(PEPit) The class of L-smooth functions with L == np.inf implies no constraint: \n" 64 | "it contains all differentiable functions. This might imply issues in your code.\033[0m") 65 | 66 | def set_smoothness_i_j(self, 67 | xi, gi, fi, 68 | xj, gj, fj, 69 | ): 70 | """ 71 | Set smoothness interpolation constraints. 72 | 73 | """ 74 | # Set constraint 75 | constraint = (fi - fj >= 76 | - self.L / 4 * (xi - xj) ** 2 77 | + 1 / 2 * (gi + gj) * (xi - xj) 78 | + 1 / (4 * self.L) * (gi - gj) ** 2 79 | ) 80 | 81 | return constraint 82 | 83 | def add_class_constraints(self): 84 | """ 85 | Formulates the list of interpolation constraints for self (smooth (not necessarily convex) function), 86 | see [1, Theorem 3.10]. 87 | """ 88 | 89 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 90 | list_of_points_2=self.list_of_points, 91 | constraint_name="smoothness", 92 | set_class_constraint_i_j=self.set_smoothness_i_j, 93 | ) 94 | -------------------------------------------------------------------------------- /PEPit/functions/smooth_strongly_convex_function.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from PEPit.function import Function 3 | 4 | 5 | class SmoothStronglyConvexFunction(Function): 6 | """ 7 | The :class:`SmoothStronglyConvexFunction` class overwrites the `add_class_constraints` method of :class:`Function`, 8 | by implementing interpolation constraints of the class of smooth strongly convex functions. 9 | 10 | Attributes: 11 | mu (float): strong convexity parameter 12 | L (float): smoothness parameter 13 | 14 | Smooth strongly convex functions are characterized by parameters :math:`\\mu` and :math:`L`, 15 | hence can be instantiated as 16 | 17 | Example: 18 | >>> from PEPit import PEP 19 | >>> from PEPit.functions import SmoothStronglyConvexFunction 20 | >>> problem = PEP() 21 | >>> func = problem.declare_function(function_class=SmoothStronglyConvexFunction, mu=.1, L=1.) 22 | 23 | References: 24 | 25 | `[1] A. Taylor, J. Hendrickx, F. Glineur (2017). 26 | Smooth strongly convex interpolation and exact worst-case performance of first-order methods. 27 | Mathematical Programming, 161(1-2), 307-345. 28 | `_ 29 | 30 | """ 31 | 32 | def __init__(self, 33 | mu, 34 | L, 35 | is_leaf=True, 36 | decomposition_dict=None, 37 | reuse_gradient=True, 38 | name=None): 39 | """ 40 | 41 | Args: 42 | mu (float): The strong convexity parameter. 43 | L (float): The smoothness parameter. 44 | is_leaf (bool): True if self is defined from scratch. 45 | False if self is defined as linear combination of leaf. 46 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 
47 | Keys are :class:`Function` objects and values are their associated coefficients. 48 | reuse_gradient (bool): If True, the same subgradient is returned 49 | when one requires it several times on the same :class:`Point`. 50 | If False, a new subgradient is computed each time one is required. 51 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 52 | 53 | Note: 54 | Smooth strongly convex functions are necessarily differentiable, hence `reuse_gradient` is set to True. 55 | 56 | """ 57 | super().__init__(is_leaf=is_leaf, 58 | decomposition_dict=decomposition_dict, 59 | reuse_gradient=True, 60 | name=name, 61 | ) 62 | 63 | # Store mu and L 64 | self.mu = mu 65 | self.L = L 66 | 67 | if self.L == np.inf: 68 | print("\033[96m(PEPit) Smooth strongly convex functions are necessarily differentiable. To instantiate\n" 69 | "a strongly convex function, please avoid using the class SmoothStronglyConvexFunction with\n" 70 | "L == np.inf. Instead, please use the class StronglyConvexFunction (which accounts for the fact\n" 71 | "that there might be several sub-gradients at the same point).\033[0m") 72 | 73 | def set_smoothness_strong_convexity_constraint_i_j(self, 74 | xi, gi, fi, 75 | xj, gj, fj, 76 | ): 77 | """ 78 | Formulates the list of interpolation constraints for self (smooth strongly convex function). 79 | """ 80 | # Interpolation conditions of smooth strongly convex functions class 81 | constraint = (fi - fj >= 82 | gj * (xi - xj) 83 | + 1 / (2 * self.L) * (gi - gj) ** 2 84 | + self.mu / (2 * (1 - self.mu / self.L)) * ( 85 | xi - xj - 1 / self.L * (gi - gj)) ** 2) 86 | 87 | return constraint 88 | 89 | def add_class_constraints(self): 90 | """ 91 | Add class constraints. 92 | """ 93 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 94 | list_of_points_2=self.list_of_points, 95 | constraint_name="smoothness_strong_convexity", 96 | set_class_constraint_i_j= 97 | self.set_smoothness_strong_convexity_constraint_i_j, 98 | ) 99 | -------------------------------------------------------------------------------- /PEPit/functions/strongly_convex_function.py: -------------------------------------------------------------------------------- 1 | from PEPit.function import Function 2 | 3 | 4 | class StronglyConvexFunction(Function): 5 | """ 6 | The :class:`StronglyConvexFunction` class overwrites the `add_class_constraints` method of :class:`Function`, 7 | implementing the interpolation constraints of the class of strongly convex closed proper functions (strongly convex 8 | functions whose epigraphs are non-empty closed sets). 9 | 10 | Attributes: 11 | mu (float): strong convexity parameter 12 | 13 | Strongly convex functions are characterized by the strong convexity parameter :math:`\\mu`, 14 | hence can be instantiated as 15 | 16 | Example: 17 | >>> from PEPit import PEP 18 | >>> from PEPit.functions import StronglyConvexFunction 19 | >>> problem = PEP() 20 | >>> func = problem.declare_function(function_class=StronglyConvexFunction, mu=.1) 21 | 22 | References: 23 | 24 | `[1] A. Taylor, J. Hendrickx, F. Glineur (2017). 25 | Smooth strongly convex interpolation and exact worst-case performance of first-order methods. 26 | Mathematical Programming, 161(1-2), 307-345. 27 | `_ 28 | 29 | """ 30 | 31 | def __init__(self, 32 | mu, 33 | is_leaf=True, 34 | decomposition_dict=None, 35 | reuse_gradient=False, 36 | name=None): 37 | """ 38 | 39 | Args: 40 | mu (float): The strong convexity parameter. 
41 | is_leaf (bool): True if self is defined from scratch. 42 | False if self is defined as linear combination of leaf. 43 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 44 | Keys are :class:`Function` objects and values are their associated coefficients. 45 | reuse_gradient (bool): If True, the same subgradient is returned 46 | when one requires it several times on the same :class:`Point`. 47 | If False, a new subgradient is computed each time one is required. 48 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 49 | 50 | """ 51 | super().__init__(is_leaf=is_leaf, 52 | decomposition_dict=decomposition_dict, 53 | reuse_gradient=reuse_gradient, 54 | name=name, 55 | ) 56 | 57 | # Store mu 58 | self.mu = mu 59 | 60 | def set_strong_convexity_constraint_i_j(self, 61 | xi, gi, fi, 62 | xj, gj, fj, 63 | ): 64 | """ 65 | Set strong convexity interpolation constraints. 66 | 67 | """ 68 | # Set constraints 69 | constraint = (fi - fj >= 70 | gj * (xi - xj) 71 | + self.mu / 2 * (xi - xj) ** 2) 72 | 73 | return constraint 74 | 75 | def add_class_constraints(self): 76 | """ 77 | Formulates the list of interpolation constraints for self (strongly convex closed proper function), 78 | see [1, Corollary 2]. 79 | """ 80 | 81 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 82 | list_of_points_2=self.list_of_points, 83 | constraint_name="strong_convexity", 84 | set_class_constraint_i_j=self.set_strong_convexity_constraint_i_j, 85 | ) 86 | -------------------------------------------------------------------------------- /PEPit/operators/__init__.py: -------------------------------------------------------------------------------- 1 | from .cocoercive import CocoerciveOperator 2 | from .cocoercive_strongly_monotone import CocoerciveStronglyMonotoneOperator 3 | from .linear import LinearOperator 4 | from .lipschitz import LipschitzOperator 5 | from .lipschitz_strongly_monotone import LipschitzStronglyMonotoneOperator 6 | from .monotone import MonotoneOperator 7 | from .negatively_comonotone import NegativelyComonotoneOperator 8 | from .nonexpansive import NonexpansiveOperator 9 | from .skew_symmetric_linear import SkewSymmetricLinearOperator 10 | from .strongly_monotone import StronglyMonotoneOperator 11 | from .symmetric_linear import SymmetricLinearOperator 12 | 13 | __all__ = ['cocoercive', 'CocoerciveOperator', 14 | 'cocoercive_strongly_monotone', 'CocoerciveStronglyMonotoneOperator', 15 | 'linear', 'LinearOperator', 16 | 'lipschitz', 'LipschitzOperator', 17 | 'lipschitz_strongly_monotone', 'LipschitzStronglyMonotoneOperator', 18 | 'monotone', 'MonotoneOperator', 19 | 'negatively_comonotone', 'NegativelyComonotoneOperator', 20 | 'nonexpansive', 'NonexpansiveOperator', 21 | 'skew_symmetric_linear', 'SkewSymmetricLinearOperator', 22 | 'strongly_monotone', 'StronglyMonotoneOperator', 23 | 'symmetric_linear', 'SymmetricLinearOperator', 24 | ] 25 | -------------------------------------------------------------------------------- /PEPit/operators/cocoercive.py: -------------------------------------------------------------------------------- 1 | from PEPit.function import Function 2 | 3 | 4 | class CocoerciveOperator(Function): 5 | """ 6 | The :class:`CocoerciveOperator` class overwrites the `add_class_constraints` method of :class:`Function`, 7 | implementing the interpolation constraints of the class of cocoercive (and maximally monotone) operators. 
8 | 9 | Note: 10 | Operator values can be requested through `gradient` and `function values` should not be used. 11 | 12 | Attributes: 13 | beta (float): cocoercivity parameter 14 | 15 | Cocoercive operators are characterized by the parameter :math:`\\beta`, hence can be instantiated as 16 | 17 | Example: 18 | >>> from PEPit import PEP 19 | >>> from PEPit.operators import CocoerciveOperator 20 | >>> problem = PEP() 21 | >>> func = problem.declare_function(function_class=CocoerciveOperator, beta=1.) 22 | 23 | References: 24 | 25 | `[1] E. Ryu, A. Taylor, C. Bergeling, P. Giselsson (2020). 26 | Operator splitting performance estimation: Tight contraction factors and optimal parameter selection. 27 | SIAM Journal on Optimization, 30(3), 2251-2271. 28 | `_ 29 | 30 | """ 31 | 32 | def __init__(self, 33 | beta, 34 | is_leaf=True, 35 | decomposition_dict=None, 36 | reuse_gradient=True, 37 | name=None): 38 | """ 39 | 40 | Args: 41 | beta (float): The cocoercivity parameter. 42 | is_leaf (bool): True if self is defined from scratch. 43 | False if self is defined as linear combination of leaf . 44 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 45 | Keys are :class:`Function` objects and values are their associated coefficients. 46 | reuse_gradient (bool): If True, the same subgradient is returned 47 | when one requires it several times on the same :class:`Point`. 48 | If False, a new subgradient is computed each time one is required. 49 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 50 | 51 | Note: 52 | Cocoercive operators are necessarily continuous, hence `reuse_gradient` is set to True. 53 | 54 | """ 55 | super().__init__(is_leaf=is_leaf, 56 | decomposition_dict=decomposition_dict, 57 | reuse_gradient=True, 58 | name=name, 59 | ) 60 | 61 | # Store the beta parameter 62 | self.beta = beta 63 | 64 | if self.beta == 0: 65 | print("\033[96m(PEPit) The class of cocoercive operators is necessarily continuous. \n" 66 | "To instantiate a monotone operator, please avoid using the class CocoerciveOperator\n" 67 | "with beta == 0. Instead, please use the class Monotone (which accounts for the fact \n" 68 | "that the image of the operator at certain points might not be a singleton).\033[0m") 69 | 70 | def set_cocoercivity_constraint_i_j(self, 71 | xi, gi, fi, 72 | xj, gj, fj, 73 | ): 74 | """ 75 | Set cocoercivity constraint for operator. 76 | 77 | """ 78 | # Set constraint 79 | constraint = ((gi - gj) * (xi - xj) - self.beta * (gi - gj) ** 2 >= 0) 80 | 81 | return constraint 82 | 83 | def add_class_constraints(self): 84 | """ 85 | Formulates the list of interpolation constraints for self (cocoercive maximally monotone operator), 86 | see, e.g., [1, Proposition 2]. 
87 | """ 88 | 89 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 90 | list_of_points_2=self.list_of_points, 91 | constraint_name="cocoercivity", 92 | set_class_constraint_i_j=self.set_cocoercivity_constraint_i_j, 93 | symmetry=True, 94 | ) 95 | -------------------------------------------------------------------------------- /PEPit/operators/linear.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from PEPit import Function 4 | from PEPit import Expression 5 | from PEPit import PSDMatrix 6 | 7 | 8 | class LinearOperator(Function): 9 | """ 10 | The :class:`LinearOperator` class overwrites the `add_class_constraints` method of :class:`Function`, 11 | implementing the interpolation constraints of the class of linear operators. 12 | 13 | Note: 14 | Operator values can be requested through `gradient` and `function values` should not be used. 15 | 16 | Attributes: 17 | L (float): singular values upper bound 18 | T (Function): the adjunct linear operator 19 | 20 | Linear operators are characterized by the parameter :math:`L`, hence can be instantiated as 21 | 22 | Example: 23 | >>> from PEPit import PEP 24 | >>> from PEPit.operators import LinearOperator 25 | >>> problem = PEP() 26 | >>> M = problem.declare_function(function_class=LinearOperator, L=1.) 27 | 28 | References: 29 | 30 | `[1] N. Bousselmi, J. Hendrickx, F. Glineur (2023). 31 | Interpolation Conditions for Linear Operators and applications to Performance Estimation Problems. 32 | arXiv preprint. 33 | `_ 34 | 35 | """ 36 | 37 | def __init__(self, 38 | L, 39 | is_leaf=True, 40 | decomposition_dict=None, 41 | reuse_gradient=True, 42 | name=None): 43 | """ 44 | 45 | Args: 46 | L (float): The singular values upper bound. 47 | is_leaf (bool): True if self is defined from scratch. 48 | False if self is defined as linear combination of leaf . 49 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 50 | Keys are :class:`Function` objects and values are their associated coefficients. 51 | reuse_gradient (bool): If True, the same subgradient is returned 52 | when one requires it several times on the same :class:`Point`. 53 | If False, a new subgradient is computed each time one is required. 54 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 55 | 56 | Note: 57 | Linear operators are necessarily continuous, hence `reuse_gradient` is set to True. 58 | 59 | """ 60 | super().__init__(is_leaf=is_leaf, 61 | decomposition_dict=decomposition_dict, 62 | reuse_gradient=True, 63 | name=name, 64 | ) 65 | 66 | # Store L 67 | self.L = L 68 | 69 | # Define an adjunct operator with no class constraint 70 | # Its list of points is what is important 71 | self.T = Function(is_leaf=True) 72 | self.T.counter = None 73 | Function.counter -= 1 74 | 75 | def no_class_constraint_for_transpose(): 76 | pass 77 | 78 | self.T.add_class_constraints = no_class_constraint_for_transpose 79 | 80 | def add_class_constraints(self): 81 | """ 82 | Formulates the list of necessary and sufficient conditions for interpolation of self 83 | (Linear operator), see [1, Theorem 3.1]. 
84 | """ 85 | 86 | # Add interpolation constraints for linear operator 87 | for point_xy in self.list_of_points: 88 | 89 | xi, yi, fi = point_xy 90 | 91 | for point_uv in self.T.list_of_points: 92 | uj, vj, hj = point_uv 93 | 94 | # Constraint X^T V = Y^T U 95 | self.list_of_class_constraints.append(xi * vj == yi * uj) 96 | 97 | # Add constraint of singular value upper bound of self 98 | N1 = len(self.list_of_points) 99 | T1 = np.empty([N1, N1], dtype=Expression) 100 | 101 | for i, point_i in enumerate(self.list_of_points): 102 | 103 | xi, yi, fi = point_i 104 | 105 | for j, point_j in enumerate(self.list_of_points): 106 | xj, yj, fj = point_j 107 | 108 | # Constraint Y^T Y <= L^2 X^T X 109 | T1[i, j] = (self.L ** 2) * xi * xj - yi * yj 110 | 111 | psd_matrix1 = PSDMatrix(matrix_of_expressions=T1) 112 | self.list_of_class_psd.append(psd_matrix1) 113 | 114 | # Add constraint of singular value upper bound of self.T 115 | N2 = len(self.T.list_of_points) 116 | T2 = np.empty([N2, N2], dtype=Expression) 117 | 118 | for i, point_i in enumerate(self.T.list_of_points): 119 | 120 | ui, vi, hi = point_i 121 | 122 | for j, point_j in enumerate(self.T.list_of_points): 123 | uj, vj, hj = point_j 124 | 125 | # Constraint V^T V <= L^2 U^T U 126 | T2[i, j] = (self.L ** 2) * ui * uj - vi * vj 127 | 128 | psd_matrix2 = PSDMatrix(matrix_of_expressions=T2) 129 | self.list_of_class_psd.append(psd_matrix2) 130 | -------------------------------------------------------------------------------- /PEPit/operators/lipschitz.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from PEPit.function import Function 3 | 4 | 5 | class LipschitzOperator(Function): 6 | """ 7 | The :class:`LipschitzOperator` class overwrites the `add_class_constraints` method of :class:`Function`, 8 | implementing the interpolation constraints of the class of Lipschitz continuous operators. 9 | 10 | Note: 11 | Operator values can be requested through `gradient` and `function values` should not be used. 12 | 13 | Attributes: 14 | L (float): Lipschitz parameter 15 | 16 | Cocoercive operators are characterized by the parameter :math:`L`, hence can be instantiated as 17 | 18 | Example: 19 | >>> from PEPit import PEP 20 | >>> from PEPit.operators import LipschitzOperator 21 | >>> problem = PEP() 22 | >>> func = problem.declare_function(function_class=LipschitzOperator, L=1.) 23 | 24 | Notes: 25 | By setting L=1, we define a non-expansive operator. 26 | 27 | By setting L<1, we define a contracting operator. 28 | 29 | References: 30 | 31 | [1] M. Kirszbraun (1934). 32 | Uber die zusammenziehende und Lipschitzsche transformationen. 33 | Fundamenta Mathematicae, 22 (1934). 34 | 35 | [2] F.A. Valentine (1943). 36 | On the extension of a vector function so as to preserve a Lipschitz condition. 37 | Bulletin of the American Mathematical Society, 49 (2). 38 | 39 | [3] F.A. Valentine (1945). 40 | A Lipschitz condition preserving extension for a vector function. 41 | American Journal of Mathematics, 67(1). 42 | 43 | Discussions and appropriate pointers for the interpolation problem can be found in: 44 | `[4] E. Ryu, A. Taylor, C. Bergeling, P. Giselsson (2020). 45 | Operator splitting performance estimation: Tight contraction factors and optimal parameter selection. 46 | SIAM Journal on Optimization, 30(3), 2251-2271. 
47 | `_ 48 | 49 | """ 50 | 51 | def __init__(self, 52 | L, 53 | is_leaf=True, 54 | decomposition_dict=None, 55 | reuse_gradient=True, 56 | name=None): 57 | """ 58 | 59 | Args: 60 | L (float): Lipschitz continuity parameter. 61 | is_leaf (bool): True if self is defined from scratch. 62 | False if self is defined as linear combination of leaf . 63 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 64 | Keys are :class:`Function` objects and values are their associated coefficients. 65 | reuse_gradient (bool): If True, the same subgradient is returned 66 | when one requires it several times on the same :class:`Point`. 67 | If False, a new subgradient is computed each time one is required. 68 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 69 | 70 | Note: 71 | Lipschitz continuous operators are necessarily continuous, hence `reuse_gradient` is set to True. 72 | 73 | """ 74 | super().__init__(is_leaf=is_leaf, 75 | decomposition_dict=decomposition_dict, 76 | reuse_gradient=True, 77 | name=name, 78 | ) 79 | # Store L 80 | self.L = L 81 | 82 | if self.L == np.inf: 83 | print("\033[96m(PEPit) The class of L-Lipschitz operators with L == np.inf implies no constraint: \n" 84 | "it contains all multi-valued mappings. This might imply issues in your code.\033[0m") 85 | 86 | def set_lipschitz_continuity_constraint_i_j(self, 87 | xi, gi, fi, 88 | xj, gj, fj, 89 | ): 90 | """ 91 | Set Lipschitz continuity constraint for operators. 92 | 93 | """ 94 | # Set constraint 95 | constraint = ((gi - gj) ** 2 - self.L ** 2 * (xi - xj) ** 2 <= 0) 96 | 97 | return constraint 98 | 99 | def add_class_constraints(self): 100 | """ 101 | Formulates the list of interpolation constraints for self (Lipschitz operator), 102 | see [1, 2, 3] or e.g., [4, Fact 2]. 103 | """ 104 | 105 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 106 | list_of_points_2=self.list_of_points, 107 | constraint_name="lipschitz_continuity", 108 | set_class_constraint_i_j= 109 | self.set_lipschitz_continuity_constraint_i_j, 110 | symmetry=True, 111 | ) 112 | -------------------------------------------------------------------------------- /PEPit/operators/monotone.py: -------------------------------------------------------------------------------- 1 | from PEPit.function import Function 2 | 3 | 4 | class MonotoneOperator(Function): 5 | """ 6 | The :class:`MonotoneOperator` class overwrites the `add_class_constraints` method of :class:`Function`, 7 | implementing interpolation constraints for the class of maximally monotone operators. 8 | 9 | Note: 10 | Operator values can be requested through `gradient` and `function values` should not be used. 11 | 12 | General maximally monotone operators are not characterized by any parameter, hence can be instantiated as 13 | 14 | Example: 15 | >>> from PEPit import PEP 16 | >>> from PEPit.operators import MonotoneOperator 17 | >>> problem = PEP() 18 | >>> h = problem.declare_function(function_class=MonotoneOperator) 19 | 20 | References: 21 | [1] H. H. Bauschke and P. L. Combettes (2017). 22 | Convex Analysis and Monotone Operator Theory in Hilbert Spaces. 23 | Springer New York, 2nd ed. 24 | 25 | """ 26 | 27 | def __init__(self, 28 | is_leaf=True, 29 | decomposition_dict=None, 30 | reuse_gradient=False, 31 | name=None): 32 | """ 33 | Args: 34 | is_leaf (bool): True if self is defined from scratch. 35 | False if self is defined as linear combination of leaf . 
36 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 37 | Keys are :class:`Function` objects and values are their associated coefficients. 38 | reuse_gradient (bool): If True, the same subgradient is returned 39 | when one requires it several times on the same :class:`Point`. 40 | If False, a new subgradient is computed each time one is required. 41 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 42 | 43 | """ 44 | super().__init__(is_leaf=is_leaf, 45 | decomposition_dict=decomposition_dict, 46 | reuse_gradient=reuse_gradient, 47 | name=name, 48 | ) 49 | 50 | @staticmethod 51 | def set_monotonicity_constraint_i_j(xi, gi, fi, 52 | xj, gj, fj, 53 | ): 54 | """ 55 | Set monotonicity constraint for operators. 56 | 57 | """ 58 | # Set constraint 59 | constraint = ((gi - gj) * (xi - xj) >= 0) 60 | 61 | return constraint 62 | 63 | def add_class_constraints(self): 64 | """ 65 | Formulates the list of interpolation constraints for self (maximally monotone operator), 66 | see, e.g., [1, Theorem 20.21]. 67 | """ 68 | 69 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 70 | list_of_points_2=self.list_of_points, 71 | constraint_name="monotonicity", 72 | set_class_constraint_i_j=self.set_monotonicity_constraint_i_j, 73 | symmetry=True, 74 | ) 75 | -------------------------------------------------------------------------------- /PEPit/operators/negatively_comonotone.py: -------------------------------------------------------------------------------- 1 | from PEPit.function import Function 2 | 3 | 4 | class NegativelyComonotoneOperator(Function): 5 | """ 6 | The :class:`NegativelyComonotoneOperator` class overwrites the `add_class_constraints` method of :class:`Function`, 7 | implementing some necessary constraints of the class of negatively comonotone operators. 8 | 9 | Warnings: 10 | Those constraints might not be sufficient, hence the characterized class might contain more operators. 11 | 12 | Note: 13 | Operator values can be requested through `gradient`, and `function values` should not be used. 14 | 15 | Attributes: 16 | rho (float): comonotonicity parameter (>0) 17 | 18 | Negatively comonotone operators are characterized by the parameter :math:`\rho`, hence can be instantiated as 19 | 20 | Example: 21 | >>> from PEPit import PEP 22 | >>> from PEPit.operators import NegativelyComonotoneOperator 23 | >>> problem = PEP() 24 | >>> func = problem.declare_function(function_class=NegativelyComonotoneOperator, rho=1.) 25 | 26 | References: 27 | `[1] E. Gorbunov, A. Taylor, S. Horváth, G. Gidel (2023). 28 | Convergence of proximal point and extragradient-based methods beyond monotonicity: 29 | the case of negative comonotonicity. 30 | International Conference on Machine Learning. 31 | `_ 32 | 33 | """ 34 | 35 | def __init__(self, 36 | rho, 37 | is_leaf=True, 38 | decomposition_dict=None, 39 | reuse_gradient=True, 40 | name=None): 41 | """ 42 | 43 | Args: 44 | rho (float): The comonotonicity parameter (>0). 45 | is_leaf (bool): True if self is defined from scratch. 46 | False if self is defined as linear combination of leaf. 47 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 48 | Keys are :class:`Function` objects and values are their associated coefficients. 49 | reuse_gradient (bool): If True, the same subgradient is returned 50 | when one requires it several times on the same :class:`Point`.
51 | If False, a new subgradient is computed each time one is required. 52 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 53 | 54 | """ 55 | super().__init__(is_leaf=is_leaf, 56 | decomposition_dict=decomposition_dict, 57 | reuse_gradient=reuse_gradient, 58 | name=name, 59 | ) 60 | 61 | # Store the rho parameter 62 | self.rho = rho 63 | 64 | if self.rho < 0: 65 | print("\033[96m(PEPit) The parameter rho is expected to be positive.\033[0m") 66 | 67 | if self.rho == 0: 68 | print("\033[96m(PEPit) With rho == 0, negative comonotonicity reduces to plain monotonicity. \n" 69 | "To instantiate a monotone operator, please avoid using the class NegativelyComonotoneOperator\n" 70 | "with rho == 0. Instead, please use the class Monotone.\033[0m") 71 | 72 | def set_negative_comonotonicity_constraint_i_j(self, 73 | xi, gi, fi, 74 | xj, gj, fj, 75 | ): 76 | """ 77 | Formulates the list of interpolation constraints for self (negatively comonotone operator). 78 | """ 79 | # Interpolation conditions of negatively comonotone operator class 80 | constraint = ((gi - gj) * (xi - xj) + self.rho * (gi - gj) ** 2 >= 0) 81 | 82 | return constraint 83 | 84 | def add_class_constraints(self): 85 | """ 86 | Add negative comonotonicity constraints. 87 | """ 88 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 89 | list_of_points_2=self.list_of_points, 90 | constraint_name="negative_comonotonicity", 91 | set_class_constraint_i_j= 92 | self.set_negative_comonotonicity_constraint_i_j, 93 | symmetry=True, 94 | ) 95 | -------------------------------------------------------------------------------- /PEPit/operators/skew_symmetric_linear.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from PEPit import Function 4 | from PEPit import Expression 5 | from PEPit import PSDMatrix 6 | 7 | 8 | class SkewSymmetricLinearOperator(Function): 9 | """ 10 | The :class:`SkewSymmetricLinearOperator` class overwrites the `add_class_constraints` method of :class:`Function`, 11 | implementing the interpolation constraints for the class of skew-symmetric linear operators. 12 | 13 | Note: 14 | Operator values can be requested through `gradient`, and `function values` should not be used. 15 | 16 | Attributes: 17 | L (float): singular values upper bound 18 | 19 | Skew-Symmetric Linear operators are characterized by the parameter :math:`L`, hence can be instantiated as 20 | 21 | Example: 22 | >>> from PEPit import PEP 23 | >>> from PEPit.operators import SkewSymmetricLinearOperator 24 | >>> problem = PEP() 25 | >>> M = problem.declare_function(function_class=SkewSymmetricLinearOperator, L=1.) 26 | 27 | References: 28 | 29 | `[1] N. Bousselmi, J. Hendrickx, F. Glineur (2023). 30 | Interpolation Conditions for Linear Operators and applications to Performance Estimation Problems. 31 | arXiv preprint 32 | `_ 33 | 34 | """ 35 | 36 | def __init__(self, 37 | L, 38 | is_leaf=True, 39 | decomposition_dict=None, 40 | reuse_gradient=True, 41 | name=None): 42 | """ 43 | 44 | Args: 45 | L (float): The singular values upper bound. 46 | is_leaf (bool): True if self is defined from scratch. 47 | False if self is defined as linear combination of leaf. 48 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 49 | Keys are :class:`Function` objects and values are their associated coefficients.
50 | reuse_gradient (bool): If True, the same subgradient is returned 51 | when one requires it several times on the same :class:`Point`. 52 | If False, a new subgradient is computed each time one is required. 53 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 54 | 55 | Note: 56 | Skew-Symmetric Linear operators are necessarily continuous, 57 | hence `reuse_gradient` is set to True. 58 | 59 | """ 60 | super().__init__(is_leaf=is_leaf, 61 | decomposition_dict=decomposition_dict, 62 | reuse_gradient=True, 63 | name=name, 64 | ) 65 | 66 | # Store L 67 | self.L = L 68 | 69 | @staticmethod 70 | def set_antisymmetric_linear_constraint_i_j(xi, gi, fi, 71 | xj, gj, fj, 72 | ): 73 | """ 74 | Formulates the list of interpolation constraints for self (Skew-symmetric linear operator). 75 | """ 76 | # Interpolation conditions of skew-symmetric linear operators class 77 | constraint = (xi * gj == - xj * gi) 78 | 79 | return constraint 80 | 81 | def add_class_constraints(self): 82 | """ 83 | Formulates the list of necessary and sufficient conditions for interpolation of self 84 | (Skew-Symmetric Linear operator), see [1, Corollary 3.2]. 85 | """ 86 | 87 | # Add the class constraint for antisymmetric linear operators 88 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 89 | list_of_points_2=self.list_of_points, 90 | constraint_name="antisymmetric_linearity", 91 | set_class_constraint_i_j= 92 | self.set_antisymmetric_linear_constraint_i_j, 93 | symmetry=True, 94 | ) 95 | 96 | # Create a PSD matrix to enforce the singular values to be smaller than L 97 | N = len(self.list_of_points) 98 | T = np.empty([N, N], dtype=Expression) 99 | 100 | for i, point_i in enumerate(self.list_of_points): 101 | 102 | xi, gi, fi = point_i 103 | 104 | for j, point_j in enumerate(self.list_of_points): 105 | 106 | xj, gj, fj = point_j 107 | 108 | T[i, j] = - gi * gj + (self.L ** 2) * xi * xj 109 | 110 | psd_matrix = PSDMatrix(matrix_of_expressions=T) 111 | self.list_of_class_psd.append(psd_matrix) 112 | -------------------------------------------------------------------------------- /PEPit/operators/strongly_monotone.py: -------------------------------------------------------------------------------- 1 | from PEPit.function import Function 2 | 3 | 4 | class StronglyMonotoneOperator(Function): 5 | """ 6 | The :class:`StronglyMonotoneOperator` class overwrites the `add_class_constraints` method 7 | of :class:`Function`, implementing interpolation constraints of the class of strongly monotone 8 | (maximally monotone) operators. 9 | 10 | Note: 11 | Operator values can be requested through `gradient`, and `function values` should not be used. 12 | 13 | Attributes: 14 | mu (float): strong monotonicity parameter 15 | 16 | Strongly monotone (and maximally monotone) operators are characterized by the parameter :math:`\mu`, 17 | hence can be instantiated as 18 | 19 | Example: 20 | >>> from PEPit import PEP 21 | >>> from PEPit.operators import StronglyMonotoneOperator 22 | >>> problem = PEP() 23 | >>> h = problem.declare_function(function_class=StronglyMonotoneOperator, mu=.1) 24 | 25 | References: 26 | Discussions and appropriate pointers for the problem of 27 | interpolation of maximally monotone operators can be found in: 28 | `[1] E. Ryu, A. Taylor, C. Bergeling, P. Giselsson (2020). 29 | Operator splitting performance estimation: Tight contraction factors and optimal parameter selection. 30 | SIAM Journal on Optimization, 30(3), 2251-2271.
31 | `_ 32 | 33 | """ 34 | 35 | def __init__(self, 36 | mu, 37 | is_leaf=True, 38 | decomposition_dict=None, 39 | reuse_gradient=False, 40 | name=None): 41 | """ 42 | 43 | Args: 44 | mu (float): Strong monotonicity parameter. 45 | is_leaf (bool): True if self is defined from scratch. 46 | False if self is defined as linear combination of leaf. 47 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 48 | Keys are :class:`Function` objects and values are their associated coefficients. 49 | reuse_gradient (bool): If True, the same subgradient is returned 50 | when one requires it several times on the same :class:`Point`. 51 | If False, a new subgradient is computed each time one is required. 52 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 53 | 54 | """ 55 | super().__init__(is_leaf=is_leaf, 56 | decomposition_dict=decomposition_dict, 57 | reuse_gradient=reuse_gradient, 58 | name=name, 59 | ) 60 | # Store mu 61 | self.mu = mu 62 | 63 | def set_strong_monotonicity_constraint_i_j(self, 64 | xi, gi, fi, 65 | xj, gj, fj, 66 | ): 67 | """ 68 | Set strong monotonicity constraint for operators. 69 | 70 | """ 71 | # Set constraint 72 | constraint = ((gi - gj) * (xi - xj) - self.mu * (xi - xj) ** 2 >= 0) 73 | 74 | return constraint 75 | 76 | def add_class_constraints(self): 77 | """ 78 | Formulates the list of necessary conditions for interpolation of self (strongly monotone and 79 | maximally monotone operator), see, e.g., discussions in [1, Section 2]. 80 | """ 81 | 82 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 83 | list_of_points_2=self.list_of_points, 84 | constraint_name="strong_monotonicity", 85 | set_class_constraint_i_j= 86 | self.set_strong_monotonicity_constraint_i_j, 87 | symmetry=True, 88 | ) 89 | -------------------------------------------------------------------------------- /PEPit/operators/symmetric_linear.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from PEPit import Function 4 | from PEPit import Expression 5 | from PEPit import PSDMatrix 6 | 7 | 8 | class SymmetricLinearOperator(Function): 9 | """ 10 | The :class:`SymmetricLinearOperator` class overwrites the `add_class_constraints` method of :class:`Function`, 11 | implementing the interpolation constraints for the class of symmetric linear operators. 12 | 13 | Note: 14 | Operator values can be requested through `gradient`, and `function values` should not be used. 15 | 16 | Attributes: 17 | mu (float): eigenvalues lower bound 18 | L (float): eigenvalues upper bound 19 | 20 | Symmetric Linear operators are characterized by the parameters :math:`\mu` and :math:`L`, 21 | hence can be instantiated as 22 | 23 | Example: 24 | >>> from PEPit import PEP 25 | >>> from PEPit.operators import SymmetricLinearOperator 26 | >>> problem = PEP() 27 | >>> M = problem.declare_function(function_class=SymmetricLinearOperator, mu=.1, L=1.) 28 | 29 | References: 30 | 31 | `[1] N. Bousselmi, J. Hendrickx, F. Glineur (2023). 32 | Interpolation Conditions for Linear Operators and applications to Performance Estimation Problems. 33 | arXiv preprint 34 | `_ 35 | 36 | """ 37 | 38 | def __init__(self, 39 | mu, 40 | L, 41 | is_leaf=True, 42 | decomposition_dict=None, 43 | reuse_gradient=True, 44 | name=None): 45 | """ 46 | 47 | Args: 48 | mu (float): The eigenvalues lower bound. 49 | L (float): The eigenvalues upper bound.
50 | is_leaf (bool): True if self is defined from scratch. 51 | False if self is defined as linear combination of leaf . 52 | decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects. 53 | Keys are :class:`Function` objects and values are their associated coefficients. 54 | reuse_gradient (bool): If True, the same subgradient is returned 55 | when one requires it several times on the same :class:`Point`. 56 | If False, a new subgradient is computed each time one is required. 57 | name (str): name of the object. None by default. Can be updated later through the method `set_name`. 58 | 59 | Note: 60 | Symmetric Linear operators are necessarily continuous, 61 | hence `reuse_gradient` is set to True. 62 | 63 | """ 64 | super().__init__(is_leaf=is_leaf, 65 | decomposition_dict=decomposition_dict, 66 | reuse_gradient=True, 67 | name=name, 68 | ) 69 | 70 | # Store L and mu 71 | self.mu = mu 72 | self.L = L 73 | 74 | @staticmethod 75 | def set_symmetric_linear_constraint_i_j(xi, gi, fi, 76 | xj, gj, fj, 77 | ): 78 | """ 79 | Formulates the list of interpolation constraints for self (Symmetric linear operator). 80 | """ 81 | # Interpolation conditions of symmetric linear operators class 82 | constraint = (xi * gj == xj * gi) 83 | 84 | return constraint 85 | 86 | def add_class_constraints(self): 87 | """ 88 | Formulates the list of necessary and sufficient conditions for interpolation of self 89 | (Symmetric Linear operator), see [1, Theorem 3.3]. 90 | 91 | """ 92 | # Add the class constraint for symmetric linear operators 93 | self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points, 94 | list_of_points_2=self.list_of_points, 95 | constraint_name="symmetric_linearity", 96 | set_class_constraint_i_j=self.set_symmetric_linear_constraint_i_j, 97 | symmetry=True, 98 | ) 99 | 100 | # Create a PSD matrix to enforce the eigenvalues to lie into the interval [mu, L] 101 | N = len(self.list_of_points) 102 | T = np.empty([N, N], dtype=Expression) 103 | 104 | for i, point_i in enumerate(self.list_of_points): 105 | 106 | xi, gi, fi = point_i 107 | 108 | for j, point_j in enumerate(self.list_of_points): 109 | 110 | xj, gj, fj = point_j 111 | 112 | T[i, j] = self.L * gi * xj - gi * gj - self.mu * self.L * xi * xj + self.mu * xi * gj 113 | 114 | psd_matrix = PSDMatrix(matrix_of_expressions=T) 115 | self.list_of_class_psd.append(psd_matrix) 116 | -------------------------------------------------------------------------------- /PEPit/primitive_steps/__init__.py: -------------------------------------------------------------------------------- 1 | from .bregman_gradient_step import bregman_gradient_step 2 | from .bregman_proximal_step import bregman_proximal_step 3 | from .exact_linesearch_step import exact_linesearch_step 4 | from .inexact_gradient_step import inexact_gradient_step 5 | from .inexact_proximal_step import inexact_proximal_step 6 | from .linear_optimization_step import linear_optimization_step 7 | from .proximal_step import proximal_step 8 | from .epsilon_subgradient_step import epsilon_subgradient_step 9 | 10 | __all__ = ['bregman_gradient_step', 11 | 'bregman_proximal_step', 12 | 'exact_linesearch_step', 13 | 'inexact_gradient_step', 14 | 'inexact_proximal_step', 15 | 'linear_optimization_step', 16 | 'proximal_step', 17 | 'epsilon_subgradient_step', 18 | ] 19 | -------------------------------------------------------------------------------- /PEPit/primitive_steps/bregman_gradient_step.py: 
-------------------------------------------------------------------------------- 1 | from PEPit.point import Point 2 | from PEPit.expression import Expression 3 | 4 | 5 | def bregman_gradient_step(gx0, sx0, mirror_map, gamma): 6 | """ 7 | This routine outputs :math:`x` by performing a mirror step of step-size :math:`\\gamma`. 8 | That is, denoting :math:`f` the function to be minimized 9 | and :math:`h` the **mirror map**, it performs 10 | 11 | .. math:: x = \\arg\\min_x \\left[ f(x_0) + \\left< \\nabla f(x_0);\, x - x_0 \\right> 12 | + \\frac{1}{\\gamma} D_h(x; x_0) \\right], 13 | 14 | where :math:`D_h(x; x_0)` denotes the Bregman divergence of :math:`h` on :math:`x` with respect to :math:`x_0`. 15 | 16 | .. math:: D_h(x; x_0) \\triangleq h(x) - h(x_0) - \\left< \\nabla h(x_0);\, x - x_0 \\right>. 17 | 18 | Warning: 19 | The mirror map :math:`h` is assumed differentiable. 20 | 21 | By differentiating the previous objective function, one can observe that 22 | 23 | .. math:: \\nabla h(x) = \\nabla h(x_0) - \\gamma \\nabla f(x_0). 24 | 25 | Args: 26 | sx0 (Point): starting gradient :math:`\\textbf{sx0} \\triangleq \\nabla h(x_0)`. 27 | gx0 (Point): descent direction :math:`\\textbf{gx0} \\triangleq \\nabla f(x_0)`. 28 | mirror_map (Function): the reference function :math:`h` we computed Bregman divergence of. 29 | gamma (float): step size. 30 | 31 | Returns: 32 | x (Point): new iterate :math:`\\textbf{x} \\triangleq x`. 33 | sx (Point): :math:`h`'s gradient on new iterate :math:`x` :math:`\\textbf{sx} \\triangleq \\nabla h(x)`. 34 | hx (Expression): :math:`h`'s value on new iterate :math:`\\textbf{hx} \\triangleq h(x)`. 35 | 36 | """ 37 | 38 | # Instantiating point and function value. 39 | x = Point() 40 | hx = Expression() 41 | 42 | # Apply Bregman gradient step. 43 | sx = sx0 - gamma * gx0 44 | 45 | # Store triplet in mirror map list of points. 46 | mirror_map.add_point((x, sx, hx)) 47 | 48 | # Return the aforementioned triplet. 49 | return x, sx, hx 50 | -------------------------------------------------------------------------------- /PEPit/primitive_steps/bregman_proximal_step.py: -------------------------------------------------------------------------------- 1 | from PEPit.point import Point 2 | from PEPit.expression import Expression 3 | 4 | 5 | def bregman_proximal_step(sx0, mirror_map, min_function, gamma): 6 | """ 7 | This routine outputs :math:`x` by performing a proximal mirror step of step-size :math:`\\gamma`. 8 | That is, denoting :math:`f` the function to be minimized 9 | and :math:`h` the **mirror map**, it performs 10 | 11 | .. math:: x = \\arg\\min_x \\left[ f(x) + \\frac{1}{\\gamma} D_h(x; x_0) \\right], 12 | 13 | where :math:`D_h(x; x_0)` denotes the Bregman divergence of :math:`h` on :math:`x` with respect to :math:`x_0`. 14 | 15 | .. math:: D_h(x; x_0) \\triangleq h(x) - h(x_0) - \\left< \\nabla h(x_0);\, x - x_0 \\right>. 16 | 17 | Warning: 18 | The mirror map :math:`h` is assumed differentiable. 19 | 20 | By differentiating the previous objective function, one can observe that 21 | 22 | .. math:: \\nabla h(x) = \\nabla h(x_0) - \\gamma \\nabla f(x). 23 | 24 | Args: 25 | sx0 (Point): starting gradient :math:`\\textbf{sx0} \\triangleq \\nabla h(x_0)`. 26 | mirror_map (Function): the reference function :math:`h` we computed Bregman divergence of. 27 | min_function (Function): function we aim to minimize. 28 | gamma (float): step size. 29 | 30 | Returns: 31 | x (Point): new iterate :math:`\\textbf{x} \\triangleq x`. 
32 | sx (Point): :math:`h`'s gradient on new iterate :math:`x` :math:`\\textbf{sx} \\triangleq \\nabla h(x)`. 33 | hx (Expression): :math:`h`'s value on new iterate :math:`\\textbf{hx} \\triangleq h(x)`. 34 | gx (Point): :math:`f`'s gradient on new iterate :math:`x` :math:`\\textbf{gx} \\triangleq \\nabla f(x)`. 35 | fx (Expression): :math:`f`'s value on new iterate :math:`\\textbf{fx} \\triangleq f(x)`. 36 | 37 | """ 38 | 39 | # Instantiate new point 40 | x = Point() 41 | 42 | # Create f's gradient and function value on x 43 | gx = Point() 44 | fx = Expression() 45 | 46 | # Create h's gradient and function value on x 47 | sx = sx0 - gamma * gx 48 | hx = Expression() 49 | 50 | # Add triplets to lists of points 51 | min_function.add_point((x, gx, fx)) 52 | mirror_map.add_point((x, sx, hx)) 53 | 54 | # Return all 5 new elements 55 | return x, sx, hx, gx, fx 56 | -------------------------------------------------------------------------------- /PEPit/primitive_steps/epsilon_subgradient_step.py: -------------------------------------------------------------------------------- 1 | from PEPit.point import Point 2 | from PEPit.expression import Expression 3 | 4 | 5 | def epsilon_subgradient_step(x0, f, gamma): 6 | """ 7 | This routine performs a step :math:`x \\leftarrow x_0 - \\gamma g_0` 8 | where :math:`g_0 \\in\\partial_{\\varepsilon} f(x_0)`. That is, :math:`g_0` is an 9 | :math:`\\varepsilon`-subgradient of :math:`f` at :math:`x_0`. The set :math:`\\partial_{\\varepsilon} f(x_0)` 10 | (referred to as the :math:`\\varepsilon`-subdifferential) is defined as (see [1, Section 3]) 11 | 12 | .. math:: \\partial_{\\varepsilon} f(x_0)=\\left\\{g_0:\,\\forall z,\, f(z)\\geqslant f(x_0)+\\left< g_0;\, z-x_0 \\right>-\\varepsilon \\right\\}. 13 | 14 | An alternative characterization of :math:`g_0 \\in\\partial_{\\varepsilon} f(x_0)` consists in writing 15 | 16 | .. math:: f(x_0)+f^*(g_0)-\\left< g_0;x_0\\right>\\leqslant \\varepsilon. 17 | 18 | References: 19 | `[1] A. Brøndsted, R.T. Rockafellar. 20 | On the subdifferentiability of convex functions. 21 | Proceedings of the American Mathematical Society 16(4), 605–611 (1965) 22 | `_ 23 | 24 | Args: 25 | x0 (Point): starting point x0. 26 | f (Function): a function. 27 | gamma (float): the step size parameter. 28 | 29 | Returns: 30 | x (Point): the output point. 31 | g0 (Point): an :math:`\\varepsilon`-subgradient of f at x0. 32 | f0 (Expression): the value of the function f at x0. 33 | epsilon (Expression): the value of epsilon. 34 | 35 | """ 36 | 37 | g0 = Point() 38 | f0 = f.value(x0) 39 | epsilon = Expression() 40 | 41 | x = x0 - gamma * g0 42 | 43 | # f^*(g0) = -f(y) for some y 44 | y = Point() 45 | fy = Expression() 46 | f.add_point((y, g0, fy)) 47 | fstarg0 = g0 * y - fy 48 | 49 | # epsilon-subgradient condition: 50 | constraint = (f0 + fstarg0 - g0 * x0 <= epsilon) 51 | constraint.set_name("epsilon_subgradient({})_on_{}".format(f.get_name(), x0.get_name())) 52 | f.add_constraint(constraint) 53 | 54 | # Return the newly obtained point, the epsilon-subgradient, the value of f in x0, and epsilon. 55 | return x, g0, f0, epsilon 56 | -------------------------------------------------------------------------------- /PEPit/primitive_steps/exact_linesearch_step.py: -------------------------------------------------------------------------------- 1 | from PEPit.point import Point 2 | 3 | 4 | def exact_linesearch_step(x0, f, directions): 5 | """ 6 | This routine outputs some :math:`x` by *mimicking* an exact line/span search in specified directions. 
7 | It is used for instance in ``PEPit.examples.unconstrained_convex_minimization.wc_gradient_exact_line_search`` 8 | and in ``PEPit.examples.unconstrained_convex_minimization.wc_conjugate_gradient``. 9 | 10 | The routine aims at mimicking the operation: 11 | 12 | .. math:: 13 | :nowrap: 14 | 15 | \\begin{eqnarray} 16 | x & = & x_0 - \\sum_{i=1}^{T} \\gamma_i d_i,\\\\ 17 | \\text{with } \\overrightarrow{\\gamma} & = & \\arg\\min_\\overrightarrow{\\gamma} f\\left(x_0 - \\sum_{i=1}^{T} \\gamma_i d_i\\right), 18 | \\end{eqnarray} 19 | 20 | where :math:`T` denotes the number of directions :math:`d_i`. This operation can equivalently be described 21 | in terms of the following conditions: 22 | 23 | .. math:: 24 | :nowrap: 25 | 26 | \\begin{eqnarray} 27 | x - x_0 & \\in & \\text{span}\\left\{d_1,\\ldots,d_T\\right\}, \\\\ 28 | \\nabla f(x) & \\perp & \\text{span}\\left\{d_1,\\ldots,d_T\\right\}. 29 | \\end{eqnarray} 30 | 31 | In this routine, we instead constrain :math:`x` and :math:`\\nabla f(x)` to satisfy 32 | 33 | .. math:: 34 | :nowrap: 35 | 36 | \\begin{eqnarray} 37 | \\forall i=1,\\ldots,T: & \\left< \\nabla f(x);\, d_i \\right> & = & 0,\\\\ 38 | \\text{and } & \\left< \\nabla f(x);\, x - x_0 \\right> & = & 0, 39 | \\end{eqnarray} 40 | 41 | which is a relaxation of the true line/span search conditions. 42 | 43 | Note: 44 | The last of these conditions is automatically implied by the two exact span conditions above. 45 | 46 | Warning: 47 | One can notice that this routine does not completely encode the fact that 48 | :math:`x - x_0` must be a linear combination of the provided directions 49 | (i.e., this routine performs a relaxation). Therefore, if this routine is included in a PEP, 50 | the obtained value might be an upper bound on the true worst-case value. 51 | 52 | Although not always tight, this relaxation is often observed to deliver pretty accurate results 53 | (in particular, it automatically produces tight results under some specific conditions, see, e.g., [1]). 54 | Two such examples are provided in the `conjugate gradient` and `gradient with exact line search` example files. 55 | 56 | References: 57 | `[1] Y. Drori and A. Taylor (2020). 58 | Efficient first-order methods for convex minimization: a constructive approach. 59 | Mathematical Programming 184 (1), 183-220. 60 | `_ 61 | 62 | Args: 63 | x0 (Point): the starting point. 64 | f (Function): the function on which the (sub)gradient will be evaluated. 65 | directions (List of Points): the list of all directions required to be orthogonal to the (sub)gradient of f at x. 66 | 67 | Returns: 68 | x (Point): such that all vectors in directions are orthogonal to the (sub)gradient of f at x. 69 | gx (Point): a (sub)gradient of f at x. 70 | fx (Expression): the function f evaluated at x.
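Example:
    A minimal usage sketch (the smooth convex objective and the single gradient direction below are illustrative choices, not requirements of the routine):

    >>> from PEPit import PEP
    >>> from PEPit.functions import SmoothConvexFunction
    >>> from PEPit.primitive_steps import exact_linesearch_step
    >>> problem = PEP()
    >>> func = problem.declare_function(function_class=SmoothConvexFunction, L=1.)
    >>> x0 = problem.set_initial_point()
    >>> g0, f0 = func.oracle(x0)
    >>> x1, g1, f1 = exact_linesearch_step(x0, func, directions=[g0])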
71 | 72 | """ 73 | 74 | # Instantiate a Point 75 | x = Point() 76 | 77 | # Define gradient and function value of f on x 78 | gx, fx = f.oracle(x) 79 | 80 | # Add constraints 81 | constraint = ((x - x0) * gx == 0) 82 | constraint.set_name("exact_linesearch({})_on_{}".format(f.get_name(), x0.get_name())) 83 | f.add_constraint(constraint) 84 | for d in directions: 85 | constraint = (d * gx == 0) 86 | constraint.set_name("exact_linesearch({})_on_{}_in_direction_{}".format(f.get_name(), x0.get_name(), d.get_name())) 87 | f.add_constraint(constraint) 88 | 89 | # Return triplet of points 90 | return x, gx, fx 91 | -------------------------------------------------------------------------------- /PEPit/primitive_steps/inexact_gradient_step.py: -------------------------------------------------------------------------------- 1 | from PEPit.point import Point 2 | 3 | 4 | def inexact_gradient_step(x0, f, gamma, epsilon, notion='absolute'): 5 | """ 6 | This routine performs a step :math:`x \\leftarrow x_0 - \\gamma d_{x_0}` 7 | where :math:`d_{x_0}` is close to the gradient of :math:`f` in :math:`x_0` 8 | in the following sense: 9 | 10 | .. math:: \\|d_{x_0} - \\nabla f(x_0)\\|^2 \\leqslant \\left\{ 11 | \\begin{eqnarray} 12 | & \\varepsilon^2 & \\text{if notion is set to 'absolute'}, \\\\ 13 | & \\varepsilon^2 \\|\\nabla f(x_0)\\|^2 & \\text{if notion is set to 'relative'}. 14 | \\end{eqnarray} 15 | \\right. 16 | 17 | This relative approximation is used at least in 3 PEPit examples, 18 | in particular in 2 unconstrained convex minimizations: 19 | an inexact gradient descent, and an inexact accelerated gradient. 20 | 21 | References: 22 | `[1] E. De Klerk, F. Glineur, A. Taylor (2020). 23 | Worst-case convergence analysis of inexact gradient and Newton methods 24 | through semidefinite programming performance estimation. 25 | SIAM Journal on Optimization, 30(3), 2053-2082. 26 | `_ 27 | 28 | Args: 29 | x0 (Point): starting point x0. 30 | f (Function): a function. 31 | gamma (float): the step size parameter. 32 | epsilon (float): the required accuracy. 33 | notion (string): defines the mode (absolute or relative inaccuracy). By default, notion='absolute'. 34 | 35 | Returns: 36 | x (Point): the output point. 37 | dx0 (Point): the approximate (sub)gradient of f at x0. 38 | fx0 (Expression): the value of the function f at x0. 39 | 40 | Raises: 41 | ValueError: if notion is not set in ['absolute', 'relative']. 42 | 43 | Note: 44 | When :math:`\\gamma` is set to 0, then this routine returns 45 | :math:`x_0`, :math:`d_{x_0}`, and :math:`f_{x_0}`. 46 | It is used as is in the example of unconstrained convex minimization scheme called 47 | "inexact gradient exact line search" only to access to the direction :math:`d_{x_0}` 48 | close to the gradient :math:`g_{x_0}`. 49 | 50 | """ 51 | 52 | # Get the gradient gx0 and function value fx0 of f in x0. 53 | gx0, fx0 = f.oracle(x0) 54 | 55 | # Define dx0 as a proxy to gx0. 56 | dx0 = Point() 57 | if notion == 'absolute': 58 | constraint = ((gx0 - dx0) ** 2 - epsilon ** 2 <= 0) 59 | elif notion == 'relative': 60 | constraint = ((gx0 - dx0) ** 2 - epsilon ** 2 * (gx0 ** 2) <= 0) 61 | else: 62 | raise ValueError("inexact_gradient_step supports only notion in ['absolute', 'relative']," 63 | " got {}".format(notion)) 64 | 65 | # Add constraint to list of constraints. 66 | constraint.set_name("inexact_gradient_step({})_on_{}".format(f.get_name(), x0.get_name())) 67 | f.add_constraint(constraint) 68 | 69 | # Perform an inexact gradient step in the direction dx0. 
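# (dx0 satisfies the absolute or relative accuracy constraint added above; when gamma == 0, the step below leaves x0 unchanged.)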
70 | x = x0 - gamma * dx0 71 | 72 | # Return the newly obtained point, the direction of descent and the value of f in x0. 73 | return x, dx0, fx0 74 | -------------------------------------------------------------------------------- /PEPit/primitive_steps/linear_optimization_step.py: -------------------------------------------------------------------------------- 1 | from PEPit.point import Point 2 | from PEPit.expression import Expression 3 | 4 | 5 | def linear_optimization_step(dir, ind): 6 | """ 7 | This routine outputs the result of a minimization problem with linear objective (whose direction 8 | is provided by `dir`) on the domain of the (closed convex) indicator function `ind`. 9 | That is, it outputs a solution to 10 | 11 | .. math:: \\arg\\min_{\\text{ind}(x)=0} \\left< \\text{dir};\, x \\right>, 12 | 13 | One can notice that :math:`x` is solution of this problem if and only if 14 | 15 | .. math:: - \\text{dir} \\in \\partial \\text{ind}(x). 16 | 17 | References: 18 | [1] M .Frank, P. Wolfe (1956). 19 | An algorithm for quadratic programming. 20 | Naval research logistics quarterly, 3(1-2), 95-110. 21 | 22 | Args: 23 | dir (Point): direction of optimization 24 | ind (ConvexIndicatorFunction): convex indicator function 25 | 26 | Returns: 27 | x (Point): the optimal point. 28 | gx (Point): the (sub)gradient of ind on x. 29 | fx (Expression): the function value of ind on x. 30 | 31 | """ 32 | 33 | # Define triplet x, gradient, function value. 34 | x = Point() 35 | gx = - dir 36 | fx = Expression() 37 | 38 | # Store it in ind list of points. 39 | ind.add_point((x, gx, fx)) 40 | 41 | # Return triplet x, gradient, function value. 42 | return x, gx, fx 43 | -------------------------------------------------------------------------------- /PEPit/primitive_steps/proximal_step.py: -------------------------------------------------------------------------------- 1 | from PEPit.point import Point 2 | from PEPit.expression import Expression 3 | 4 | 5 | def proximal_step(x0, f, gamma): 6 | """ 7 | This routine performs a proximal step of step-size **gamma**, starting from **x0**, and on function **f**. 8 | That is, it performs: 9 | 10 | .. math:: 11 | :nowrap: 12 | 13 | \\begin{eqnarray} 14 | x \\triangleq \\text{prox}_{\\gamma f}(x_0) & \\triangleq & \\arg\\min_x \\left\\{ \\gamma f(x) + \\frac{1}{2} \\|x - x_0\\|^2 \\right\\}, \\\\ 15 | & \\Updownarrow & \\\\ 16 | 0 & = & \\gamma g_x + x - x_0 \\text{ for some } g_x\\in\\partial f(x),\\\\ 17 | & \\Updownarrow & \\\\ 18 | x & = & x_0 - \\gamma g_x \\text{ for some } g_x\\in\\partial f(x). 19 | \\end{eqnarray} 20 | 21 | Args: 22 | x0 (Point): starting point x0. 23 | f (Function): function on which the proximal step is computed. 24 | gamma (float): step-size of the proximal step. 25 | 26 | Returns: 27 | x (Point): proximal point. 28 | gx (Point): the (sub)gradient of f at x. 29 | fx (Expression): the function value of f on x. 30 | 31 | """ 32 | 33 | # Define gradient and function value on x. 34 | gx = Point() 35 | fx = Expression() 36 | 37 | # Compute x from the docstring equation. 38 | x = x0 - gamma * gx 39 | 40 | # Add point to Function f. 
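# Registering the triplet (x, gx, fx) declares gx as a (sub)gradient of f at x; combined with x = x0 - gamma * gx above, this encodes the prox optimality condition.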
41 |     f.add_point((x, gx, fx))
42 | 
43 |     return x, gx, fx
44 | 
--------------------------------------------------------------------------------
/PEPit/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PerformanceEstimation/PEPit/8d6d9f5e59d8e64b863cc29a62b1180a86a2c9ec/PEPit/tools/__init__.py
--------------------------------------------------------------------------------
/PEPit/tools/dict_operations.py:
--------------------------------------------------------------------------------
1 | def merge_dict(dict1, dict2):
2 |     """
3 |     Merge the keys of dict1 and dict2.
4 |     If a key belongs to both dictionaries, the associated values are added.
5 | 
6 |     Args:
7 |         dict1 (dict): any dictionary
8 |         dict2 (dict): any dictionary
9 | 
10 |     Returns:
11 |         (dict): the union of the 2 inputs with added values.
12 | 
13 |     """
14 | 
15 |     # Start from dict1
16 |     merged_dict = dict1.copy()
17 | 
18 |     # Add all keys of dict2 to merged_dict
19 |     for key in dict2.keys():
20 | 
21 |         # If a key is in both dictionaries, the values are added
22 |         if key in dict1.keys():
23 | 
24 |             merged_dict[key] += dict2[key]
25 | 
26 |         # Otherwise, just add the new key and value
27 |         else:
28 | 
29 |             merged_dict[key] = dict2[key]
30 | 
31 |     # Return the merged dict
32 |     return merged_dict
33 | 
34 | 
35 | def prune_dict(my_dict):
36 |     """
37 |     Remove all keys associated with a null value.
38 | 
39 |     Args:
40 |         my_dict (dict): any dictionary
41 | 
42 |     Returns:
43 |         (dict): pruned dictionary
44 | 
45 |     """
46 | 
47 |     # Start from an empty dict
48 |     pruned_dict = dict()
49 | 
50 |     # Add all entries of my_dict that do not have a null value
51 |     for key in my_dict.keys():
52 | 
53 |         if my_dict[key] != 0:
54 | 
55 |             pruned_dict[key] = my_dict[key]
56 | 
57 |     # Return pruned dict
58 |     return pruned_dict
59 | 
60 | 
61 | def multiply_dicts(dict1, dict2):
62 |     """
63 |     Multiply 2 dictionaries in the sense of expanding the product of 2 sums.
64 | 
65 |     Args:
66 |         dict1 (dict): any dictionary
67 |         dict2 (dict): any dictionary
68 | 
69 |     Returns:
70 |         (dict): the keys are the pairs of keys of dict1 and dict2,
71 |                 and the values are the products of the corresponding values of dict1 and dict2.
72 | 
73 |     """
74 | 
75 |     # Start from an empty dict
76 |     product_dict = dict()
77 | 
78 |     # Complete the dict
79 |     for key1 in dict1.keys():
80 |         for key2 in dict2.keys():
81 |             product_key = (key1, key2)
82 |             product_value = dict1[key1] * dict2[key2]
83 | 
84 |             if product_key in product_dict.keys():
85 |                 product_dict[product_key] += product_value
86 |             else:
87 |                 product_dict[product_key] = product_value
88 | 
89 |     # Return the product dict
90 |     return product_dict
91 | 
92 | 
93 | def symmetrize_dict(my_dict):
94 |     """
95 |     Symmetrize the keys of a dictionary.
96 |     Each entry whose key is a tuple is replaced by two entries:
97 | 
98 |     - one with the same key and half the original value,
99 |     - the other one with the reversed key and half the original value as well.
100 | 
101 |     Args:
102 |         my_dict (dict): any dictionary
103 | 
104 |     Returns:
105 |         (dict): the keys are the ones of my_dict, plus the reversed tuples for the tuple ones.
106 |                 The values are half the original ones for entries with non-symmetric tuple keys,
107 |                 and the original ones for the others.
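
    Example (an illustration following the definition above):
        symmetrize_dict({('a', 'b'): 2., 'c': 3.}) returns {('a', 'b'): 1., ('b', 'a'): 1., 'c': 3.}.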
108 | 
109 |     """
110 | 
111 |     reversed_dict = dict()
112 |     for key, value in my_dict.items():
113 |         if isinstance(key, tuple):
114 |             reversed_dict[key[::-1]] = value
115 |         else:
116 |             reversed_dict[key] = value
117 | 
118 |     symmetric_dict = merge_dict(my_dict, reversed_dict)
119 |     final_dict = {key: value/2 for key, value in symmetric_dict.items()}
120 |     return final_dict
121 | 
--------------------------------------------------------------------------------
/PEPit/wrappers/__init__.py:
--------------------------------------------------------------------------------
1 | from .cvxpy_wrapper import CvxpyWrapper
2 | from .mosek_wrapper import MosekWrapper
3 | 
4 | # Define a dictionary of wrappers.
5 | # By convention, the keys must be written in lower case.
6 | WRAPPERS = {
7 |     "cvxpy": CvxpyWrapper,
8 |     "mosek": MosekWrapper,
9 | }
10 | 
11 | __all__ = ['cvxpy_wrapper', 'CvxpyWrapper',
12 |            'mosek_wrapper', 'MosekWrapper',
13 |            'WRAPPERS',
14 |            ]
15 | 
--------------------------------------------------------------------------------
/codecov.yaml:
--------------------------------------------------------------------------------
1 | ignore:
2 |   - "tests/test_*"
3 | 
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 | 
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 | 
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 | 
15 | .PHONY: help Makefile
16 | 
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 | 
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 | 
3 | pushd %~dp0
4 | 
5 | REM Command file for Sphinx documentation
6 | 
7 | if "%SPHINXBUILD%" == "" (
8 | 	set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 | 
13 | if "%1" == "" goto help
14 | 
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | 	echo.
18 | 	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | 	echo.installed, then set the SPHINXBUILD environment variable to point
20 | 	echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | 	echo.may add the Sphinx directory to PATH.
22 | 	echo.
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx-autodocgen 3 | sphinx-rtd-theme 4 | myst-parser 5 | easydev 6 | -r ../requirements.txt 7 | -------------------------------------------------------------------------------- /docs/source/api.rst: -------------------------------------------------------------------------------- 1 | API and modules 2 | =============== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | :caption: Contents: 7 | 8 | api/main_modules 9 | api/functions_and_operators 10 | api/steps 11 | api/tools 12 | api/wrappers 13 | -------------------------------------------------------------------------------- /docs/source/api/functions.rst: -------------------------------------------------------------------------------- 1 | Functions 2 | ========= 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | Convex 9 | ------ 10 | .. autoclass:: PEPit.functions.ConvexFunction 11 | :members: 12 | :show-inheritance: 13 | 14 | Strongly convex 15 | --------------- 16 | .. autoclass:: PEPit.functions.StronglyConvexFunction 17 | :members: 18 | :show-inheritance: 19 | 20 | Smooth 21 | ------ 22 | .. autoclass:: PEPit.functions.SmoothFunction 23 | :members: 24 | :show-inheritance: 25 | 26 | Convex and smooth 27 | ----------------- 28 | .. autoclass:: PEPit.functions.SmoothConvexFunction 29 | :members: 30 | :show-inheritance: 31 | 32 | Convex and quadratically upper bounded 33 | -------------------------------------- 34 | .. autoclass:: PEPit.functions.ConvexQGFunction 35 | :members: 36 | :show-inheritance: 37 | 38 | Strongly convex and smooth 39 | -------------------------- 40 | .. autoclass:: PEPit.functions.SmoothStronglyConvexFunction 41 | :members: 42 | :show-inheritance: 43 | 44 | Convex and Lipschitz continuous 45 | ------------------------------- 46 | .. autoclass:: PEPit.functions.ConvexLipschitzFunction 47 | :members: 48 | :show-inheritance: 49 | 50 | Smooth convex and Lipschitz continuous 51 | -------------------------------------- 52 | .. autoclass:: PEPit.functions.SmoothConvexLipschitzFunction 53 | :members: 54 | :show-inheritance: 55 | 56 | Convex indicator 57 | ---------------- 58 | .. autoclass:: PEPit.functions.ConvexIndicatorFunction 59 | :members: 60 | :show-inheritance: 61 | 62 | Convex support functions 63 | ------------------------ 64 | .. autoclass:: PEPit.functions.ConvexSupportFunction 65 | :members: 66 | :show-inheritance: 67 | 68 | Restricted secant inequality and error bound 69 | -------------------------------------------- 70 | .. autoclass:: PEPit.functions.RsiEbFunction 71 | :members: 72 | :show-inheritance: 73 | 74 | Convex and smooth by block 75 | -------------------------- 76 | .. autoclass:: PEPit.functions.BlockSmoothConvexFunction 77 | :members: 78 | :show-inheritance: 79 | 80 | Strongly convex and smooth quadratic 81 | ------------------------------------ 82 | .. 
autoclass:: PEPit.functions.SmoothStronglyConvexQuadraticFunction 83 | :members: 84 | :show-inheritance: 85 | -------------------------------------------------------------------------------- /docs/source/api/functions_and_operators.rst: -------------------------------------------------------------------------------- 1 | Functions classes 2 | ================= 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | :caption: Contents: 7 | 8 | functions 9 | operators 10 | -------------------------------------------------------------------------------- /docs/source/api/main_modules.rst: -------------------------------------------------------------------------------- 1 | Main modules 2 | ============ 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | PEP 10 | --- 11 | .. autoclass:: PEPit.PEP 12 | :members: 13 | :show-inheritance: 14 | 15 | 16 | Point 17 | ----- 18 | .. autoclass:: PEPit.Point 19 | :members: 20 | :show-inheritance: 21 | 22 | 23 | Block partition 24 | --------------- 25 | .. autoclass:: PEPit.BlockPartition 26 | :members: 27 | :show-inheritance: 28 | 29 | 30 | Expression 31 | ---------- 32 | .. autoclass:: PEPit.Expression 33 | :members: 34 | :show-inheritance: 35 | 36 | 37 | Constraint 38 | ---------- 39 | .. autoclass:: PEPit.Constraint 40 | :members: 41 | :show-inheritance: 42 | 43 | 44 | Symmetric positive semi-definite matrix 45 | --------------------------------------- 46 | .. autoclass:: PEPit.PSDMatrix 47 | :members: 48 | :show-inheritance: 49 | 50 | 51 | Function 52 | -------- 53 | .. autoclass:: PEPit.Function 54 | :members: 55 | :show-inheritance: 56 | 57 | 58 | Wrapper 59 | ------- 60 | .. autoclass:: PEPit.Wrapper 61 | :members: 62 | :show-inheritance: 63 | -------------------------------------------------------------------------------- /docs/source/api/operators.rst: -------------------------------------------------------------------------------- 1 | Operators 2 | ========= 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | Monotone 9 | -------- 10 | .. autoclass:: PEPit.operators.MonotoneOperator 11 | :members: 12 | :show-inheritance: 13 | 14 | Strongly monotone 15 | ----------------- 16 | .. autoclass:: PEPit.operators.StronglyMonotoneOperator 17 | :members: 18 | :show-inheritance: 19 | 20 | Lipschitz continuous 21 | -------------------- 22 | .. autoclass:: PEPit.operators.LipschitzOperator 23 | :members: 24 | :show-inheritance: 25 | 26 | Nonexpansive 27 | ------------ 28 | .. autoclass:: PEPit.operators.NonexpansiveOperator 29 | :members: 30 | :show-inheritance: 31 | 32 | Strongly monotone and Lipschitz continuous 33 | ------------------------------------------ 34 | .. autoclass:: PEPit.operators.LipschitzStronglyMonotoneOperator 35 | :members: 36 | :show-inheritance: 37 | 38 | Cocoercive 39 | ---------- 40 | .. autoclass:: PEPit.operators.CocoerciveOperator 41 | :members: 42 | :show-inheritance: 43 | 44 | Cocoercive and strongly monotone 45 | -------------------------------- 46 | .. autoclass:: PEPit.operators.CocoerciveStronglyMonotoneOperator 47 | :members: 48 | :show-inheritance: 49 | 50 | Negatively comonotone 51 | --------------------- 52 | .. autoclass:: PEPit.operators.NegativelyComonotoneOperator 53 | :members: 54 | :show-inheritance: 55 | 56 | Linear Operator 57 | --------------- 58 | .. autoclass:: PEPit.operators.LinearOperator 59 | :members: 60 | :show-inheritance: 61 | 62 | Skew Symmetric Linear Operator 63 | ------------------------------ 64 | .. 
autoclass:: PEPit.operators.SkewSymmetricLinearOperator 65 | :members: 66 | :show-inheritance: 67 | 68 | Symmetric Linear Operator 69 | ------------------------- 70 | .. autoclass:: PEPit.operators.SymmetricLinearOperator 71 | :members: 72 | :show-inheritance: 73 | -------------------------------------------------------------------------------- /docs/source/api/steps.rst: -------------------------------------------------------------------------------- 1 | Primitive steps 2 | =============== 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | Inexact gradient step 9 | --------------------- 10 | .. autofunction:: PEPit.primitive_steps.inexact_gradient_step 11 | 12 | Exact line-search step 13 | ---------------------- 14 | .. autofunction:: PEPit.primitive_steps.exact_linesearch_step 15 | 16 | Proximal step 17 | ------------- 18 | .. autofunction:: PEPit.primitive_steps.proximal_step 19 | 20 | Inexact proximal step 21 | --------------------- 22 | .. autofunction:: PEPit.primitive_steps.inexact_proximal_step 23 | 24 | Bregman gradient step 25 | --------------------- 26 | .. autofunction:: PEPit.primitive_steps.bregman_gradient_step 27 | 28 | Bregman proximal step 29 | --------------------- 30 | .. autofunction:: PEPit.primitive_steps.bregman_proximal_step 31 | 32 | Linear optimization step 33 | ------------------------ 34 | .. autofunction:: PEPit.primitive_steps.linear_optimization_step 35 | 36 | Epsilon-subgradient step 37 | ------------------------ 38 | .. autofunction:: PEPit.primitive_steps.epsilon_subgradient_step 39 | -------------------------------------------------------------------------------- /docs/source/api/tools.rst: -------------------------------------------------------------------------------- 1 | Tools 2 | ===== 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Merge two dictionaries 10 | ---------------------- 11 | .. autofunction:: PEPit.tools.dict_operations.merge_dict 12 | 13 | 14 | Multiply two dictionaries 15 | ------------------------- 16 | .. autofunction:: PEPit.tools.dict_operations.multiply_dicts 17 | 18 | 19 | Prune a dictionary 20 | ------------------ 21 | .. autofunction:: PEPit.tools.dict_operations.prune_dict 22 | 23 | 24 | Symmetrize a dictionary 25 | ----------------------- 26 | .. autofunction:: PEPit.tools.dict_operations.symmetrize_dict 27 | 28 | 29 | Expression to matrices 30 | ---------------------- 31 | .. autofunction:: PEPit.tools.expressions_to_matrices.expression_to_matrices 32 | 33 | 34 | Expression to sparse matrices 35 | ----------------------------- 36 | .. autofunction:: PEPit.tools.expressions_to_matrices.expression_to_sparse_matrices 37 | -------------------------------------------------------------------------------- /docs/source/api/wrappers.rst: -------------------------------------------------------------------------------- 1 | Wrappers 2 | ======== 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | CVXPY 10 | ----- 11 | .. autoclass:: PEPit.wrappers.CvxpyWrapper 12 | :members: 13 | :show-inheritance: 14 | 15 | 16 | MOSEK 17 | ----- 18 | .. autoclass:: PEPit.wrappers.MosekWrapper 19 | :members: 20 | :show-inheritance: 21 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. 
For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | # import os 14 | import os 15 | import sys 16 | 17 | # The module you're documenting (assumes you've added the project root dir to sys.path) 18 | sys.path.insert(0, os.path.abspath('../..')) 19 | 20 | # -- Project information ----------------------------------------------------- 21 | 22 | project = 'PEPit' 23 | copyright = '2021, PEPit Contributors' 24 | author = 'PEPit Contributors' 25 | 26 | # The full version, including alpha/beta/rc tags 27 | release = '0.0.1' 28 | 29 | # -- General configuration --------------------------------------------------- 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | extensions = [ 35 | # 'easydev.copybutton', 36 | 'sphinx.ext.autodoc', 37 | 'sphinx.ext.doctest', 38 | 'sphinx.ext.intersphinx', 39 | 'sphinx.ext.todo', 40 | 'sphinx.ext.coverage', 41 | 'sphinx.ext.mathjax', 42 | 'sphinx.ext.ifconfig', 43 | 'sphinx.ext.viewcode', 44 | 'sphinx.ext.napoleon', 45 | 'sphinx.ext.autosummary', 46 | # 'sphinxcontrib_autodocgen', 47 | 'myst_parser', 48 | ] 49 | 50 | napoleon_custom_sections = [('Returns', 'params_style'), 51 | ('Attributes', 'params_style')] 52 | 53 | import PEPit 54 | 55 | autodocgen_config = [{ 56 | 'modules': [PEPit], 57 | 'generated_source_dir': './autodocgen-output/', 58 | 59 | # if module matches this then it and any of its submodules will be skipped 60 | 'skip_module_regex': '(.*[.]__|myskippedmodule)', 61 | 62 | # produce a text file containing a list of everything documented. you can use this in a test to notice 63 | # when you've intentionally added/removed/changed a documented API 64 | 'write_documented_items_output_file': 'autodocgen_documented_items.txt', 65 | 66 | # customize autodoc on a per-module basis 67 | 'autodoc_options_decider': { 68 | 'mymodule.FooBar': {'inherited-members': True}, 69 | }, 70 | 71 | # choose a different title for specific modules, e.g. the toplevel one 72 | 'module_title_decider': lambda modulename: 'API Reference' if modulename == 'mymodule' else modulename, 73 | }] 74 | 75 | autoclass_content = 'both' 76 | 77 | # Include or not the special methods 78 | napoleon_include_special_with_doc = False 79 | 80 | # Add any paths that contain templates here, relative to this directory. 81 | templates_path = ['_templates'] 82 | 83 | # List of patterns, relative to source directory, that match files and 84 | # directories to ignore when looking for source files. 85 | # This pattern also affects html_static_path and html_extra_path. 86 | exclude_patterns = [] 87 | 88 | # -- Options for HTML output ------------------------------------------------- 89 | 90 | # The theme to use for HTML and HTML Help pages. See the documentation for 91 | # a list of builtin themes. 
92 | #
93 | html_theme = 'sphinx_rtd_theme'
94 | 
95 | # # Make the copy paste possible for any example code in documentation
96 | # import easydev
97 | #
98 | # jscopybutton_path = easydev.copybutton.get_copybutton_path()
99 | #
100 | # # if not os.path.isdir('_static'):
101 | # #     os.mkdir('_static')
102 | #
103 | # import shutil
104 | #
105 | # shutil.copy(jscopybutton_path, '_static')
106 | 
107 | # Add any paths that contain custom static files (such as style sheets) here,
108 | # relative to this directory. They are copied after the builtin static files,
109 | # so a file named "default.css" will overwrite the builtin "default.css".
110 | html_static_path = ['_static']
111 | 
--------------------------------------------------------------------------------
/docs/source/contributing.rst:
--------------------------------------------------------------------------------
1 | Contributing
2 | ============
3 | 
4 | PEPit is designed to allow users to easily contribute new features to the package.
5 | Classes of functions (or operators) as well as black-box oracles can be implemented
6 | by following the canvas from, respectively,
7 | `PEPit/functions/
8 | `_
9 | (or `PEPit/operators/
10 | `_)
11 | and `PEPit/primitive_steps/
12 | `_.
13 | 
14 | We encourage authors of research papers presenting novel optimization methods
15 | and/or novel convergence results to submit the corresponding PEPit files in the directory
16 | `PEPit/examples/
17 | `_.
18 | 
19 | .. contents::
20 |    :depth: 1
21 |    :local:
22 | 
23 | General guidelines
24 | ------------------
25 | 
26 | We kindly ask you to follow common guidelines, namely that the provided code:
27 | 
28 | - sticks as much as possible to the PEP8 convention.
29 | 
30 | - is commented with Google-style docstrings.
31 | 
32 | - is well covered by tests.
33 | 
34 | - is aligned with the documentation.
35 | 
36 | - is also mentioned in the ``whatsnew`` section of the documentation.
37 | 
38 | Adding a new function or operator class
39 | ---------------------------------------
40 | 
41 | To add a new function / operator class,
42 | please follow the format used for the other function / operator classes.
43 | 
44 | In particular:
45 | 
46 | - your class must inherit from the class ``Function`` and overwrite its ``add_class_constraints`` method.
47 | 
48 | - the docstring must be complete.
49 |   In particular, it must contain the list of attributes and arguments
50 |   as well as an example of usage via the ``declare_function`` method of the class ``PEP``.
51 |   It must also contain a clickable reference to the paper introducing it.
52 | 
53 | Adding a step / an oracle
54 | -------------------------
55 | 
56 | To add a new oracle / step,
57 | please add a new file containing the oracle function in ``PEPit/primitive_steps``.
58 | 
59 | Remark that transforming the mathematical formulation of an oracle into its PEP equivalent
60 | may require additional tricks,
61 | see e.g. `PEPit/primitive_steps/proximal_step.py
62 | `_,
63 | or `PEPit/primitive_steps/linear_optimization_step.py
64 | `_.
65 | 
66 | Please make sure that your docstring contains the mathematical derivation of the latter from the former.
67 | 
68 | Adding a new method as an example
69 | ---------------------------------
70 | 
71 | We don't require a specific code format for a new example.
72 | However, we ask the associated docstring to be precisely organized as follows:
73 | 
74 | - Define the problem solved (introducing function notations and assumptions).
75 | 
76 | - Name the method in boldface formatting.
77 | 
78 | - Introduce the performance metric, initial condition and parameters (``performance_metric < tau(parameters) initialization``).
79 | 
80 | - Describe the method's main step and cite the reference specifying the algorithm.
81 | 
82 | - Provide the theoretical result (``Upper/Lower/Tight`` in boldface formatting + ``performance_metric < theoretical_bound initialization``).
83 | 
84 | - Reference block containing relevant clickable references (preferably to arxiv with a specified version of the paper) in the format:
85 |   ``First name initial letter``. ``Last name`` (``YEAR``). ``Title``. ``Journal or conference`` (``Acronym of journal or conference``).
86 | 
87 | - Args block containing parameters with their type and a short description.
88 | 
89 | - Returns block containing ``pepit_tau`` and ``theoretical_tau``.
90 | 
91 | - Example block containing a minimal working example of the coded function.
92 | 
93 | We provide, in ``PEPit/examples/example_template.py``, a template that can be filled very quickly
94 | to help contributors share their methods easily.
95 | 
96 | New example template
97 | ^^^^^^^^^^^^^^^^^^^^
98 | 
99 | .. autofunction:: PEPit.examples.example_template.wc_example_template
100 | 
101 | New example test template
102 | ^^^^^^^^^^^^^^^^^^^^^^^^^
103 | 
104 | .. code-block:: python
105 | 
106 |     def test_[NAME_METHOD](self):
107 |         PARAMS = PARAMS
108 | 
109 |         wc, theory = wc_[NAME_METHOD](PARAMS=PARAMS, verbose=self.verbose)
110 | 
111 |         # If theoretical upper bound is tight
112 |         self.assertAlmostEqual(theory, wc, delta=self.relative_precision * theory)
113 | 
114 |         # If theoretical upper bound is not tight
115 |         self.assertLessEqual(wc, theory * (1 + self.relative_precision))
116 | 
117 |         # If theoretical lower bound is not tight
118 |         self.assertLessEqual(theory, wc * (1 + self.relative_precision))
119 | 
--------------------------------------------------------------------------------
/docs/source/examples.rst:
--------------------------------------------------------------------------------
1 | Examples
2 | ========
3 | 
4 | .. toctree::
5 |    :numbered:
6 |    :maxdepth: 2
7 |    :caption: Contents:
8 | 
9 |    examples/a
10 |    examples/b
11 |    examples/c
12 |    examples/d
13 |    examples/e
14 |    examples/f
15 |    examples/g
16 |    examples/h
17 |    examples/i
18 |    examples/j
19 |    examples/k
20 |    examples/l
21 | 
--------------------------------------------------------------------------------
/docs/source/examples/b.rst:
--------------------------------------------------------------------------------
1 | Composite convex minimization
2 | =============================
3 | 
4 | .. contents::
5 |    :depth: 1
6 |    :local:
7 | 
8 | 
9 | Proximal gradient
10 | -----------------
11 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_proximal_gradient
12 | 
13 | 
14 | Proximal gradient on quadratics
15 | -------------------------------
16 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_proximal_gradient_quadratics
17 | 
18 | 
19 | Accelerated proximal gradient
20 | -----------------------------
21 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_accelerated_proximal_gradient
22 | 
23 | 
24 | Bregman proximal point
25 | ----------------------
26 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_bregman_proximal_point
27 | 
28 | 
29 | Douglas Rachford splitting
30 | --------------------------
31 | .. 
autofunction:: PEPit.examples.composite_convex_minimization.wc_douglas_rachford_splitting 32 | 33 | 34 | Douglas Rachford splitting contraction 35 | -------------------------------------- 36 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_douglas_rachford_splitting_contraction 37 | 38 | 39 | Accelerated Douglas Rachford splitting 40 | -------------------------------------- 41 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_accelerated_douglas_rachford_splitting 42 | 43 | 44 | Frank Wolfe 45 | ----------- 46 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_frank_wolfe 47 | 48 | 49 | Improved interior method 50 | ------------------------ 51 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_improved_interior_algorithm 52 | 53 | 54 | No Lips in function value 55 | ------------------------- 56 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_no_lips_in_function_value 57 | 58 | 59 | No Lips in Bregman divergence 60 | ----------------------------- 61 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_no_lips_in_bregman_divergence 62 | 63 | 64 | Three operator splitting 65 | ------------------------ 66 | .. autofunction:: PEPit.examples.composite_convex_minimization.wc_three_operator_splitting 67 | -------------------------------------------------------------------------------- /docs/source/examples/c.rst: -------------------------------------------------------------------------------- 1 | Non-convex optimization 2 | ======================= 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Gradient Descent 10 | ---------------- 11 | .. autofunction:: PEPit.examples.nonconvex_optimization.wc_gradient_descent 12 | 13 | 14 | No Lips 1 15 | --------- 16 | .. autofunction:: PEPit.examples.nonconvex_optimization.wc_no_lips_1 17 | 18 | 19 | No Lips 2 20 | --------- 21 | .. autofunction:: PEPit.examples.nonconvex_optimization.wc_no_lips_2 22 | -------------------------------------------------------------------------------- /docs/source/examples/d.rst: -------------------------------------------------------------------------------- 1 | Stochastic and randomized convex minimization 2 | ============================================= 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Stochastic gradient descent 10 | --------------------------- 11 | .. autofunction:: PEPit.examples.stochastic_and_randomized_convex_minimization.wc_sgd 12 | 13 | 14 | Stochastic gradient descent in overparametrized setting 15 | ------------------------------------------------------- 16 | .. autofunction:: PEPit.examples.stochastic_and_randomized_convex_minimization.wc_sgd_overparametrized 17 | 18 | 19 | SAGA 20 | ---- 21 | .. autofunction:: PEPit.examples.stochastic_and_randomized_convex_minimization.wc_saga 22 | 23 | 24 | Point SAGA 25 | ---------- 26 | .. autofunction:: PEPit.examples.stochastic_and_randomized_convex_minimization.wc_point_saga 27 | 28 | 29 | Randomized coordinate descent for smooth strongly convex functions 30 | ------------------------------------------------------------------ 31 | .. autofunction:: PEPit.examples.stochastic_and_randomized_convex_minimization.wc_randomized_coordinate_descent_smooth_strongly_convex 32 | 33 | 34 | Randomized coordinate descent for smooth convex functions 35 | --------------------------------------------------------- 36 | .. 
autofunction:: PEPit.examples.stochastic_and_randomized_convex_minimization.wc_randomized_coordinate_descent_smooth_convex 37 | -------------------------------------------------------------------------------- /docs/source/examples/e.rst: -------------------------------------------------------------------------------- 1 | Monotone inclusions and variational inequalities 2 | ================================================ 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Proximal point 10 | -------------- 11 | .. autofunction:: PEPit.examples.monotone_inclusions_variational_inequalities.wc_proximal_point 12 | 13 | 14 | Accelerated proximal point 15 | -------------------------- 16 | .. autofunction:: PEPit.examples.monotone_inclusions_variational_inequalities.wc_accelerated_proximal_point 17 | 18 | 19 | Optimal Strongly-monotone Proximal Point 20 | ---------------------------------------- 21 | .. autofunction:: PEPit.examples.monotone_inclusions_variational_inequalities.wc_optimal_strongly_monotone_proximal_point 22 | 23 | 24 | Douglas Rachford Splitting 25 | -------------------------- 26 | .. autofunction:: PEPit.examples.monotone_inclusions_variational_inequalities.wc_douglas_rachford_splitting 27 | 28 | 29 | Three operator splitting 30 | ------------------------ 31 | .. autofunction:: PEPit.examples.monotone_inclusions_variational_inequalities.wc_three_operator_splitting 32 | 33 | 34 | Optimistic gradient 35 | ------------------- 36 | .. autofunction:: PEPit.examples.monotone_inclusions_variational_inequalities.wc_optimistic_gradient 37 | 38 | 39 | Past extragradient 40 | ------------------ 41 | .. autofunction:: PEPit.examples.monotone_inclusions_variational_inequalities.wc_past_extragradient 42 | -------------------------------------------------------------------------------- /docs/source/examples/f.rst: -------------------------------------------------------------------------------- 1 | Fixed point 2 | =========== 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Halpern iteration 10 | ----------------- 11 | .. autofunction:: PEPit.examples.fixed_point_problems.wc_halpern_iteration 12 | 13 | 14 | Optimal Contractive Halpern iteration 15 | ------------------------------------- 16 | .. autofunction:: PEPit.examples.fixed_point_problems.wc_optimal_contractive_halpern_iteration 17 | 18 | 19 | Krasnoselskii-Mann with constant step-sizes 20 | ------------------------------------------- 21 | .. autofunction:: PEPit.examples.fixed_point_problems.wc_krasnoselskii_mann_constant_step_sizes 22 | 23 | 24 | Krasnoselskii-Mann with increasing step-sizes 25 | --------------------------------------------- 26 | .. autofunction:: PEPit.examples.fixed_point_problems.wc_krasnoselskii_mann_increasing_step_sizes 27 | 28 | 29 | Inconsistent Halpern iteration 30 | ------------------------------ 31 | .. autofunction:: PEPit.examples.fixed_point_problems.wc_inconsistent_halpern_iteration 32 | -------------------------------------------------------------------------------- /docs/source/examples/g.rst: -------------------------------------------------------------------------------- 1 | Potential functions 2 | =================== 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Gradient descent Lyapunov 1 10 | --------------------------- 11 | .. autofunction:: PEPit.examples.potential_functions.wc_gradient_descent_lyapunov_1 12 | 13 | 14 | Gradient descent Lyapunov 2 15 | --------------------------- 16 | .. 
autofunction:: PEPit.examples.potential_functions.wc_gradient_descent_lyapunov_2 17 | 18 | 19 | Accelerated gradient method 20 | --------------------------- 21 | .. autofunction:: PEPit.examples.potential_functions.wc_accelerated_gradient_method 22 | -------------------------------------------------------------------------------- /docs/source/examples/h.rst: -------------------------------------------------------------------------------- 1 | Inexact proximal methods 2 | ======================== 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Accelerated inexact forward backward 10 | ------------------------------------ 11 | .. autofunction:: PEPit.examples.inexact_proximal_methods.wc_accelerated_inexact_forward_backward 12 | 13 | 14 | Partially inexact Douglas Rachford splitting 15 | -------------------------------------------- 16 | .. autofunction:: PEPit.examples.inexact_proximal_methods.wc_partially_inexact_douglas_rachford_splitting 17 | 18 | 19 | Relatively inexact proximal point 20 | --------------------------------- 21 | .. autofunction:: PEPit.examples.inexact_proximal_methods.wc_relatively_inexact_proximal_point_algorithm 22 | -------------------------------------------------------------------------------- /docs/source/examples/i.rst: -------------------------------------------------------------------------------- 1 | Adaptive methods 2 | ================ 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Polyak steps in distance to optimum 10 | ----------------------------------- 11 | .. autofunction:: PEPit.examples.adaptive_methods.wc_polyak_steps_in_distance_to_optimum 12 | 13 | 14 | Polyak steps in function value 15 | ------------------------------ 16 | .. autofunction:: PEPit.examples.adaptive_methods.wc_polyak_steps_in_function_value 17 | -------------------------------------------------------------------------------- /docs/source/examples/j.rst: -------------------------------------------------------------------------------- 1 | Low dimensional worst-cases scenarios 2 | ===================================== 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Inexact gradient 10 | ---------------- 11 | .. autofunction:: PEPit.examples.low_dimensional_worst_cases_scenarios.wc_inexact_gradient 12 | 13 | 14 | Non-convex gradient descent 15 | --------------------------- 16 | .. autofunction:: PEPit.examples.low_dimensional_worst_cases_scenarios.wc_gradient_descent 17 | 18 | 19 | Optimized gradient 20 | ------------------ 21 | .. autofunction:: PEPit.examples.low_dimensional_worst_cases_scenarios.wc_optimized_gradient 22 | 23 | 24 | Frank Wolfe 25 | ----------- 26 | .. autofunction:: PEPit.examples.low_dimensional_worst_cases_scenarios.wc_frank_wolfe 27 | 28 | 29 | Proximal point 30 | -------------- 31 | .. autofunction:: PEPit.examples.low_dimensional_worst_cases_scenarios.wc_proximal_point 32 | 33 | 34 | Halpern iteration 35 | ----------------- 36 | .. autofunction:: PEPit.examples.low_dimensional_worst_cases_scenarios.wc_halpern_iteration 37 | 38 | 39 | Alternate projections 40 | --------------------- 41 | .. autofunction:: PEPit.examples.low_dimensional_worst_cases_scenarios.wc_alternate_projections 42 | 43 | 44 | Averaged projections 45 | -------------------- 46 | .. autofunction:: PEPit.examples.low_dimensional_worst_cases_scenarios.wc_averaged_projections 47 | 48 | 49 | Dykstra 50 | ------- 51 | .. 
autofunction:: PEPit.examples.low_dimensional_worst_cases_scenarios.wc_dykstra 52 | -------------------------------------------------------------------------------- /docs/source/examples/k.rst: -------------------------------------------------------------------------------- 1 | Continuous-time models 2 | ====================== 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Gradient flow for strongly convex functions 10 | ------------------------------------------- 11 | .. autofunction:: PEPit.examples.continuous_time_models.wc_gradient_flow_strongly_convex 12 | 13 | 14 | Gradient flow for convex functions 15 | ---------------------------------- 16 | .. autofunction:: PEPit.examples.continuous_time_models.wc_gradient_flow_convex 17 | 18 | 19 | Accelerated gradient flow for strongly convex functions 20 | ------------------------------------------------------- 21 | .. autofunction:: PEPit.examples.continuous_time_models.wc_accelerated_gradient_flow_strongly_convex 22 | 23 | 24 | Accelerated gradient flow for convex functions 25 | ---------------------------------------------- 26 | .. autofunction:: PEPit.examples.continuous_time_models.wc_accelerated_gradient_flow_convex 27 | -------------------------------------------------------------------------------- /docs/source/examples/l.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | 4 | .. contents:: 5 | :depth: 1 6 | :local: 7 | 8 | 9 | Contraction rate of gradient descent 10 | ------------------------------------ 11 | .. autofunction:: PEPit.examples.tutorials.wc_gradient_descent_contraction 12 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. PEPit documentation master file, created by 2 | sphinx-quickstart on Thu Oct 7 13:57:16 2021. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to PEPit's documentation! 7 | ================================= 8 | 9 | .. toctree:: 10 | :maxdepth: 1 11 | :caption: Contents: 12 | 13 | self 14 | quickstart 15 | api 16 | examples 17 | whatsnew 18 | contributing 19 | 20 | .. include:: ../../README.md 21 | :parser: myst_parser.sphinx_ 22 | 23 | 24 | Indices and tables 25 | ------------------ 26 | 27 | * :ref:`genindex` 28 | * :ref:`modindex` 29 | * :ref:`search` 30 | -------------------------------------------------------------------------------- /docs/source/whatsnew.rst: -------------------------------------------------------------------------------- 1 | What's new in PEPit 2 | =================== 3 | 4 | .. toctree:: 5 | :numbered: 6 | :maxdepth: 2 7 | :caption: Contents: 8 | 9 | whatsnew/0.1.0 10 | whatsnew/0.2.0 11 | whatsnew/0.2.1 12 | whatsnew/0.3.2 13 | whatsnew/0.3.3 14 | -------------------------------------------------------------------------------- /docs/source/whatsnew/0.1.0.rst: -------------------------------------------------------------------------------- 1 | What's new in PEPit 0.1.0 2 | ========================= 3 | 4 | - Adding general constraints to your problem. 5 | 6 | | The method ``add_constraint`` has been added to the class ``PEP`` for general constraints not necessarily related to a specific function. 
7 |   | For readability of your code,
8 |     we suggest using the method ``set_initial_condition`` when the constraint is the initial one,
9 |     and the method ``add_constraint`` for any other constraint.
10 | 
11 | - Adding LMI constraints to your problem.
12 | 
13 |   The method ``add_psd_matrix`` has been added to the class ``PEP`` and must be used to add LMI constraints to your problem.
14 | 
15 | - CVXPY options.
16 | 
17 |   | PEPit uses CVXPY to solve the underlying SDP of your problem.
18 |   | CVXPY solver options can be provided to the method ``PEP.solve``.
19 | 
20 | - Optimizing the dimension of the solution.
21 | 
22 |   | The ``tracetrick`` option of the method ``PEP.solve`` has been replaced by ``dimension_reduction_heuristic``.
23 |   | Set to None by default, this option can be set to "`trace`" or "`logdet{followed by a number}`" to use one of those heuristics.
24 | 
25 | - Granularity of the verbose mode has evolved.
26 | 
27 |   | The verbose modes of the method ``PEP.solve`` and of the provided example files are now integers:
28 | 
29 |   - 0: No verbose at all
30 |   - 1: PEPit information is printed but not CVXPY's
31 |   - 2: Both PEPit and CVXPY details are printed
32 | 
33 | - Parameters of function classes.
34 | 
35 |   | The parameters that characterize a function class must be provided directly as arguments of this function class, not through the dict "param" anymore.
36 |   | Example: ``PEP.declare_function(function_class=SmoothStronglyConvexFunction, mu=.1, L=1.)``
37 | 
38 | - Initializing a Point or an Expression to 0.
39 | 
40 |   ``null_point`` and ``null_expression`` have been added to the module ``PEPit`` to facilitate access to a ``Point`` or an ``Expression`` initialized to 0.
41 | 
42 | - 3 new function classes have been added:
43 | 
44 |   - ``ConvexSupportFunction``, for convex support functions (see [1])
45 |   - ``ConvexQGFunction``, for convex and quadratically upper bounded functions (see [2])
46 |   - ``RsiEbFunction``, for functions verifying the lower restricted secant inequality and upper error bound (see [3])
47 | 
48 | `[1] A. Taylor, J. Hendrickx, F. Glineur (2017).
49 | Exact worst-case performance of first-order methods for composite convex optimization.
50 | SIAM Journal on Optimization, 27(3):1283–1313.
51 | `_
52 | 
53 | `[2] B. Goujaud, A. Taylor, A. Dieuleveut (2022).
54 | Optimal first-order methods for convex functions with a quadratic upper bound.
55 | `_
56 | 
57 | `[3] C. Guille-Escuret, B. Goujaud, A. Ibrahim, I. Mitliagkas (2022).
58 | Gradient Descent Is Optimal Under Lower Restricted Secant Inequality And Upper Error Bound.
59 | arXiv 2203.00342.
60 | `_
61 | 
--------------------------------------------------------------------------------
/docs/source/whatsnew/0.2.0.rst:
--------------------------------------------------------------------------------
1 | What's new in PEPit 0.2.0
2 | =========================
3 | 
4 | - Adding the possibility to set LMI constraints associated with function objects.
5 | 
6 |   The method ``add_psd_matrix`` has been added to the class ``Function`` and must be used to add LMI constraints associated with a function.
7 | 
8 | - Storing dual values prior to dimension reduction.
9 | 
10 |   Each ``Constraint`` object receives a dual value in the attribute ``_dual_value``, which can be accessed through the method ``eval_dual``.
11 |   In previous releases, when dimension reduction was activated, the stored dual values were those of the latest solved problem.
12 |   From this release, the stored dual values are always those of the original problem.
13 |   Note that the primal values are those of the last problem, which provides an adversarial example of the smallest possible dimension.
14 | 
15 | - Creating the ``PSDMatrix`` class.
16 | 
17 |   The ``PSDMatrix`` class has been added. This doesn't affect how the methods ``add_psd_matrix`` must be used.
18 |   A user must continue providing a PSD matrix in the form of an Iterable of ``Expression``s.
19 |   The latter will be automatically transformed into a ``PSDMatrix`` object that contains a ``_dual_value`` attribute and an ``eval_dual`` method, like any ``Constraint`` object.
20 | 
21 | - Fixing a minor issue in pep.py.
22 | 
23 |   There was an issue when the Gram matrix G did not need any eigenvalue correction, as ``eig_threshold`` in ``pep.get_nb_eigenvalues_and_corrected_matrix`` was defined as the maximum of an empty list.
24 |   This issue has been fixed in this release.
25 | 
26 | - Eigenvalues are now sorted in decreasing order in the output of the PEP, making it easier to plot low-dimensional worst-case examples (examples of such usages can be found in the exercise repository `Learning-Performance-Estimation `_).
27 | 
28 | - Many new examples were introduced, including ones for finding low-dimensional worst-case examples, fixed-point iterations, variational inequalities, and continuous-time dynamics.
29 | 
--------------------------------------------------------------------------------
/docs/source/whatsnew/0.2.1.rst:
--------------------------------------------------------------------------------
1 | What's new in PEPit 0.2.1
2 | =========================
3 | 
4 | - :class:`PSDMatrix` can now be instantiated with a list of lists. Previously, the argument had to be an ndarray.
5 | 
6 | - The attribute `matrix_of_expression` of :class:`PSDMatrix` has been renamed to `matrix_of_expressions`.
7 | 
8 | - Fix: Points that were not associated with Functions could fail to be evaluable after solving the PEP (see Issue `#74 `_).
9 |   This has been fixed in PR `#75 `_.
10 | 
11 | - Fix: Constraints on a function defined as a linear combination of other functions were not taken into consideration.
12 |   This has been fixed in PR `#80 `_.
13 | 
14 | - The attributes `list_of_constraints` and `list_of_psd` of :class:`Function` objects have respectively been split into
15 |   `list_of_constraints` and `list_of_class_constraints` (the latter containing the interpolation constraints of the class of functions),
16 |   and into `list_of_psd` and `list_of_class_psd` (the latter containing the LMI interpolation constraints of the class of functions).
17 |   The 2 lists containing the interpolation constraints are filled when the PEP solver is called.
18 | 
19 | - The attributes `list_of_functions` and `list_of_points` of :class:`PEP` objects are not used anymore in the pipeline.
20 |   They still contain the same elements as in previous versions of PEPit and can still be called for now.
21 |   However, they will be removed in a future version of PEPit; therefore, we discourage using them.
22 |   The full list of functions as well as the full list of points can respectively be obtained in
23 |   `Function.list_of_functions` and `Point.list_of_points`.
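
For illustration, a minimal sketch of the list-of-lists usage mentioned above (the declared function class and the quadratic expressions are arbitrary placeholders, and the matrix shown is a Gram matrix, hence trivially PSD):

.. code-block:: python

    from PEPit import PEP
    from PEPit.functions import ConvexFunction

    problem = PEP()
    func = problem.declare_function(ConvexFunction)
    x = problem.set_initial_point()
    y = problem.set_initial_point()

    # A PSD constraint can now be passed as a list of lists of Expressions
    # (previously, an ndarray was required).
    problem.add_psd_matrix(matrix_of_expressions=[[x ** 2, x * y],
                                                  [x * y, y ** 2]])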
24 | 
--------------------------------------------------------------------------------
/docs/source/whatsnew/0.3.2.rst:
--------------------------------------------------------------------------------
1 | What's new in PEPit 0.3.2
2 | =========================
3 | 
4 | For users:
5 | ----------
6 | 
7 | - The :class:`BlockPartition` class has been created; it allows decomposing the space into orthogonal subspaces so as to treat points differently in each subspace. This is useful in particular to study coordinate-descent-like methods.
8 | 
9 | - The function classes :class:`SmoothConvexLipschitzFunction` and :class:`SmoothStronglyConvexQuadraticFunction` have been added.
10 | 
11 | - The operator classes :class:`CocoerciveStronglyMonotoneOperator`, :class:`NegativelyComonotoneOperator`, :class:`NonexpansiveOperator`, :class:`LinearOperator`, :class:`SymmetricLinearOperator` and :class:`SkewSymmetricLinearOperator` have been added.
12 | 
13 | - Most operator classes had redundant class constraints. These have been removed, reducing the size of the PEP and thus the computational load.
14 | 
15 | - Added the argument `wrapper` to `solve`, set to `cvxpy` by default. If `wrapper` is set to `cvxpy`, the routine is unchanged compared to PEPit 0.2.1. If `wrapper` is set to `mosek`, PEPit bypasses CVXPY and runs MOSEK directly.
16 | 
17 | - Removed the possibility to return the full problem with `pep.solve`. Instead, the PEP object possesses numerous attributes containing all the details.
18 | 
19 | - PEP.solve now returns the dual value of the objective by default, i.e. the actual worst-case guarantee. The user can still call `PEP.solve` with the option `return_primal_or_dual` set to `"primal"` to get the primal value of the objective, corresponding to a counterexample. Note that those two values should be almost identical by strong duality. A new list of messages is printed to verify the feasibility of the provided solution and to report the duality gap.
20 | 
21 | - The order of the constraints of :class:`SmoothFunction` has been reversed to be consistent with :class:`ConvexFunction`, :class:`StronglyConvexFunction`, :class:`SmoothConvexFunction` and :class:`SmoothStronglyConvexFunction`.
22 | 
23 | - :class:`Point`, :class:`Expression`, :class:`Constraint`, :class:`PSDMatrix` and :class:`Function` objects now have an attribute "name" that contains a string representing them. The latter is initialized from the argument name when creating the object (set to None by default), and can be updated through the method `set_name`. Finally, it is accessible through the method `get_name`.
24 | 
25 | - The class interpolation constraints are automatically named after the function name and the point names. To fully enjoy this feature, it is recommended to give a name to those entities.
26 | 
27 | - Each function now possesses an attribute `tables_of_constraints` storing the class constraints in a nice readable way. To access the dual values of all interpolation constraints in readable tables, use the method `get_class_constraints_duals`. It returns a dictionary whose values are pandas DataFrames.
28 | 
29 | 
30 | For contributors only:
31 | ----------------------
32 | 
33 | - Creating a new class of functions / operators previously required overwriting the method `set_class_constraints`. This method has been renamed to `add_class_constraints`, while `set_class_constraints` is implemented in the class `Function` and calls `add_class_constraints`.
34 | 
35 | - In order to simplify the implementation of function / operator classes, a method `add_constraints_from_lists_of_points` has been added to the class `Function`. A contributor adding a function / operator class can use it in `add_class_constraints`. `add_constraints_from_lists_of_points` takes lists of points as arguments, as well as a method that returns a named constraint from 2 points, their names, and the function name. Please refer to `the implemented functions / operators classes `_ as examples.
36 | 
--------------------------------------------------------------------------------
/docs/source/whatsnew/0.3.3.rst:
--------------------------------------------------------------------------------
1 | What's new in PEPit 0.3.3
2 | =========================
3 | 
4 | - Silver step-sizes have been added in the PEPit.examples.unconstrained_convex_minimization module.
5 | 
6 | - A fix has been added in the class :class:`SmoothStronglyConvexQuadraticFunction`. Prior to that fix, using this class without a stationary point in a PEP solved with the direct interface to MOSEK was problematic due to the late creation of a stationary point. After this fix, a stationary point is automatically created when instantiating this class of functions.
7 | 
8 | - Another modification has been made to the class :class:`SmoothStronglyConvexQuadraticFunction`. Prior to that, the minimum value was assumed to be 0. This is not the case anymore.
9 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 |     "setuptools>=42",
4 |     "wheel"
5 | ]
6 | build-backend = "setuptools.build_meta"
7 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | cvxpy
2 | numpy
3 | pandas
4 | scipy
5 | matplotlib
6 | 
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
3 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 | 
3 | with open("README.md", "r", encoding="utf-8") as fh:
4 |     long_description = fh.read()
5 | 
6 | version = "{{VERSION_PLACEHOLDER}}"
7 | 
8 | setuptools.setup(
9 |     name="PEPit",
10 |     version=version,
11 |     author="Baptiste Goujaud, Céline Moucer, Julien Hendrickx, Francois Glineur, Adrien Taylor and Aymeric Dieuleveut",
12 |     author_email="baptiste.goujaud@gmail.com",
13 |     description="PEPit is a package that allows users "
14 |                 "to pep their optimization algorithms as easily as they implement them",
15 |     long_description=long_description,
16 |     long_description_content_type="text/markdown",
17 |     install_requires=["cvxpy>=1.1.17", "pandas>=1.0.0"],
18 |     url="https://github.com/PerformanceEstimation/PEPit",
19 |     project_urls={
20 |         "Documentation": "https://pepit.readthedocs.io/en/{}/".format(version),
21 |     },
22 |     download_url="https://github.com/PerformanceEstimation/PEPit/archive/refs/tags/{}.tar.gz".format(version),
23 |     classifiers=[
24 |         "Programming Language :: Python :: 3",
25 |         "License :: OSI Approved :: MIT License",
26 |         "Operating System :: OS Independent",
27 |     ],
28 |     packages=[element for element in 
setuptools.find_packages() if element[:5] == 'PEPit'], 29 | python_requires=">=3.7", 30 | ) 31 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ['additional_complexified_examples_tests', 2 | 'test_block_partition', 3 | 'test_block_smooth_convex_functions', 4 | 'test_constraints', 5 | 'test_dict_operations', 6 | 'test_examples', 7 | 'test_expression', 8 | 'test_function', 9 | 'test_pep', 10 | 'test_point', 11 | 'test_functions_and_operators', 12 | 'test_uselessly_complexified_examples', 13 | 'test_wrappers', 14 | ] 15 | -------------------------------------------------------------------------------- /tests/additional_complexified_examples_tests/__init__.py: -------------------------------------------------------------------------------- 1 | from .gradient_descent_blocks import wc_gradient_descent_blocks 2 | from .gradient_descent_useless_blocks import wc_gradient_descent_useless_blocks 3 | from .gradient_exact_line_search import wc_gradient_exact_line_search_complexified 4 | from .inexact_gradient_exact_line_search import wc_inexact_gradient_exact_line_search_complexified 5 | from .inexact_gradient_exact_line_search2 import wc_inexact_gradient_exact_line_search_complexified2 6 | from .inexact_gradient_exact_line_search3 import wc_inexact_gradient_exact_line_search_complexified3 7 | from .proximal_gradient import wc_proximal_gradient_complexified 8 | from .proximal_gradient_useless_partition import wc_proximal_gradient_complexified2 9 | from .proximal_point import wc_proximal_point_complexified 10 | from .proximal_point_useless_partition import wc_proximal_point_complexified2 11 | from .proximal_point_LMI import wc_proximal_point_complexified3 12 | from .randomized_coordinate_descent_smooth_convex import wc_randomized_coordinate_descent_smooth_convex_complexified 13 | from .randomized_coordinate_descent_smooth_strongly_convex import wc_randomized_coordinate_descent_smooth_strongly_convex_complexified 14 | 15 | 16 | __all__ = ['gradient_descent_blocks', 'wc_gradient_descent_blocks', 17 | 'gradient_descent_useless_blocks', 'wc_gradient_descent_useless_blocks', 18 | 'gradient_exact_line_search', 'wc_gradient_exact_line_search_complexified', 19 | 'inexact_gradient_exact_line_search', 'wc_inexact_gradient_exact_line_search_complexified', 20 | 'inexact_gradient_exact_line_search2', 'wc_inexact_gradient_exact_line_search_complexified2', 21 | 'inexact_gradient_exact_line_search3', 'wc_inexact_gradient_exact_line_search_complexified3', 22 | 'proximal_gradient', 'wc_proximal_gradient_complexified', 23 | 'proximal_gradient_useless_partition', 'wc_proximal_gradient_complexified2', 24 | 'proximal_point', 'wc_proximal_point_complexified', 25 | 'proximal_point_useless_partition', 'wc_proximal_point_complexified2', 26 | 'proximal_point_LMI', 'wc_proximal_point_complexified3', 27 | 'randomized_coordinate_descent_smooth_convex', 'wc_randomized_coordinate_descent_smooth_convex_complexified', 28 | 'randomized_coordinate_descent_smooth_strongly_convex', 'wc_randomized_coordinate_descent_smooth_strongly_convex_complexified', 29 | ] 30 | -------------------------------------------------------------------------------- /tests/additional_complexified_examples_tests/gradient_exact_line_search.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from PEPit import PEP 4 | from PEPit.functions import 
-------------------------------------------------------------------------------- /pyproject.toml: --------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "setuptools>=42",
4 | "wheel"
5 | ]
6 | build-backend = "setuptools.build_meta"
7 | 
-------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | cvxpy
2 | numpy
3 | pandas
4 | scipy
5 | matplotlib
6 | 
-------------------------------------------------------------------------------- /setup.cfg: --------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
3 | 
-------------------------------------------------------------------------------- /setup.py: --------------------------------------------------------------------------------
1 | import setuptools
2 | 
3 | with open("README.md", "r", encoding="utf-8") as fh:
4 | long_description = fh.read()
5 | 
6 | version = "{{VERSION_PLACEHOLDER}}"
7 | 
8 | setuptools.setup(
9 | name="PEPit",
10 | version=version,
11 | author="Baptiste Goujaud, Céline Moucer, Julien Hendrickx, Francois Glineur, Adrien Taylor and Aymeric Dieuleveut",
12 | author_email="baptiste.goujaud@gmail.com",
13 | description="PEPit is a package that allows users "
14 | "to pep their optimization algorithms as easily as they implement them",
15 | long_description=long_description,
16 | long_description_content_type="text/markdown",
17 | install_requires=["cvxpy>=1.1.17", "pandas>=1.0.0"],
18 | url="https://github.com/PerformanceEstimation/PEPit",
19 | project_urls={
20 | "Documentation": "https://pepit.readthedocs.io/en/{}/".format(version),
21 | },
22 | download_url="https://github.com/PerformanceEstimation/PEPit/archive/refs/tags/{}.tar.gz".format(version),
23 | classifiers=[
24 | "Programming Language :: Python :: 3",
25 | "License :: OSI Approved :: MIT License",
26 | "Operating System :: OS Independent",
27 | ],
28 | packages=[element for element in setuptools.find_packages() if element.startswith('PEPit')],
29 | python_requires=">=3.7",
30 | )
31 | 
-------------------------------------------------------------------------------- /tests/__init__.py: --------------------------------------------------------------------------------
1 | __all__ = ['additional_complexified_examples_tests',
2 | 'test_block_partition',
3 | 'test_block_smooth_convex_functions',
4 | 'test_constraints',
5 | 'test_dict_operations',
6 | 'test_examples',
7 | 'test_expression',
8 | 'test_expression_to_matrices',
9 | 'test_function',
10 | 'test_pep',
11 | 'test_point',
12 | 'test_psd_matrix',
13 | 'test_functions_and_operators',
14 | 'test_uselessly_complexified_examples',
15 | 'test_wrappers',
16 | ]
17 | 
-------------------------------------------------------------------------------- /tests/additional_complexified_examples_tests/__init__.py: --------------------------------------------------------------------------------
1 | from .gradient_descent_blocks import wc_gradient_descent_blocks
2 | from .gradient_descent_useless_blocks import wc_gradient_descent_useless_blocks
3 | from .gradient_exact_line_search import wc_gradient_exact_line_search_complexified
4 | from .inexact_gradient_exact_line_search import wc_inexact_gradient_exact_line_search_complexified
5 | from .inexact_gradient_exact_line_search2 import wc_inexact_gradient_exact_line_search_complexified2
6 | from .inexact_gradient_exact_line_search3 import wc_inexact_gradient_exact_line_search_complexified3
7 | from .proximal_gradient import wc_proximal_gradient_complexified
8 | from .proximal_gradient_useless_partition import wc_proximal_gradient_complexified2
9 | from .proximal_point import wc_proximal_point_complexified
10 | from .proximal_point_useless_partition import wc_proximal_point_complexified2
11 | from .proximal_point_LMI import wc_proximal_point_complexified3
12 | from .randomized_coordinate_descent_smooth_convex import wc_randomized_coordinate_descent_smooth_convex_complexified
13 | from .randomized_coordinate_descent_smooth_strongly_convex import wc_randomized_coordinate_descent_smooth_strongly_convex_complexified
14 | 
15 | 
16 | __all__ = ['gradient_descent_blocks', 'wc_gradient_descent_blocks',
17 | 'gradient_descent_useless_blocks', 'wc_gradient_descent_useless_blocks',
18 | 'gradient_exact_line_search', 'wc_gradient_exact_line_search_complexified',
19 | 'inexact_gradient_exact_line_search', 'wc_inexact_gradient_exact_line_search_complexified',
20 | 'inexact_gradient_exact_line_search2', 'wc_inexact_gradient_exact_line_search_complexified2',
21 | 'inexact_gradient_exact_line_search3', 'wc_inexact_gradient_exact_line_search_complexified3',
22 | 'proximal_gradient', 'wc_proximal_gradient_complexified',
23 | 'proximal_gradient_useless_partition', 'wc_proximal_gradient_complexified2',
24 | 'proximal_point', 'wc_proximal_point_complexified',
25 | 'proximal_point_useless_partition', 'wc_proximal_point_complexified2',
26 | 'proximal_point_LMI', 'wc_proximal_point_complexified3',
27 | 'randomized_coordinate_descent_smooth_convex', 'wc_randomized_coordinate_descent_smooth_convex_complexified',
28 | 'randomized_coordinate_descent_smooth_strongly_convex', 'wc_randomized_coordinate_descent_smooth_strongly_convex_complexified',
29 | ]
30 | 
-------------------------------------------------------------------------------- /tests/additional_complexified_examples_tests/gradient_exact_line_search.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | from PEPit import PEP
4 | from PEPit.functions import SmoothStronglyConvexFunction
5 | from PEPit.point import Point
6 | 
7 | 
8 | def wc_gradient_exact_line_search_complexified(L, mu, n, verbose=1):
9 | """
10 | See description in `PEPit/examples/unconstrained_convex_minimization/gradient_exact_line_search.py`.
11 | This example is for testing purposes; the worst-case result is supposed to be the same as that of the other routine,
12 | but the exact line-search steps are imposed via LMI constraints (instead of relying on the `exact_linesearch_step`
13 | primitive).
14 | 
15 | Args:
16 | L (float): the smoothness parameter.
17 | mu (float): the strong convexity parameter.
18 | n (int): number of iterations.
19 | verbose (int): Level of information details to print.
20 | 
21 | - -1: No verbose at all.
22 | - 0: This example's output.
23 | - 1: This example's output + PEPit information.
24 | - 2: This example's output + PEPit information + CVXPY details.
25 | 
26 | Returns:
27 | pepit_tau (float): worst-case value
28 | theoretical_tau (float): theoretical value
29 | 
30 | Example:
31 | >>> pepit_tau, theoretical_tau = wc_gradient_exact_line_search_complexified(L=1, mu=.1, n=2, verbose=1)
32 | (PEPit) Setting up the problem: size of the main PSD matrix: 7x7
33 | (PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
34 | (PEPit) Setting up the problem: Adding initial conditions and general constraints ...
35 | (PEPit) Setting up the problem: initial conditions and general constraints (1 constraint(s) added)
36 | (PEPit) Setting up the problem: 2 lmi constraint(s) added
37 | Size of PSD matrix 1: 2x2
38 | Size of PSD matrix 2: 2x2
39 | (PEPit) Setting up the problem: interpolation conditions for 1 function(s)
40 | function 1 : Adding 14 scalar constraint(s) ...
41 | function 1 : 14 scalar constraint(s) added
42 | (PEPit) Compiling SDP
43 | (PEPit) Calling SDP solver
44 | (PEPit) Solver status: optimal (solver: SCS); optimal value: 0.4480925747748075
45 | *** Example file: worst-case performance of gradient descent with exact linesearch (ELS) ***
46 | PEPit guarantee: f(x_n)-f_* <= 0.448093 (f(x_0)-f_*)
47 | Theoretical guarantee: f(x_n)-f_* <= 0.448125 (f(x_0)-f_*)
48 | 
49 | """
50 | 
51 | # Instantiate PEP
52 | problem = PEP()
53 | 
54 | # Declare two smooth strongly convex functions; the objective to be minimized is their sum
55 | f1 = problem.declare_function(SmoothStronglyConvexFunction, mu=mu, L=L)
56 | f2 = problem.declare_function(SmoothStronglyConvexFunction, mu=mu, L=L)
57 | func = f1 + f2
58 | 
59 | # Start by defining its unique optimal point xs = x_* and corresponding function value fs = f_*
60 | xs = func.stationary_point()
61 | fs = func(xs)
62 | 
63 | # Then define the starting point x0 of the algorithm as well as corresponding gradient and function value g0 and f0
64 | x0 = problem.set_initial_point()
65 | g0, f0 = func.oracle(x0)
66 | 
67 | # Set the initial constraint that is the difference between f0 and f_*
68 | problem.set_initial_condition(f0 - fs <= 1)
69 | 
70 | # Run n steps of GD method with ELS
71 | x = x0
72 | gx = g0
73 | for i in range(n):
74 | gx_prev = gx
75 | x_prev = x
76 | x = Point()
77 | gx, fx = func.oracle(x)
78 | 
79 | matrix_of_expressions = np.array([[0, gx_prev * gx], [gx_prev * gx, 0]])  # PSD iff gx_prev * gx == 0, i.e., successive gradients are orthogonal
80 | problem.add_psd_matrix(matrix_of_expressions=matrix_of_expressions)
81 | func.add_constraint((x - x_prev) * gx == 0)  # stationarity of the line search along the step direction
82 | 
83 | # Set the performance metric to the function value accuracy
84 | problem.set_performance_metric(fx - fs)
85 | 
86 | # Solve the PEP
87 | pepit_verbose = max(verbose, 0)
88 | pepit_tau = problem.solve(verbose=pepit_verbose)
89 | 
90 | # Compute theoretical guarantee (for comparison)
91 | theoretical_tau = ((L - mu) / (L + mu)) ** (2 * n)
92 | 
93 | # Print conclusion if required
94 | if verbose != -1:
95 | print('*** Example file: worst-case performance of gradient descent with exact linesearch (ELS) ***')
96 | print('\tPEPit guarantee:\t f(x_n)-f_* <= {:.6} (f(x_0)-f_*)'.format(pepit_tau))
97 | print('\tTheoretical guarantee:\t f(x_n)-f_* <= {:.6} (f(x_0)-f_*)'.format(theoretical_tau))
98 | 
99 | # Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
100 | return pepit_tau, theoretical_tau
101 | 
102 | 
103 | if __name__ == "__main__":
104 | pepit_tau, theoretical_tau = wc_gradient_exact_line_search_complexified(L=1, mu=.1, n=2, verbose=1)
105 | 
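The LMI trick used above deserves a word: a symmetric matrix [[0, a], [a, 0]] has eigenvalues +a and -a, so requiring it to be PSD forces a = 0; here a is the inner product of two successive gradients, which, together with the scalar constraint (x - x_prev) * gx == 0, encodes exact line search. A quick standalone sanity check in plain numpy, independent of PEPit:

    import numpy as np

    def is_psd(matrix, tol=1e-12):
        # A symmetric matrix is PSD iff all its eigenvalues are (numerically) nonnegative.
        return bool(np.all(np.linalg.eigvalsh(matrix) >= -tol))

    for a in [0.0, 0.3, -0.3]:
        print(a, is_psd(np.array([[0.0, a], [a, 0.0]])))  # True only when a == 0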
-------------------------------------------------------------------------------- /tests/additional_complexified_examples_tests/inexact_gradient_exact_line_search.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | from PEPit import PEP
4 | from PEPit.functions import SmoothStronglyConvexFunction
5 | from PEPit.point import Point
6 | 
7 | 
8 | def wc_inexact_gradient_exact_line_search_complexified(L, mu, epsilon, n, verbose=1):
9 | """
10 | See description in `PEPit/examples/unconstrained_convex_minimization/inexact_gradient_exact_line_search.py`.
11 | This example is for testing purposes; the worst-case result is supposed to be the same as that of the other routine,
12 | but the gradient inexactness and the exact line-search steps are imposed via LMI and scalar constraints (instead of
13 | relying on the `inexact_gradient_step` and `exact_linesearch_step` primitives).
14 | 
15 | Args:
16 | L (float): the smoothness parameter.
17 | mu (float): the strong convexity parameter.
18 | epsilon (float): level of inaccuracy.
19 | n (int): number of iterations.
20 | verbose (int): Level of information details to print.
21 | 
22 | - -1: No verbose at all.
23 | - 0: This example's output.
24 | - 1: This example's output + PEPit information.
25 | - 2: This example's output + PEPit information + CVXPY details.
26 | 
27 | Returns:
28 | pepit_tau (float): worst-case value
29 | theoretical_tau (float): theoretical value
30 | 
31 | Example:
32 | >>> pepit_tau, theoretical_tau = wc_inexact_gradient_exact_line_search_complexified(L=1, mu=0.1, epsilon=0.1, n=2, verbose=1)
33 | (PEPit) Setting up the problem: size of the main PSD matrix: 7x7
34 | (PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
35 | (PEPit) Setting up the problem: Adding initial conditions and general constraints ...
36 | (PEPit) Setting up the problem: initial conditions and general constraints (1 constraint(s) added)
37 | (PEPit) Setting up the problem: 2 lmi constraint(s) added
38 | Size of PSD matrix 1: 2x2
39 | Size of PSD matrix 2: 2x2
40 | (PEPit) Setting up the problem: interpolation conditions for 1 function(s)
41 | function 1 : Adding 14 scalar constraint(s) ...
42 | function 1 : 14 scalar constraint(s) added
43 | (PEPit) Compiling SDP
44 | (PEPit) Calling SDP solver
45 | (PEPit) Solver status: optimal (solver: SCS); optimal value: 0.5188573779100247
46 | *** Example file: worst-case performance of inexact gradient descent with exact linesearch ***
47 | PEPit guarantee: f(x_n)-f_* <= 0.518857 (f(x_0)-f_*)
48 | Theoretical guarantee: f(x_n)-f_* <= 0.518917 (f(x_0)-f_*)
49 | 
50 | """
51 | 
52 | # Instantiate PEP
53 | problem = PEP()
54 | 
55 | # Declare a strongly convex smooth function
56 | func = problem.declare_function(SmoothStronglyConvexFunction, mu=mu, L=L)
57 | 
58 | # Start by defining its unique optimal point xs = x_* and corresponding function value fs = f_*
59 | xs = func.stationary_point()
60 | fs = func(xs)
61 | 
62 | # Then define the starting point x0 of the algorithm as well as corresponding gradient and function value g0 and f0
63 | x0 = problem.set_initial_point()
64 | gx0, _ = func.oracle(x0)
65 | 
66 | # Set the initial constraint that is the difference between f0 and f_*
67 | problem.set_initial_condition(func(x0) - fs <= 1)
68 | 
69 | # Run n steps of the inexact gradient method with ELS
70 | x = x0
71 | gx = gx0
72 | for i in range(n):
73 | gx_prev = gx
74 | x_prev = x
75 | x = Point()
76 | gx, fx = func.oracle(x)
77 | 
78 | matrix_of_expressions = np.array([[epsilon * gx_prev ** 2, gx_prev * gx], [gx_prev * gx, epsilon * gx ** 2]])  # PSD iff (gx_prev * gx)^2 <= epsilon^2 * gx_prev^2 * gx^2
79 | problem.add_psd_matrix(matrix_of_expressions=matrix_of_expressions)
80 | func.add_constraint((x - x_prev) * gx == 0)  # stationarity of the line search along the step direction
81 | 
82 | # Set the performance metric to the function value accuracy
83 | problem.set_performance_metric(func(x) - fs)
84 | 
85 | # Solve the PEP
86 | pepit_verbose = max(verbose, 0)
87 | pepit_tau = problem.solve(verbose=pepit_verbose)
88 | 
89 | # Compute theoretical guarantee (for comparison)
90 | Leps = (1 + epsilon) * L
91 | meps = (1 - epsilon) * mu
92 | theoretical_tau = ((Leps - meps) / (Leps + meps)) ** (2 * n)
93 | 
94 | # For testing purposes: evaluate a point that was not created through the PEP object.
95 | x.eval()
96 | 
97 | # Print conclusion if required
98 | if verbose != -1:
99 | print('*** Example file: worst-case performance of inexact gradient descent with exact linesearch ***')
100 | print('\tPEPit guarantee:\t f(x_n)-f_* <= {:.6} (f(x_0)-f_*)'.format(pepit_tau))
101 | print('\tTheoretical guarantee:\t f(x_n)-f_* <= {:.6} (f(x_0)-f_*)'.format(theoretical_tau))
102 | # Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
103 | return pepit_tau, theoretical_tau
104 | 
105 | 
106 | if __name__ == "__main__":
107 | pepit_tau, theoretical_tau = wc_inexact_gradient_exact_line_search_complexified(L=1, mu=0.1, epsilon=0.1, n=2, verbose=1)
108 | 
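The LMI above is a compact encoding of relative gradient inexactness: a 2x2 symmetric matrix [[a, b], [b, c]] with a, c >= 0 is PSD iff its determinant ac - b^2 is nonnegative, so the constraint is equivalent to (gx_prev * gx)^2 <= epsilon^2 * ||gx_prev||^2 * ||gx||^2, a Cauchy-Schwarz-type bound on the inner product of successive gradients. A standalone numerical check, independent of PEPit:

    import numpy as np

    def lmi_holds(g_prev, g, eps, tol=1e-12):
        # Build the 2x2 matrix used above with concrete vectors and test it for PSD-ness.
        a, b, c = eps * g_prev @ g_prev, g_prev @ g, eps * g @ g
        return bool(np.all(np.linalg.eigvalsh(np.array([[a, b], [b, c]])) >= -tol))

    g_prev, g = np.array([1.0, 0.0]), np.array([0.05, 1.0])
    # Holds iff |<g_prev, g>| <= eps * ||g_prev|| * ||g||; here |<g_prev, g>| = 0.05.
    print(lmi_holds(g_prev, g, eps=0.1))   # True  (0.05 <= 0.1 * 1 * ~1.0012)
    print(lmi_holds(g_prev, g, eps=0.01))  # False (0.05 >  0.01 * 1 * ~1.0012)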
-------------------------------------------------------------------------------- /tests/additional_complexified_examples_tests/inexact_gradient_exact_line_search2.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | from PEPit import PEP
4 | from PEPit.functions import SmoothStronglyConvexFunction
5 | from PEPit.point import Point
6 | 
7 | 
8 | def wc_inexact_gradient_exact_line_search_complexified2(L, mu, epsilon, n, verbose=1):
9 | """
10 | See description in `PEPit/examples/unconstrained_convex_minimization/inexact_gradient_exact_line_search.py`.
11 | This example is for testing purposes; the worst-case result is supposed to be the same as that of the other routine,
12 | but the gradient inexactness and the exact line-search steps are imposed via LMI constraints attached to the
13 | function itself (via `func.add_psd_matrix`) rather than to the problem, contrary to the previous variant.
14 | 
15 | Args:
16 | L (float): the smoothness parameter.
17 | mu (float): the strong convexity parameter.
18 | epsilon (float): level of inaccuracy.
19 | n (int): number of iterations.
20 | verbose (int): Level of information details to print.
21 | 
22 | - -1: No verbose at all.
23 | - 0: This example's output.
24 | - 1: This example's output + PEPit information.
25 | - 2: This example's output + PEPit information + CVXPY details.
26 | 
27 | Returns:
28 | pepit_tau (float): worst-case value
29 | theoretical_tau (float): theoretical value
30 | 
31 | Example:
32 | >>> pepit_tau, theoretical_tau = wc_inexact_gradient_exact_line_search_complexified2(L=1, mu=0.1, epsilon=0.1, n=2, verbose=1)
33 | (PEPit) Setting up the problem: size of the main PSD matrix: 7x7
34 | (PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
35 | (PEPit) Setting up the problem: Adding initial conditions and general constraints ...
36 | (PEPit) Setting up the problem: initial conditions and general constraints (1 constraint(s) added)
37 | (PEPit) Setting up the problem: interpolation conditions for 1 function(s)
38 | function 1 : Adding 14 scalar constraint(s) ...
39 | function 1 : 14 scalar constraint(s) added
40 | function 1 : Adding 2 lmi constraint(s) ...
41 | Size of PSD matrix 1: 2x2
42 | Size of PSD matrix 2: 2x2
43 | function 1 : 2 lmi constraint(s) added
44 | (PEPit) Compiling SDP
45 | (PEPit) Calling SDP solver
46 | (PEPit) Solver status: optimal (solver: SCS); optimal value: 0.5188471810673873
47 | *** Example file: worst-case performance of inexact gradient descent with exact linesearch ***
48 | PEPit guarantee: f(x_n)-f_* <= 0.518847 (f(x_0)-f_*)
49 | Theoretical guarantee: f(x_n)-f_* <= 0.518917 (f(x_0)-f_*)
50 | 
51 | """
52 | 
53 | # Instantiate PEP
54 | problem = PEP()
55 | 
56 | # Declare a strongly convex smooth function
57 | func = problem.declare_function(SmoothStronglyConvexFunction, mu=mu, L=L)
58 | 
59 | # Start by defining its unique optimal point xs = x_* and corresponding function value fs = f_*
60 | xs = func.stationary_point()
61 | fs = func(xs)
62 | 
63 | # Then define the starting point x0 of the algorithm as well as corresponding gradient and function value g0 and f0
64 | x0 = problem.set_initial_point()
65 | gx0, _ = func.oracle(x0)
66 | 
67 | # Set the initial constraint that is the difference between f0 and f_*
68 | problem.set_initial_condition(func(x0) - fs <= 1)
69 | 
70 | # Run n steps of the inexact gradient method with ELS
71 | x = x0
72 | gx = gx0
73 | for i in range(n):
74 | gx_prev = gx
75 | x_prev = x
76 | x = Point()
77 | gx, fx = func.oracle(x)
78 | 
79 | matrix_of_expressions = np.array([[epsilon * gx_prev ** 2, gx_prev * gx], [gx_prev * gx, epsilon * gx ** 2]])  # PSD iff (gx_prev * gx)^2 <= epsilon^2 * gx_prev^2 * gx^2
80 | func.add_psd_matrix(matrix_of_expressions=matrix_of_expressions)  # LMI attached to the function, not to the problem
81 | func.add_constraint((x - x_prev) * gx == 0)  # stationarity of the line search along the step direction
82 | 
83 | # Set the performance metric to the function value accuracy
84 | problem.set_performance_metric(func(x) - fs)
85 | 
86 | # Solve the PEP
87 | pepit_verbose = max(verbose, 0)
88 | pepit_tau = problem.solve(verbose=pepit_verbose)
89 | 
90 | # Compute theoretical guarantee (for comparison)
91 | Leps = (1 + epsilon) * L
92 | meps = (1 - epsilon) * mu
93 | theoretical_tau = ((Leps - meps) / (Leps + meps)) ** (2 * n)
94 | 
95 | # 
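The only substantive difference between this variant and the previous file is where the LMI is attached, side by side (excerpts of the two files above):

    # Variant 1 (previous file): the LMI belongs to the problem.
    problem.add_psd_matrix(matrix_of_expressions=matrix_of_expressions)

    # Variant 2 (this file): the LMI belongs to the function, and is therefore reported
    # together with its interpolation conditions in the verbose output.
    func.add_psd_matrix(matrix_of_expressions=matrix_of_expressions)

As the two docstrings show, the attachment point changes the bookkeeping but not the worst-case value beyond solver accuracy (0.518857 vs 0.518847).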
Print conclusion if required
96 | if verbose != -1:
97 | print('*** Example file: worst-case performance of inexact gradient descent with exact linesearch ***')
98 | print('\tPEPit guarantee:\t f(x_n)-f_* <= {:.6} (f(x_0)-f_*)'.format(pepit_tau))
99 | print('\tTheoretical guarantee:\t f(x_n)-f_* <= {:.6} (f(x_0)-f_*)'.format(theoretical_tau))
100 | 
101 | # Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
102 | return pepit_tau, theoretical_tau
103 | 
104 | 
105 | if __name__ == "__main__":
106 | pepit_tau, theoretical_tau = wc_inexact_gradient_exact_line_search_complexified2(L=1, mu=0.1, epsilon=0.1, n=2, verbose=1)
107 | 
-------------------------------------------------------------------------------- /tests/additional_complexified_examples_tests/proximal_point.py: --------------------------------------------------------------------------------
1 | from PEPit import PEP
2 | from PEPit.functions import ConvexFunction
3 | from PEPit.primitive_steps import proximal_step
4 | 
5 | 
6 | def wc_proximal_point_complexified(gamma, n, verbose=1):
7 | """
8 | See description in `PEPit/examples/unconstrained_convex_minimization/proximal_point.py`.
9 | This example is for testing purposes; the worst-case result is supposed to be the same as that of the other routine,
10 | but the parameterization is different (the convex function to be minimized is explicitly formed as a sum of two convex
11 | functions). That is, the minimization problem is
12 | 
13 | .. math:: f_\\star = \\min_x \\{f(x) \\equiv f_1(x) + f_2(x)\\},
14 | 
15 | where :math:`f_1` and :math:`f_2` are closed, proper, and convex (and potentially non-smooth) functions.
16 | 
17 | Args:
18 | gamma (float): the step size parameter.
19 | n (int): number of iterations.
20 | verbose (int): Level of information details to print.
21 | 
22 | - -1: No verbose at all.
23 | - 0: This example's output.
24 | - 1: This example's output + PEPit information.
25 | - 2: This example's output + PEPit information + CVXPY details.
26 | 
27 | Returns:
28 | tuple: worst-case value, theoretical value
29 | 
30 | Example:
31 | >>> pepit_tau, theoretical_tau = wc_proximal_point_complexified(gamma=1, n=2, verbose=1)
32 | (PEPit) Setting up the problem: size of the main PSD matrix: 7x7
33 | (PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
34 | (PEPit) Setting up the problem: Adding initial conditions and general constraints ...
35 | (PEPit) Setting up the problem: initial conditions and general constraints (1 constraint(s) added)
36 | (PEPit) Setting up the problem: interpolation conditions for 2 function(s)
37 | function 1 : Adding 6 scalar constraint(s) ...
38 | function 1 : 6 scalar constraint(s) added
39 | function 2 : Adding 6 scalar constraint(s) ...
40 | function 2 : 6 scalar constraint(s) added
41 | (PEPit) Compiling SDP
42 | (PEPit) Calling SDP solver
43 | (PEPit) Solver status: optimal (solver: SCS); optimal value: 0.12500022120131604
44 | *** Example file: worst-case performance of the Proximal Point Method in function values ***
45 | PEPit guarantee: f(x_n)-f_* <= 0.125 ||x0 - xs||^2
46 | Theoretical guarantee: f(x_n)-f_* <= 0.125 ||x0 - xs||^2
47 | 
48 | """
49 | 
50 | # Instantiate PEP
51 | problem = PEP()
52 | 
53 | # Declare two convex functions; the objective to be minimized is their sum
54 | convex_1 = problem.declare_function(ConvexFunction)
55 | convex_2 = problem.declare_function(ConvexFunction)
56 | func = convex_1 + convex_2
57 | 
58 | # Start by defining its unique optimal point xs = x_* and its function value fs = F(x_*)
59 | xs = func.stationary_point()
60 | fs = func(xs)
61 | 
62 | # Then define the starting point x0 of the algorithm
63 | x0 = problem.set_initial_point()
64 | 
65 | # Set the initial constraint that is the distance between x0 and x^*
66 | problem.set_initial_condition((x0 - xs) ** 2 <= 1)
67 | 
68 | # Run the proximal point method
69 | x = x0
70 | for _ in range(n):
71 | x, _, fx = proximal_step(x, func, gamma)
72 | 
73 | # Set the performance metric to the final distance to optimum in function values
74 | problem.set_performance_metric(fx - fs)
75 | 
76 | # Solve the PEP
77 | pepit_verbose = max(verbose, 0)
78 | pepit_tau = problem.solve(verbose=pepit_verbose)
79 | 
80 | # Compute theoretical guarantee (for comparison)
81 | theoretical_tau = 1 / (4 * gamma * n)
82 | 
83 | # Print conclusion if required
84 | if verbose != -1:
85 | print('*** Example file: worst-case performance of the Proximal Point Method in function values ***')
86 | print('\tPEPit guarantee:\t f(x_n)-f_* <= {:.6} ||x0 - xs||^2'.format(pepit_tau))
87 | print('\tTheoretical guarantee:\t f(x_n)-f_* <= {:.6} ||x0 - xs||^2'.format(theoretical_tau))
88 | 
89 | # Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
90 | return pepit_tau, theoretical_tau
91 | 
92 | 
93 | if __name__ == "__main__":
94 | pepit_tau, theoretical_tau = wc_proximal_point_complexified(gamma=1, n=2, verbose=1)
95 | 
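The pattern shared by all these complexified proximal-point variants is PEPit's function arithmetic: declared functions can be summed, and the resulting object supports the same oracles as a directly declared one. A minimal sketch of the equivalence being tested, using only calls that appear in the file above:

    from PEPit import PEP
    from PEPit.functions import ConvexFunction
    from PEPit.primitive_steps import proximal_step

    problem = PEP()

    # Direct parameterization would be a single declared convex function;
    # the complexified parameterization forms an explicit sum of two.
    func = problem.declare_function(ConvexFunction) + problem.declare_function(ConvexFunction)

    x0 = problem.set_initial_point()
    x1, g1, f1 = proximal_step(x0, func, 1)  # the sum behaves like any other Function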
-------------------------------------------------------------------------------- /tests/additional_complexified_examples_tests/proximal_point_LMI.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | from PEPit import PEP
4 | from PEPit.functions import ConvexFunction
5 | from PEPit.primitive_steps import proximal_step
6 | 
7 | 
8 | def wc_proximal_point_complexified3(gamma, n, verbose=1):
9 | """
10 | See description in `PEPit/examples/unconstrained_convex_minimization/proximal_point.py`.
11 | This example is for testing purposes; the worst-case result is supposed to be the same as that of the other routine,
12 | but the parameterization is different (the convex function to be minimized is explicitly formed as a sum of two convex
13 | functions). That is, the minimization problem is
14 | 
15 | .. math:: f_\\star = \\min_x \\{f(x) \\equiv f_1(x) + f_2(x)\\},
16 | 
17 | where :math:`f_1` and :math:`f_2` are closed, proper, and convex (and potentially non-smooth) functions.
18 | In addition, the initial condition is specified through an LMI rather than a scalar inequality.
19 | 
20 | Args:
21 | gamma (float): the step size parameter.
22 | n (int): number of iterations.
23 | verbose (int): Level of information details to print.
24 | 
25 | - -1: No verbose at all.
26 | - 0: This example's output.
27 | - 1: This example's output + PEPit information.
28 | - 2: This example's output + PEPit information + CVXPY details.
29 | 
30 | Returns:
31 | tuple: worst-case value, theoretical value
32 | 
33 | Example:
34 | >>> pepit_tau, theoretical_tau = wc_proximal_point_complexified3(gamma=1, n=2, verbose=1)
35 | (PEPit) Setting up the problem: size of the main PSD matrix: 7x7
36 | (PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
37 | (PEPit) Setting up the problem: Adding initial conditions and general constraints ...
38 | (PEPit) Setting up the problem: initial conditions and general constraints (0 constraint(s) added)
39 | (PEPit) Setting up the problem: 1 lmi constraint(s) added
40 | (PEPit) Setting up the problem: interpolation conditions for 2 function(s)
41 | function 1 : Adding 6 scalar constraint(s) ...
42 | function 1 : 6 scalar constraint(s) added
43 | function 2 : Adding 6 scalar constraint(s) ...
44 | function 2 : 6 scalar constraint(s) added
45 | (PEPit) Setting up the problem: constraints for 0 function(s)
46 | (PEPit) Compiling SDP
47 | (PEPit) Calling SDP solver
48 | (PEPit) Solver status: optimal (solver: SCS); optimal value: 0.12499999936719847
49 | (PEPit) Final upper bound (dual): 0.12500002829268117 and lower bound (primal example): 0.12499999936719847
50 | (PEPit) Duality gap: absolute: 2.892548270350126e-08 and relative: 2.3140386279947184e-07
51 | *** Example file: worst-case performance of the Proximal Point Method in function values ***
52 | PEPit guarantee: f(x_n)-f_* <= 0.125 ||x0 - xs||^2
53 | Theoretical guarantee: f(x_n)-f_* <= 0.125 ||x0 - xs||^2
54 | 
55 | """
56 | 
57 | # Instantiate PEP
58 | problem = PEP()
59 | 
60 | # Declare two convex functions; the objective to be minimized is their sum
61 | convex_1 = problem.declare_function(ConvexFunction)
62 | convex_2 = problem.declare_function(ConvexFunction)
63 | func = convex_1 + convex_2
64 | 
65 | # Start by defining its unique optimal point xs = x_* and its function value fs = F(x_*)
66 | xs = func.stationary_point()
67 | fs = func(xs)
68 | 
69 | # Then define the starting point x0 of the algorithm
70 | x0 = problem.set_initial_point()
71 | 
72 | # Set the initial constraint that is the distance between x0 and x^*, here encoded as an LMI:
73 | # problem.set_initial_condition((x0 - xs) ** 2 <= 1)
74 | # is equivalent to [[1, (x0 - xs) ** 2], [(x0 - xs) ** 2, 1]] >> 0, since the off-diagonal entry is nonnegative
75 | matrix_of_expressions_init = np.array([[1, (x0 - xs) ** 2], [(x0 - xs) ** 2, 1]])
76 | problem.add_psd_matrix(matrix_of_expressions=matrix_of_expressions_init)
77 | 
78 | 
79 | # Run the proximal point method
80 | x = x0
81 | for _ in range(n):
82 | x, _, fx = proximal_step(x, func, gamma)
83 | 
84 | # Set the performance metric to the final distance to optimum in function values
85 | problem.set_performance_metric(fx - fs)
86 | 
87 | # Solve the PEP
88 | pepit_verbose = max(verbose, 0)
89 | pepit_tau = problem.solve(verbose=pepit_verbose)
90 | 
91 | # Compute theoretical guarantee (for comparison)
92 | theoretical_tau = 1 / (4 * gamma * n)
93 | 
94 | # Print conclusion if required
95 | if verbose != -1:
96 | print('*** Example file: worst-case performance of the Proximal Point Method in function values ***')
97 | print('\tPEPit guarantee:\t f(x_n)-f_* <= {:.6} ||x0 - xs||^2'.format(pepit_tau))
98 | print('\tTheoretical guarantee:\t f(x_n)-f_* <= {:.6} ||x0 - xs||^2'.format(theoretical_tau))
99 | 
100 | # Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
101 | return pepit_tau, theoretical_tau
102 | 
103 | 
104 | if __name__ == "__main__":
105 | pepit_tau, theoretical_tau = wc_proximal_point_complexified3(gamma=1, n=2, verbose=1)
106 | 
-------------------------------------------------------------------------------- /tests/additional_complexified_examples_tests/proximal_point_useless_partition.py: --------------------------------------------------------------------------------
1 | from PEPit import PEP
2 | from PEPit.functions import ConvexFunction
3 | from PEPit.primitive_steps import proximal_step
4 | 
5 | 
6 | def wc_proximal_point_complexified2(gamma, n, verbose=1):
7 | """
8 | See description in `PEPit/examples/unconstrained_convex_minimization/proximal_point.py`.
9 | This example is for testing purposes; the worst-case result is supposed to be the same as that of the other routine,
10 | but the parameterization is different (the convex function to be minimized is explicitly formed as a sum of two convex
11 | functions). That is, the minimization problem is
12 | 
13 | .. math:: f_\\star = \\min_x \\{f(x) \\equiv f_1(x) + f_2(x)\\},
14 | 
15 | where :math:`f_1` and :math:`f_2` are closed, proper, and convex (and potentially non-smooth) functions. In addition, a block partition is declared and used in a trivial way, so the worst-case result should remain unchanged.
16 | 
17 | Args:
18 | gamma (float): the step size parameter.
19 | n (int): number of iterations.
20 | verbose (int): Level of information details to print.
21 | - -1: No verbose at all.
22 | - 0: This example's output.
23 | - 1: This example's output + PEPit information.
24 | - 2: This example's output + PEPit information + CVXPY details.
25 | 
26 | Returns:
27 | tuple: worst-case value, theoretical value
28 | 
29 | Example:
30 | >>> pepit_tau, theoretical_tau = wc_proximal_point_complexified2(gamma=1, n=2, verbose=1)
31 | (PEPit) Setting up the problem: size of the main PSD matrix: 15x15
32 | (PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
33 | (PEPit) Setting up the problem: Adding initial conditions and general constraints ...
34 | (PEPit) Setting up the problem: initial conditions and general constraints (1 constraint(s) added)
35 | (PEPit) Setting up the problem: interpolation conditions for 2 function(s)
36 | function 1 : Adding 6 scalar constraint(s) ...
37 | function 1 : 6 scalar constraint(s) added
38 | function 2 : Adding 6 scalar constraint(s) ...
39 | function 2 : 6 scalar constraint(s) added
40 | (PEPit) Setting up the problem: constraints for 0 function(s)
41 | (PEPit) Setting up the problem: 1 partition(s) added
42 | partition 1 with 5 blocks: Adding 40 scalar constraint(s)...
43 | partition 1 with 5 blocks: 40 scalar constraint(s) added
44 | (PEPit) Compiling SDP
45 | (PEPit) Calling SDP solver
46 | (PEPit) Solver status: optimal (solver: SCS); optimal value: 0.12500080627897306
47 | *** Example file: worst-case performance of the Proximal Point Method in function values ***
48 | PEPit guarantee: f(x_n)-f_* <= 0.125001 ||x0 - xs||^2
49 | Theoretical guarantee: f(x_n)-f_* <= 0.125 ||x0 - xs||^2
50 | 
51 | """
52 | 
53 | # Instantiate PEP
54 | problem = PEP()
55 | partition = problem.declare_block_partition(d=5)
56 | 
57 | # Declare two convex functions; the objective to be minimized is their sum
58 | convex_1 = problem.declare_function(ConvexFunction)
59 | convex_2 = problem.declare_function(ConvexFunction)
60 | func = convex_1 + convex_2
61 | 
62 | # Start by defining its unique optimal point xs = x_* and its function value fs = F(x_*)
63 | xs = func.stationary_point()
64 | _ = partition.get_block(xs, 1)  # useless partition
65 | fs = func(xs)
66 | 
67 | # Then define the starting point x0 of the algorithm
68 | x0 = problem.set_initial_point()
69 | _ = partition.get_block(x0, 1)  # useless partition
70 | 
71 | # Set the initial constraint that is the distance between x0 and x^*
72 | problem.set_initial_condition((x0 - xs) ** 2 <= 1)
73 | 
74 | # Run the proximal point method
75 | x = x0
76 | for _ in range(n):
77 | x, _, fx = proximal_step(x, func, gamma)
78 | 
79 | # Set the performance metric to the final distance to optimum in function values
80 | problem.set_performance_metric(fx - fs)
81 | 
82 | # Solve the PEP
83 | pepit_verbose = max(verbose, 0)
84 | pepit_tau = problem.solve(verbose=pepit_verbose)
85 | 
86 | # Compute theoretical guarantee (for comparison)
87 | theoretical_tau = 1 / (4 * gamma * n)
88 | 
89 | # Print conclusion if required
90 | if verbose != -1:
91 | print('*** Example file: worst-case performance of the Proximal Point Method in function values ***')
92 | print('\tPEPit guarantee:\t f(x_n)-f_* <= {:.6} ||x0 - xs||^2'.format(pepit_tau))
93 | print('\tTheoretical guarantee:\t f(x_n)-f_* <= {:.6} ||x0 - xs||^2'.format(theoretical_tau))
94 | 
95 | # Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
96 | return pepit_tau, theoretical_tau
97 | 
98 | 
99 | if __name__ == "__main__":
100 | pepit_tau, theoretical_tau = wc_proximal_point_complexified2(gamma=1, n=2, verbose=1)
101 | 
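Block partitions, used trivially above, are worth a minimal illustration. A sketch based only on the API calls appearing in this file and in tests/test_block_partition.py below (the value d=3 is arbitrary):

    from PEPit import PEP

    problem = PEP()
    partition = problem.declare_block_partition(d=3)  # partition of the space into 3 blocks

    x = problem.set_initial_point()
    blocks = [partition.get_block(x, i) for i in range(3)]
    # The blocks decompose x: their sum has the same decomposition as x itself
    # (this is the property asserted by test_decompose in tests/test_block_partition.py),
    # and PEPit adds the corresponding scalar constraints when the problem is set up.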
-------------------------------------------------------------------------------- /tests/test_block_partition.py: --------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from PEPit import PEP
4 | from PEPit.block_partition import BlockPartition
5 | from PEPit.point import Point
6 | 
7 | 
8 | class TestBlockPartition(unittest.TestCase):
9 | 
10 | def setUp(self):
11 | self.problem = PEP()
12 | 
13 | self.point1 = Point()
14 | self.point2 = Point()
15 | self.point3 = Point()
16 | 
17 | self.partition0 = BlockPartition(d=2)
18 | self.partition1 = BlockPartition(d=2)
19 | self.partition2 = self.problem.declare_block_partition(d=5)
20 | self.partition3 = self.problem.declare_block_partition(d=1)
21 | 
22 | self.partition0.get_block(self.point1, 0)
23 | 
24 | self.partition1.get_block(self.point1, 0)
25 | self.partition1.get_block(self.point2, 1)
26 | self.partition1.get_block(self.point3, 1)
27 | 
28 | self.partition2.get_block(self.point1, 0)
29 | self.partition2.get_block(self.point1, 2)
30 | self.partition2.get_block(self.point2, 4)
31 | self.partition2.get_block(self.point2, 2)
32 | 
33 | self.partition0.add_partition_constraints()
34 | 
self.partition1.add_partition_constraints() 35 | self.partition2.add_partition_constraints() 36 | 37 | def test_instances(self): 38 | self.assertIsInstance(self.partition0, BlockPartition) 39 | 40 | def test_counter(self): 41 | self.assertIs(BlockPartition.counter, 4) 42 | 43 | def test_list_size(self): 44 | self.assertIs(len(BlockPartition.list_of_partitions), 4) 45 | 46 | def test_list_elements(self): 47 | self.assertIs(BlockPartition.list_of_partitions[0], self.partition0) 48 | self.assertIs(BlockPartition.list_of_partitions[1], self.partition1) 49 | self.assertIs(BlockPartition.list_of_partitions[2], self.partition2) 50 | 51 | def test_same_blocks(self): 52 | pt1 = self.partition1.get_block(self.point1, 0) 53 | pt2 = self.partition1.get_block(self.point1, 0) 54 | self.assertIsInstance(pt1, Point) 55 | self.assertIsInstance(pt2, Point) 56 | self.assertEqual(pt1.decomposition_dict, pt2.decomposition_dict) 57 | self.assertEqual(self.partition1.get_block(self.point1, 1), self.partition1.get_block(self.point1, 1)) 58 | self.assertEqual(self.partition1.get_block(self.point2, 0), self.partition1.get_block(self.point2, 0)) 59 | self.assertEqual(self.partition1.get_block(self.point2, 1), self.partition1.get_block(self.point2, 1)) 60 | 61 | def test_no_partition(self): 62 | self.assertEqual(self.partition3.get_block(self.point1, 0).decomposition_dict, self.point1.decomposition_dict) 63 | self.assertEqual(self.partition3.get_block(self.point2, 0).decomposition_dict, self.point2.decomposition_dict) 64 | self.assertEqual(self.partition3.get_block(self.point3, 0).decomposition_dict, self.point3.decomposition_dict) 65 | 66 | def test_sizes(self): 67 | self.assertIs(self.partition1.get_nb_blocks(), 2) 68 | self.assertIs(self.partition2.get_nb_blocks(), 5) 69 | 70 | def test_number_of_points(self): 71 | self.assertIs(len(self.partition1.blocks_dict), 3) 72 | self.assertIs(len(self.partition2.blocks_dict), 2) 73 | 74 | def test_constraints(self): 75 | self.assertIs(len(self.partition0.list_of_constraints), 1) 76 | self.assertIs(len(self.partition1.list_of_constraints), 9) 77 | 78 | def test_decompose(self): 79 | pt1 = self.partition1.get_block(self.point1, 0) + self.partition1.get_block(self.point1, 1) 80 | pt2 = self.point1 81 | self.assertIs(pt1.decomposition_dict[self.point1], pt2.decomposition_dict[self.point1]) 82 | -------------------------------------------------------------------------------- /tests/test_block_smooth_convex_functions.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from PEPit import PEP 4 | from PEPit.point import Point 5 | from PEPit.function import Function 6 | from PEPit.functions.block_smooth_convex_function import BlockSmoothConvexFunction 7 | from PEPit.functions.smooth_convex_function import SmoothConvexFunction 8 | 9 | 10 | class TestConstraintsBlockSmoothConvex(unittest.TestCase): 11 | 12 | def setUp(self): 13 | self.pep = PEP() 14 | 15 | self.L1 = 1. 16 | self.L2 = [1.] 17 | self.L3 = [1., 2., 10.] 
18 | 19 | self.partition2 = self.pep.declare_block_partition(d=1) 20 | self.partition3 = self.pep.declare_block_partition(d=3) 21 | 22 | self.func1 = SmoothConvexFunction(L=self.L1) 23 | self.func2 = BlockSmoothConvexFunction(L=self.L2, partition=self.partition2) 24 | self.func3 = BlockSmoothConvexFunction(L=self.L3, partition=self.partition3) 25 | 26 | self.point1 = Point(is_leaf=True, decomposition_dict=None) 27 | self.point2 = Point(is_leaf=True, decomposition_dict=None) 28 | self.point3 = Point(is_leaf=True, decomposition_dict=None) 29 | 30 | self.func1.gradient(self.point1) 31 | self.func1.gradient(self.point2) 32 | self.func1.gradient(self.point3) 33 | self.func2.gradient(self.point1) 34 | self.func2.gradient(self.point2) 35 | self.func2.gradient(self.point3) 36 | self.func3.gradient(self.point1) 37 | self.func3.gradient(self.point2) 38 | self.func3.gradient(self.point3) 39 | 40 | def test_is_instance(self): 41 | self.assertIsInstance(self.func1, Function) 42 | self.assertIsInstance(self.func2, Function) 43 | self.assertIsInstance(self.func3, Function) 44 | self.assertIsInstance(self.func3, BlockSmoothConvexFunction) 45 | self.assertIsInstance(self.func2, BlockSmoothConvexFunction) 46 | self.assertIsInstance(self.func1, SmoothConvexFunction) 47 | 48 | def test_sizes(self): 49 | self.assertEqual(len(self.func2.L), 1) 50 | self.assertEqual(len(self.func3.L), 3) 51 | 52 | def test_interpolation_numbers(self): 53 | self.func1.set_class_constraints() 54 | self.func2.set_class_constraints() 55 | self.func3.set_class_constraints() 56 | 57 | self.assertEqual(len(self.func1.list_of_class_constraints), 6) 58 | self.assertEqual(len(self.func2.list_of_class_constraints), 6) 59 | self.assertEqual(len(self.func3.list_of_class_constraints), 18) 60 | -------------------------------------------------------------------------------- /tests/test_dict_operations.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from PEPit.tools.dict_operations import merge_dict, prune_dict, multiply_dicts, symmetrize_dict 4 | 5 | 6 | class TestDictOperations(unittest.TestCase): 7 | 8 | def setUp(self): 9 | self.dict1 = {'a': 5, 'b': 6, 'q': 11} 10 | self.dict2 = {'a': 2, 'b': 8, 'w': 0} 11 | 12 | def test_merge_dict(self): 13 | summed_dict = {'a': 7, 'b': 14, 'q': 11, 'w': 0} 14 | self.assertEqual(merge_dict(dict1=self.dict1, dict2=self.dict2), summed_dict) 15 | 16 | def test_multiply_dicts(self): 17 | 18 | product_dict = {('a', 'a'): 10, ('a', 'b'): 40, ('a', 'w'): 0, 19 | ('b', 'a'): 12, ('b', 'b'): 48, ('b', 'w'): 0, 20 | ('q', 'a'): 22, ('q', 'b'): 88, ('q', 'w'): 0, 21 | } 22 | self.assertEqual(multiply_dicts(dict1=self.dict1, dict2=self.dict2), product_dict) 23 | 24 | def test_prune_dict(self): 25 | self.assertEqual(prune_dict(self.dict2), {'a': 2, 'b': 8}) 26 | 27 | def test_symmetrize_dict(self): 28 | symmetric_product_dict = {'a': 5, 'b': 6, 'q': 11, 29 | ('a', 'a'): 10, ('a', 'b'): 26, ('a', 'w'): 0, ('w', 'a'): 0, 30 | ('b', 'a'): 26, ('b', 'b'): 48, ('b', 'w'): 0, ('w', 'b'): 0, 31 | ('q', 'a'): 11, ('q', 'b'): 44, ('q', 'w'): 0, 32 | ('a', 'q'): 11, ('b', 'q'): 44, ('w', 'q'): 0, 33 | } 34 | 35 | self.assertEqual(symmetrize_dict(merge_dict(self.dict1, multiply_dicts(dict1=self.dict1, dict2=self.dict2))), 36 | symmetric_product_dict) 37 | -------------------------------------------------------------------------------- /tests/test_expression.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 
3 | from PEPit import PEP 4 | from PEPit.point import Point 5 | from PEPit.expression import Expression 6 | from PEPit.constraint import Constraint 7 | 8 | 9 | class TestExpression(unittest.TestCase): 10 | 11 | def setUp(self): 12 | self.pep = PEP() 13 | 14 | self.point1 = Point(is_leaf=True, decomposition_dict=None) 15 | self.point2 = Point(is_leaf=True, decomposition_dict=None) 16 | 17 | self.inner_product = self.point1 * self.point2 18 | self.function_value = Expression(is_leaf=True, decomposition_dict=None, name="fx") 19 | 20 | def test_is_instance(self): 21 | 22 | self.assertIsInstance(self.inner_product, Expression) 23 | self.assertIsInstance(self.function_value, Expression) 24 | 25 | def test_name(self): 26 | 27 | self.assertIsNone(self.inner_product.get_name()) 28 | 29 | self.inner_product.set_name("x1*x2") 30 | 31 | self.assertEqual(self.inner_product.get_name(), "x1*x2") 32 | self.assertEqual(self.function_value.get_name(), "fx") 33 | 34 | def test_counter(self): 35 | 36 | composite_expression = self.inner_product + self.function_value 37 | self.assertIs(self.inner_product.counter, None) 38 | self.assertIs(self.function_value.counter, 0) 39 | self.assertIs(composite_expression.counter, None) 40 | self.assertIs(Expression.counter, 1) 41 | 42 | new_expression = Expression(is_leaf=True, decomposition_dict=None) 43 | self.assertIs(new_expression.counter, 1) 44 | self.assertIs(Expression.counter, 2) 45 | 46 | def test_linear_combination(self): 47 | 48 | new_expression = 1 + 2 * (4 - (- self.inner_product * 3) - 5 49 | + 2 * self.function_value - self.function_value / 5 + 2) 50 | 51 | self.assertIsInstance(new_expression, Expression) 52 | self.assertEqual(new_expression.decomposition_dict, {1: 3, 53 | (self.point1, self.point2): 6, 54 | self.function_value: 18 / 5}) 55 | 56 | def test_constraint(self): 57 | 58 | constraint = self.inner_product <= self.function_value 59 | 60 | self.assertIsInstance(constraint, Constraint) 61 | self.assertEqual(constraint.expression.decomposition_dict, (self.inner_product - self.function_value).decomposition_dict) 62 | -------------------------------------------------------------------------------- /tests/test_expression_to_matrices.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | 4 | from PEPit.point import Point 5 | from PEPit.expression import Expression 6 | 7 | from PEPit.tools.expressions_to_matrices import expression_to_matrices, expression_to_sparse_matrices 8 | 9 | 10 | class TestExpressionToMatrices(unittest.TestCase): 11 | 12 | def setUp(self): 13 | 14 | self.tearDown() 15 | 16 | self.point1 = Point() 17 | unused_point = Point() 18 | self.point2 = Point() 19 | unused_expr = Expression() 20 | self.expr = Expression() 21 | 22 | self.combined_expression = self.expr + self.point1 * self.point2 - 1 23 | 24 | def test_expression_to_matrices(self): 25 | 26 | # Run expression_to_matrices. 27 | Gweights, Fweights, cons = expression_to_matrices(self.combined_expression) 28 | 29 | # Compute expected outputs. 30 | Gweights_expected = np.array([[0., 0., 0.5], [0., 0., 0.], [0.5, 0., 0.]]) 31 | Fweights_expected = np.array([0., 1.]) 32 | cons_expected = -1 33 | 34 | # Compare the obtained outputs with the desired ones. 
35 | G_error = np.sum((Gweights - Gweights_expected)**2) 36 | F_error = np.sum((Fweights - Fweights_expected)**2) 37 | cons_error = (cons - cons_expected)**2 38 | 39 | self.assertEqual(G_error, 0) 40 | self.assertEqual(F_error, 0) 41 | self.assertEqual(cons_error, 0) 42 | 43 | def test_expression_to_sparse_matrices(self): 44 | 45 | # Run expression_to_sparse_matrices. 46 | Gweights_indi, Gweights_indj, Gweights_val,\ 47 | Fweights_ind, Fweights_val, cons_val = expression_to_sparse_matrices(self.combined_expression) 48 | 49 | # Compute expected outputs. 50 | Gweights_indi_expected = np.array([2]) 51 | Gweights_indj_expected = np.array([0]) 52 | Gweights_val_expected = np.array([0.5]) 53 | Fweights_ind_expected = np.array([1]) 54 | Fweights_val_expected = np.array([1]) 55 | cons_val_expected = -1 56 | 57 | # Compare the obtained outputs with the desired ones. 58 | Gweights_indi_error = np.sum((Gweights_indi - Gweights_indi_expected) ** 2) 59 | Gweights_indj_error = np.sum((Gweights_indj - Gweights_indj_expected) ** 2) 60 | Gweights_val_error = (Gweights_val - Gweights_val_expected) ** 2 61 | Fweights_ind_error = (Fweights_ind - Fweights_ind_expected) ** 2 62 | Fweights_val_error = (Fweights_val - Fweights_val_expected) ** 2 63 | cons_val_error = (cons_val - cons_val_expected) ** 2 64 | 65 | self.assertEqual(Gweights_indi_error, 0) 66 | self.assertEqual(Gweights_indj_error, 0) 67 | self.assertEqual(Gweights_val_error, 0) 68 | self.assertEqual(Fweights_ind_error, 0) 69 | self.assertEqual(Fweights_val_error, 0) 70 | self.assertEqual(cons_val_error, 0) 71 | 72 | def tearDown(self): 73 | 74 | Expression.counter = 0 75 | Expression.list_of_leaf_expressions = list() 76 | 77 | Point.counter = 0 78 | Point.list_of_leaf_points = list() 79 | -------------------------------------------------------------------------------- /tests/test_point.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from PEPit import PEP 4 | from PEPit.point import Point 5 | from PEPit.expression import Expression 6 | 7 | 8 | class TestPoint(unittest.TestCase): 9 | 10 | def setUp(self): 11 | self.pep = PEP() 12 | 13 | self.A = Point(is_leaf=True, decomposition_dict=None, name="pointA") 14 | self.B = Point(is_leaf=True, decomposition_dict=None) 15 | 16 | def test_is_instance(self): 17 | 18 | self.assertIsInstance(self.A, Point) 19 | self.assertIsInstance(self.B, Point) 20 | 21 | def test_name(self): 22 | 23 | self.assertEqual(self.A.get_name(), "pointA") 24 | 25 | self.assertIsNone(self.B.get_name()) 26 | self.B.set_name("pointB") 27 | self.assertEqual(self.B.get_name(), "pointB") 28 | 29 | C = self.A + self.B 30 | self.assertIsNone(C.get_name()) 31 | C.set_name("pointC") 32 | self.assertEqual(C.get_name(), "pointC") 33 | 34 | def test_counter(self): 35 | 36 | C = self.A + self.B 37 | self.assertIs(self.A.counter, 0) 38 | self.assertIs(self.B.counter, 1) 39 | self.assertIs(C.counter, None) 40 | self.assertIs(Point.counter, 2) 41 | 42 | D = Point(is_leaf=True, decomposition_dict=None) 43 | self.assertIs(D.counter, 2) 44 | self.assertIs(Point.counter, 3) 45 | 46 | def test_linear_combination(self): 47 | 48 | new_point = - self.A * 1. 
+ 2 * self.B - self.B / 5 49 | 50 | self.assertIsInstance(new_point, Point) 51 | self.assertEqual(new_point.decomposition_dict, {self.A: -1, self.B: 9 / 5}) 52 | 53 | def test_rmul_between_two_points(self): 54 | 55 | inner_product = self.A * self.B 56 | 57 | self.assertIsInstance(inner_product, Expression) 58 | self.assertFalse(inner_product._is_leaf) 59 | self.assertEqual(inner_product.decomposition_dict, {(self.A, self.B): 1}) 60 | 61 | def test_pow(self): 62 | 63 | norm_square = (self.A - self.B) ** 2 64 | 65 | self.assertIsInstance(norm_square, Expression) 66 | self.assertFalse(norm_square._is_leaf) 67 | self.assertEqual(norm_square.decomposition_dict, {(self.A, self.A): 1, 68 | (self.A, self.B): -1, 69 | (self.B, self.A): -1, 70 | (self.B, self.B): 1 71 | }) 72 | --------------------------------------------------------------------------------