├── .editorconfig ├── .github └── workflows │ ├── C.yml │ ├── R.yml │ ├── python.yml │ ├── rchk.yml │ └── revdepcheck.yml ├── .gitignore ├── .pre-commit-config.yaml ├── DEVEL-README.md ├── LICENSE ├── Makefile ├── README.md ├── TODO ├── TODO-hv ├── bibkeys.txt ├── c ├── .gitignore ├── Hypervolume_MEX.c ├── Makefile ├── NEWS.md ├── README.md ├── avl.c ├── avl.h ├── avl_tiny.h ├── bit_array.h ├── cmdline.c ├── cmdline.h ├── common.h ├── config.h ├── cvector.h ├── dominatedsets.c ├── eaf.c ├── eaf.h ├── eaf3d.c ├── eaf_main.c ├── eafdiff.c ├── epsilon.c ├── epsilon.h ├── gcc.mk ├── gcc_attribs.h ├── gitversion.mk ├── hv.c ├── hv.h ├── hv3dplus.c ├── hv4d.c ├── hv_contrib.c ├── hv_priv.h ├── igd.c ├── igd.h ├── io.c ├── io.h ├── io_priv.h ├── libhv.mk ├── libutil.c ├── main-hv.c ├── mt19937 │ ├── LICENSE.md │ ├── mt19937.c │ └── mt19937.h ├── ndsort.c ├── nondominated.c ├── nondominated.h ├── pareto.c ├── pow_int.h ├── rng.c ├── rng.h ├── sort.h ├── svnversion.mk ├── timer.c ├── timer.h ├── whv.c ├── whv.h ├── whv_hype.c ├── whv_hype.h └── ziggurat_constants.h ├── codecov.yml ├── python ├── .gitattributes ├── .gitignore ├── Makefile ├── README.md ├── benchmarks │ ├── bench.py │ ├── bench_epsilon.py │ ├── bench_hv.py │ ├── bench_igdplus.py │ ├── bench_ndom.py │ └── python-requirements.txt ├── doc │ ├── Makefile │ ├── conftest.py │ ├── make.bat │ └── source │ │ ├── REFERENCES.bib │ │ ├── _static │ │ ├── css │ │ │ └── custom.css │ │ ├── eps_bench-rmnk_10D_random_search-time.png │ │ ├── hv_bench-DTLZLinearShape.3d-time.png │ │ ├── hv_bench-DTLZLinearShape.4d-time.png │ │ ├── igd_plus_bench-ran.40000pts.3d-time.png │ │ ├── index_getting_started.svg │ │ ├── index_user_guide.svg │ │ ├── ndom_bench-ran3d-10k-time.png │ │ └── ndom_bench-test2D-200k-time.png │ │ ├── _templates │ │ └── autosummary │ │ │ ├── class.rst │ │ │ └── function.rst │ │ ├── conf.py │ │ ├── contribute │ │ └── index.rst │ │ ├── index.rst │ │ ├── my_unsrt_style.py │ │ ├── reference │ │ ├── 
functions.dominance.rst │ │ ├── functions.eaf.rst │ │ ├── functions.io.rst │ │ ├── functions.metrics.rst │ │ ├── functions.rst │ │ └── index.rst │ │ └── whatsnew │ │ └── index.rst ├── examples │ ├── README.rst │ ├── plot_hv_approx.py │ ├── plot_metrics.py │ └── plot_pandas.py ├── pyproject.toml ├── requirements_dev.txt ├── setup.py ├── src │ ├── conftest.py │ └── moocore │ │ ├── __init__.py │ │ ├── _datasets.py │ │ ├── _ffi_build.py │ │ ├── _moocore.py │ │ ├── _utils.py │ │ ├── data │ │ ├── CPFs.txt │ │ ├── input1.dat │ │ ├── spherical-250-10-3d.txt │ │ ├── tpls50x20_1_MWT.csv │ │ ├── uniform-250-10-3d.txt │ │ ├── wrots_l100w10_dat.xz │ │ └── wrots_l10w100_dat.xz │ │ ├── libmoocore │ │ └── libmoocore.h ├── tests │ ├── conftest.py │ ├── test_data │ │ ├── 100_diff_points_1.txt │ │ ├── 100_diff_points_2.txt │ │ ├── column_error.dat │ │ ├── duplicated3.inp │ │ ├── empty │ │ └── expected_output │ │ │ ├── R_generate_expected_output.R │ │ │ ├── eaf │ │ │ ├── ALG_1_dat_eaf.txt.xz │ │ │ ├── dat1_eaf.txt │ │ │ ├── pct_ALG_1_dat_eaf.txt.xz │ │ │ ├── pct_dat1_eaf.txt │ │ │ ├── pct_spherical_eaf.txt.xz │ │ │ ├── pct_uniform_eaf.txt.xz │ │ │ ├── pct_wrots_l100_eaf.txt.xz │ │ │ ├── pct_wrots_l10_eaf.txt.xz │ │ │ ├── spherical_eaf.txt.xz │ │ │ ├── uniform_eaf.txt.xz │ │ │ ├── wrots_l100_eaf.txt.xz │ │ │ └── wrots_l10_eaf.txt.xz │ │ │ ├── get_diff_eaf │ │ │ ├── int3_points12_get_diff_eaf.txt │ │ │ └── points12_get_diff_eaf.txt │ │ │ └── read_datasets │ │ │ ├── ALG_1_dat_read_datasets.txt.xz │ │ │ ├── dat1_read_datasets.txt │ │ │ ├── spherical_read_datasets.txt.xz │ │ │ ├── uniform_read_datasets.txt.xz │ │ │ ├── wrots_l100_read_datasets.txt │ │ │ └── wrots_l10_read_datasets.txt │ ├── test_moocore.py │ └── test_pandas.py └── tox.ini ├── r ├── .Rbuildignore ├── .Rbuildignore.cran ├── .gitignore ├── DESCRIPTION ├── Makefile ├── NAMESPACE ├── NEWS.md ├── R │ ├── eaf.R │ ├── eafdiff.R │ ├── epsilon.R │ ├── hv.R │ ├── igd.R │ ├── largest_eafdiff.R │ ├── moocore-package.R │ ├── 
nondominated.R │ ├── normalise.R │ ├── rbind_datasets.R │ ├── read_datasets.R │ ├── utils.R │ ├── vorob.R │ ├── whv.R │ └── zzz.R ├── README.md ├── _pkgdown.yml ├── cleanup ├── data │ ├── CPFs.rda │ ├── HybridGA.rda │ ├── SPEA2minstoptimeRichmond.rda │ ├── SPEA2relativeRichmond.rda │ ├── SPEA2relativeVanzyl.rda │ └── tpls50x20_1_MWT.rda ├── do.R ├── inst │ ├── COPYRIGHTS │ ├── REFERENCES.bib │ ├── WORDLIST │ └── extdata │ │ ├── ALG_1_dat.xz │ │ ├── ALG_2_dat.xz │ │ ├── example1_dat │ │ ├── rest.xz │ │ ├── spherical-250-10-3d.txt │ │ ├── tpls.xz │ │ ├── uniform-250-10-3d.txt │ │ ├── wrots_l100w10_dat │ │ └── wrots_l10w100_dat ├── man │ ├── CPFs.Rd │ ├── HybridGA.Rd │ ├── SPEA2minstoptimeRichmond.Rd │ ├── SPEA2relativeRichmond.Rd │ ├── SPEA2relativeVanzyl.Rd │ ├── Vorob.Rd │ ├── as_double_matrix.Rd │ ├── attsurf2df.Rd │ ├── choose_eafdiff.Rd │ ├── compute_eaf_call.Rd │ ├── compute_eafdiff_call.Rd │ ├── eaf.Rd │ ├── eaf_as_list.Rd │ ├── eafdiff.Rd │ ├── epsilon.Rd │ ├── hv_contributions.Rd │ ├── hypervolume.Rd │ ├── igd.Rd │ ├── largest_eafdiff.Rd │ ├── macros │ │ └── macros.Rd │ ├── moocore-package.Rd │ ├── nondominated.Rd │ ├── normalise.Rd │ ├── rbind_datasets.Rd │ ├── read_datasets.Rd │ ├── tpls50x20_1_MWT.Rd │ ├── transform_maximise.Rd │ ├── whv_hype.Rd │ ├── whv_rect.Rd │ └── write_datasets.Rd ├── src │ ├── Makevars │ ├── Makevars.ucrt │ ├── Makevars.win │ ├── Rcommon.h │ ├── Rmoocore.c │ ├── init.c │ ├── init.h │ ├── install.libs.R │ └── libmoocore ├── tests │ ├── spelling.R │ ├── testthat.R │ └── testthat │ │ ├── ALG_1_dat-eaf.rds │ │ ├── DTLZDiscontinuousShape.3d.front.1000pts.10.rds │ │ ├── SPEA2relativeRichmond-eaf.rds │ │ ├── _snaps │ │ └── eafdiff │ │ │ ├── eafdiff-ALG_1_dat-ALG_2_dat.csv.xz │ │ │ ├── eafdiff-tpls-rest.csv.xz │ │ │ ├── eafdiff-wrots_l10w100_dat-wrots_l100w10_dat-max-max.csv.xz │ │ │ ├── eafdiff-wrots_l10w100_dat-wrots_l100w10_dat-max-min.csv.xz │ │ │ ├── eafdiff-wrots_l10w100_dat-wrots_l100w10_dat-min-max.csv.xz │ │ │ └── 
eafdiff-wrots_l10w100_dat-wrots_l100w10_dat.csv.xz │ │ ├── duplicated3.inp │ │ ├── helper-common.R │ │ ├── lin.S-sph.S-diff.txt.xz │ │ ├── lin.S.txt │ │ ├── sph.S.txt │ │ ├── test-doctest-eafdiff.R │ │ ├── test-doctest-epsilon_additive.R │ │ ├── test-doctest-igd.R │ │ ├── test-doctest-vorob_t.R │ │ ├── test-doctest-whv_hype.R │ │ ├── test-eaf.R │ │ ├── test-eafdiff.R │ │ ├── test-epsilon.R │ │ ├── test-hv.R │ │ ├── test-igd.R │ │ ├── test-normalise.R │ │ ├── test-pareto-rank.R │ │ ├── test-vorob.R │ │ ├── test-whv.R │ │ ├── test-whv_hype.R │ │ └── test-write_datasets.R └── vignettes │ ├── .gitignore │ └── articles │ └── eaf.Rmd ├── release.py └── update_bib.sh /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: http://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | # Unix-style newlines with a newline ending every file 7 | [*] 8 | end_of_line = lf 9 | insert_final_newline = true 10 | trim_trailing_whitespace = true 11 | 12 | # Matches multiple files with brace expansion notation 13 | # 4 space indentation 14 | [*.{c,cpp,h,hpp,R,r,py}] 15 | charset = utf-8 16 | indent_style = space 17 | indent_size = 4 18 | 19 | [*.{R,r}] 20 | charset = utf-8 21 | indent_style = space 22 | indent_size = 2 # FIXME: We should change it to 4 when we start linting. 23 | -------------------------------------------------------------------------------- /.github/workflows/C.yml: -------------------------------------------------------------------------------- 1 | name: C 2 | 3 | on: 4 | push: 5 | branches-ignore: [gh-pages] 6 | paths: 7 | - '.github/workflows/C.yml' 8 | - 'c/Make*' 9 | - 'c/*.mk' 10 | - 'c/*.[ch]p?p?' 11 | 12 | pull_request: 13 | branches-ignore: [gh-pages] 14 | paths: 15 | - '.github/workflows/C.yml' 16 | - 'c/Make*' 17 | - 'c/*.mk' 18 | - 'c/*.[ch]p?p?' 
19 | # Manual run 20 | workflow_dispatch: 21 | 22 | concurrency: 23 | group: ${{ github.workflow }}-${{ github.ref }} 24 | cancel-in-progress: true 25 | 26 | jobs: 27 | build: 28 | if: "! contains(github.event.head_commit.message, '[skip ci]')" 29 | name: Build C code on ${{ matrix.os }} with ${{ matrix.cc }} 30 | runs-on: ${{ matrix.os }} 31 | strategy: 32 | fail-fast: false 33 | matrix: 34 | # macos-13 is an intel runner, macos-14 is apple silicon 35 | os: [ubuntu-22.04, windows-latest, macos-13, macos-14] 36 | cc: [ gcc ] 37 | include: 38 | - { os: macos-13, cc: clang } 39 | - { os: macos-14, cc: clang } 40 | - { os: ubuntu-24.04, cc: gcc-14 } 41 | env: 42 | CC: ${{ matrix.cc }} 43 | 44 | steps: 45 | - uses: actions/checkout@v4 46 | 47 | - name: Put MSYS2_MinGW64 on PATH 48 | if: runner.os == 'Windows' 49 | run: | 50 | echo "${{ runner.temp }}/msys64/mingw64/bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append 51 | echo "${{ runner.temp }}/msys64/mingw64/usr/bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append 52 | 53 | - run: $CC -v 54 | shell: bash 55 | 56 | - run: make -C c all DEBUG=1 WERROR=1 57 | if: runner.os != 'Windows' 58 | # FIXME: How to install sanitizers in Windows? 
59 | 60 | - run: make -C c all DEBUG=0 WERROR=1 61 | 62 | - name: Functional tests 63 | run: | 64 | ./bin/dominatedsets --help 65 | ./bin/dominatedsets --help 66 | ./bin/eaf --help 67 | ./bin/epsilon --help 68 | ./bin/hv --help 69 | ./bin/igd --help 70 | ./bin/ndsort --help 71 | ./bin/nondominated --help 72 | 73 | msys2: 74 | runs-on: windows-latest 75 | strategy: 76 | fail-fast: false 77 | matrix: 78 | sys: [ clang64 ] 79 | cc: [ clang ] 80 | #- { sys: mingw64, cc: gcc } 81 | #- { sys: ucrt64, cc: gcc } 82 | 83 | defaults: 84 | run: 85 | shell: msys2 {0} 86 | 87 | steps: 88 | - name: Disable autocrlf 89 | shell: pwsh 90 | run: | 91 | git config --global core.autocrlf false 92 | git config --global core.eol lf 93 | 94 | - uses: actions/checkout@v4 95 | 96 | - uses: msys2/setup-msys2@v2 97 | with: 98 | msystem: ${{ matrix.sys }} 99 | update: true 100 | install: >- 101 | make 102 | pacboy: >- 103 | ${{ matrix.cc }}:p 104 | 105 | - run: make -C c all DEBUG=1 WARN_FLAGS='-Werror' CC=${{ matrix.cc }} 106 | - run: make -C c all DEBUG=0 WARN_FLAGS='-Werror' CC=${{ matrix.cc }} 107 | 108 | - name: Functional tests 109 | run: | 110 | ./bin/dominatedsets --help 111 | ./bin/dominatedsets --help 112 | ./bin/eaf --help 113 | ./bin/epsilon --help 114 | ./bin/hv --help 115 | ./bin/igd --help 116 | ./bin/ndsort --help 117 | ./bin/nondominated --help 118 | -------------------------------------------------------------------------------- /.github/workflows/rchk.yml: -------------------------------------------------------------------------------- 1 | # adapted from a similar check run by {arrow} 2 | # Licensed to the Apache Software Foundation (ASF) under one 3 | # or more contributor license agreements. See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. 
The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, 13 | # software distributed under the License is distributed on an 14 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | # KIND, either express or implied. See the License for the 16 | # specific language governing permissions and limitations 17 | # under the License. 18 | name: rchk 19 | 20 | on: 21 | workflow_call: 22 | 23 | jobs: 24 | rchk: 25 | runs-on: ubuntu-latest 26 | name: Run rchk 27 | steps: 28 | - uses: actions/checkout@v4 29 | - uses: r-lib/actions/setup-r@v2 30 | with: 31 | r-version: 'devel' 32 | 33 | - name: Install minimal dependencies 34 | run: | 35 | install.packages(c("Rdpack","matrixStats")) 36 | shell: Rscript {0} 37 | working-directory: r 38 | 39 | - name: Build 40 | run: | 41 | R CMD build --no-build-vignettes r 42 | mkdir packages 43 | mv moocore_*.tar.gz packages 44 | 45 | - name: Run rchk 46 | run: | 47 | docker run -v `pwd`/packages:/rchk/packages kalibera/rchk:latest /rchk/packages/moocore_*.tar.gz |& tee rchk.out 48 | - name: Confirm that rchk has no errors 49 | # Suspicious call, [UP], and [PB] are all of the error types currently at 50 | # https://github.com/kalibera/cran-checks/tree/HEAD/rchk/results 51 | # though this might not be exhaustive, there does not appear to be a way to have rchk return an error code 52 | # CRAN also will remove some of the outputs (especially those related to Rcpp and strptime, e.g. 
53 | # ERROR: too many states (abstraction error?)) 54 | # https://github.com/kalibera/rchk 55 | run: | 56 | if [ $(grep -Fc "Suspicious call" rchk.out) -gt 0 ] || [ $(grep -F "[UP]" rchk.out | grep -Fvc "results will be incomplete") -gt 0 ] || [ $(grep -Fc "[PB]" rchk.out) -gt 0 ]; then 57 | echo "Found rchk errors" 58 | exit 1 59 | fi 60 | if: always() 61 | -------------------------------------------------------------------------------- /.github/workflows/revdepcheck.yml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_dispatch: 3 | inputs: 4 | which: 5 | type: choice 6 | description: Which dependents to check 7 | options: 8 | - most # strong + "Suggests" 9 | - strong # ("Depends", "Imports", "LinkingTo") 10 | 11 | name: Reverse dependency check 12 | 13 | env: 14 | NOT_CRAN: false 15 | 16 | jobs: 17 | revdep_check: 18 | name: Reverse check ${{ inputs.which }} dependents 19 | uses: r-devel/recheck/.github/workflows/recheck.yml@v1 20 | with: 21 | which: ${{ inputs.which }} 22 | subdirectory: 'r' # set if your R package is in a subdir of the git repo 23 | repository: '' # set to recheck an R package from another git repo 24 | ref: '' # set to recheck a custom tag/branch from another repo 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | 4 | # Object files 5 | *.o 6 | *.ko 7 | *.obj 8 | *.elf 9 | 10 | # Linker output 11 | *.ilk 12 | *.map 13 | *.exp 14 | 15 | # Precompiled Headers 16 | *.gch 17 | *.pch 18 | 19 | # Libraries 20 | *.lib 21 | *.a 22 | *.la 23 | *.lo 24 | 25 | # Shared objects (inc. 
Windows DLLs) 26 | *.dll 27 | *.so 28 | *.so.* 29 | *.dylib 30 | 31 | # Executables 32 | *.exe 33 | *.out 34 | *.app 35 | *.i*86 36 | *.x86_64 37 | *.hex 38 | 39 | # Debug files 40 | *.dSYM/ 41 | *.su 42 | *.idb 43 | *.pdb 44 | 45 | bin/ 46 | moocore.Rcheck/ 47 | testsuite/ 48 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | ci: 2 | autofix_prs: true 3 | autofix_commit_msg: | 4 | [pre-commit.ci] auto fixes from pre-commit.com hooks 5 | 6 | for more information, see https://pre-commit.ci 7 | 8 | repos: 9 | - repo: https://github.com/lorenzwalthert/precommit 10 | rev: v0.4.3.9008 11 | hooks: 12 | - id: parsable-R 13 | - id: no-browser-statement 14 | - id: no-print-statement 15 | - id: no-debug-statement 16 | - id: deps-in-desc 17 | args: [--root=r/] 18 | exclude: 'do.R' 19 | 20 | - repo: https://github.com/pre-commit/pre-commit-hooks 21 | rev: v5.0.0 22 | hooks: 23 | - id: trailing-whitespace 24 | - id: check-yaml 25 | - id: check-merge-conflict 26 | args: [ --assume-in-merge ] 27 | - id: check-added-large-files 28 | name: check-added-large-files (>200kb) 29 | args: ['--maxkb=200'] 30 | exclude: 'python/tests/test_data/expected_output/.*' 31 | - id: check-added-large-files 32 | name: check-added-large-files in tests (>999kb) 33 | args: ['--maxkb=999'] 34 | files: 'python/tests/test_data/expected_output/.*' 35 | - id: file-contents-sorter 36 | files: '\.Rbuildignore$|^bibkeys.txt$|WORDLIST' 37 | - id: end-of-file-fixer 38 | exclude: '(\.Rd|python/doc/source/reference/.*|test-doctest-.*)' 39 | 40 | - repo: https://github.com/tox-dev/tox-ini-fmt 41 | rev: "1.5.0" 42 | hooks: 43 | - id: tox-ini-fmt 44 | - repo: https://github.com/tox-dev/pyproject-fmt 45 | rev: "v2.5.1" 46 | hooks: 47 | - id: pyproject-fmt 48 | additional_dependencies: ["tox>=4.12.1"] 49 | - repo: https://github.com/astral-sh/ruff-pre-commit 50 | # Ruff version. 
51 | rev: v0.11.2 52 | hooks: 53 | # Run the formatter. 54 | - id: ruff-format 55 | types_or: [ python, pyi, jupyter ] 56 | # Run the linter. 57 | - id: ruff 58 | args: [ --fix, --exit-non-zero-on-fix ] 59 | types_or: [ python, pyi, jupyter ] 60 | require_serial: true 61 | - repo: https://github.com/sphinx-contrib/sphinx-lint 62 | rev: v1.0.0 63 | hooks: 64 | - id: sphinx-lint 65 | -------------------------------------------------------------------------------- /DEVEL-README.md: -------------------------------------------------------------------------------- 1 | 2 | How to release 3 | ============== 4 | 5 | 1. Bump version number `PACKAGEVERSION` in `Makefile`. 6 | 7 | 1. `./release.py` 8 | 9 | 1. `git ci -a -m "Prepare to release v${PACKAGEVERSION}"` 10 | 11 | 1. R release (within `r/`): 12 | 13 | 1. `make releasecheck` 14 | 1. [Check reverse dependencies](https://github.com/multi-objective/moocore/actions/workflows/revdepcheck.yml) 15 | 1. Update `cran-comments.md` 16 | 1. Submit to CRAN: `make submit` 17 | 18 | 1. [Publish a release in github](https://github.com/multi-objective/moocore/releases/new) to automatically submit to PyPi. 19 | 20 | 1. `./release.py --dev NEW_VERSION` 21 | 22 | 1. 
`git ci -a -m "Start development of v{NEW_VERSION}"` 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PACKAGEVERSION=0.1.8 2 | 3 | .PHONY: default clean check test pre-commit 4 | 5 | default: test 6 | 7 | test check: 8 | $(MAKE) -C r/ check 9 | $(MAKE) -C python/ test 10 | $(MAKE) -C c/ test 11 | 12 | clean: 13 | $(MAKE) -C c/ clean 14 | $(MAKE) -C r/ clean 15 | $(MAKE) -C python/ clean 16 | rm -rf *.Rcheck/ 17 | 18 | pre-commit: 19 | pre-commit autoupdate 20 | pre-commit run -a 21 | 22 | 23 | closeversion: 24 | git push origin :refs/tags/v$(PACKAGEVERSION) # Remove any existing tag 25 | git tag -f -a v$(PACKAGEVERSION) -m "Version $(PACKAGEVERSION)" 26 | git push --tags 27 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | - [ ] Replace AVL code 2 | - [ ] Merge eaf README 3 | - [ ] Merge hypervolume/trunk README 4 | - [ ] Merge hypervolume/Makefile README 5 | - [ ] Document logHYP: T. Friedrich, K. Bringmann, T. Voß, C. Igel. The Logarithmic Hypervolume Indicator. 11th ACM Foundations of Genetic Algorithms (FOGA), pages 81-92, 2009. https://hpi.de/friedrich/publications/all/Document/puma-friedrich/FriedrichBVI11.pdf/a839ddd47688611dcf92512ca010daf3.html?tx_extbibsonomycsl_publicationlist%5Baction%5D=view&tx_extbibsonomycsl_publicationlist%5Bpreview%5D=large&cHash=7cabd8bc9699c0ab3f88886a3f62976f 6 | - [ ] Document relation between hypervolume and attainment function. 7 | - [ ] R2 indicator: [Reinvestigating the R2 Indicator: Achieving Pareto Compliance by Integration](https://link.springer.com/chapter/10.1007/978-3-031-70085-9_13) [Brockhoff, D., Wagner, T., Trautmann, H.: On the properties of the R2 indicator. In: Proceedings of the 14th Annual Conference on Genetic and Evolutionary 8 | Computation, pp. 
465–472 (2012)] 9 | - [ ] Convert code from https://github.com/fieldsend/multiobjective_data_structures 10 | - [X] pre-commit for R: https://lorenzwalthert.github.io/precommit/articles/ci.html 11 | - [X] create moocore R package 12 | - [X] create moocore Python package 13 | - [X] Setup github actions 14 | - [X] Rewrite regtest.pl in python. 15 | - [X] move eaftools/eaf code here. 16 | - [x] move hypervolume/trunk code here. 17 | - [x] move hypervolume testsuite here. 18 | -------------------------------------------------------------------------------- /TODO-hv: -------------------------------------------------------------------------------- 1 | ## Hypervolume 2 | 3 | Klee's Measure Problem Made Easy: O(n^{d/3} polylog n) for d >= 3. 4 | https://ieeexplore.ieee.org/document/6686177 5 | 6 | https://github.com/esa/pagmo2/tree/master/src/utils/hv_algos 7 | https://github.com/esa/pagmo/tree/master/src/util/hv_algorithm 8 | 9 | Version 2.0rc2 has some improvements from Andreia to the recursive hypervolume that are still not part of moocore: https://lopez-ibanez.eu/hypervolume#download 10 | 11 | ## Hypervolume contributions: 12 | 13 | https://hpi.de/en/friedrich/research/the-hypervolume-indicator.html 14 | 15 | https://github.com/apguerreiro/HVC 16 | 17 | https://github.com/adbjesus/mooutils/blob/main/include/mooutils/indicators.hpp 18 | 19 | ## Other 20 | 21 | a) Issue error if number is infinite or NaN 22 | x = strtod(str, &endp); 23 | if(!isnormal(x)) error("x is infinite or NaN"); 24 | 25 | b) check overflow/underflow: strtod() returns plus or minus HUGE_VAL 26 | (HUGE_VALF, HUGE_VALL) is returned (according to the sign of the 27 | value), and ERANGE is stored in errno. If the correct value would 28 | cause underflow, zero is returned and ERANGE is stored in errno. 29 | 30 | errno = 0; 31 | x = strtod(str, &endp); 32 | if(errno != 0) { error("Overflow/underflow"); 33 | 34 | 6) Sort array in memory by 3rd (or 2nd) dimension, depending on 35 | special case used. 
This may (or may not) improve performance 36 | significantly for large sets, by speeding up the special case. 37 | 38 | 7) verify the handling of repeated coordinate values and of dominated 39 | points, to avoid performing more math operations than actually 40 | required in these cases. 41 | -------------------------------------------------------------------------------- /bibkeys.txt: -------------------------------------------------------------------------------- 1 | AugBadBroZit2009gecco 2 | BeuFonLopPaqVah09:tec 3 | BezLopStu2017assessment 4 | BezLopStu2017emo 5 | BinGinRou2015gaupar 6 | CheGinBecMol2013moda 7 | ChiarandiniPhD 8 | CoeSie2004igd 9 | Deb02nsga2 10 | DenZha2019approxhv 11 | DiaLop2020ejor 12 | DubLopStu2011amai 13 | FonGueLopPaq2011emo 14 | FonPaqLop06:hypervolume 15 | GruFon2009:emaa 16 | Grunert01 17 | GueFon2017hv4d 18 | HerWer1987tabucol 19 | IshMasTanNoj2015igd 20 | Jen03 21 | JohAraMcGSch1991 22 | KnoCor2002cec 23 | KunLucPre1975jacm 24 | LopPaqStu09emaa 25 | LopVerDreDoe2025 26 | LopezIbanezPhD 27 | Molchanov2005theory 28 | SchEsqLarCoe2012tec 29 | VelLam1998gp 30 | ZhoZhaJin2009igdx 31 | ZitThi1998ppsn 32 | ZitThiLauFon2003:tec 33 | -------------------------------------------------------------------------------- /c/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | autom4te.cache/ 3 | config.log 4 | config.status 5 | dominatedsets 6 | eaf 7 | epsilon 8 | igd 9 | ndsort 10 | nondominated 11 | Makevars 12 | Makevars.win 13 | git_version 14 | Hypervolume_MEX.mex 15 | -------------------------------------------------------------------------------- /c/NEWS.md: -------------------------------------------------------------------------------- 1 | # moocore C library 2 | 3 | ## 0.16.5 4 | 5 | * The base case of the recursive HV algorithm is HV4D+. 6 | * Remove the `--shift` option of `hv` executable. 7 | 8 | ## 0.16.4 9 | 10 | * Implementation of HV4D+ algorithm. 
11 | 12 | ## 0.16.3 13 | 14 | * Implementation of HV3D+ algorithm. 15 | 16 | ## 0.16.2 17 | 18 | * Faster reading of datasets. 19 | -------------------------------------------------------------------------------- /c/cmdline.c: -------------------------------------------------------------------------------- 1 | #include // If GNU libc, this includes feature.h and defines __USE_GNU 2 | #include "common.h" 3 | #ifdef __USE_GNU // Defined if _GNU_SOURCE is defined and GNU libc is used. 4 | extern char *program_invocation_short_name; 5 | #else 6 | char *program_invocation_short_name; 7 | #endif 8 | 9 | /* Do not inline it so we can set a breakpoint when debugging. */ 10 | void fatal_error(const char *format,...) 11 | { 12 | va_list ap; 13 | fprintf(stderr, "%s: fatal error: ", program_invocation_short_name); 14 | va_start(ap,format); 15 | vfprintf(stderr, format, ap); 16 | va_end(ap); 17 | fprintf(stderr, "\n"); 18 | exit(EXIT_FAILURE); 19 | } 20 | 21 | void errprintf(const char *format,...) 22 | { 23 | va_list ap; 24 | fprintf(stderr, "%s: error: ", program_invocation_short_name); 25 | va_start(ap,format); 26 | vfprintf(stderr, format, ap); 27 | va_end(ap); 28 | fprintf(stderr, "\n"); 29 | } 30 | 31 | void warnprintf(const char *format,...) 
32 | { 33 | va_list ap; 34 | fprintf(stderr, "%s: warning: ", program_invocation_short_name); 35 | va_start(ap,format); 36 | vfprintf(stderr, format, ap); 37 | va_end(ap); 38 | fprintf(stderr, "\n"); 39 | } 40 | -------------------------------------------------------------------------------- /c/config.h: -------------------------------------------------------------------------------- 1 | #ifndef _MOOCORE_CONFIG_H_ 2 | #define _MOOCORE_CONFIG_H_ 3 | 4 | #if (defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)) \ 5 | && !defined(_UCRT) && !defined(__USE_MINGW_ANSI_STDIO) 6 | #if !defined(__cplusplus) && !defined(_ISOC99_SOURCE) 7 | #define _ISOC99_SOURCE 8 | #endif 9 | #define __USE_MINGW_ANSI_STDIO 1 10 | #endif 11 | 12 | #endif // _MOOCORE_CONFIG_H_ 13 | -------------------------------------------------------------------------------- /c/eafdiff.c: -------------------------------------------------------------------------------- 1 | #include "eaf.h" 2 | 3 | /* FIXME: Rmoocore.R contains another version of this function. */ 4 | /* FIXME: data cannot be const because eaf_compute_rectangles will sort it. */ 5 | double * 6 | eafdiff_compute_rectangles(int *eaf_npoints, double * data, int nobj, 7 | const int *cumsizes, int nruns, int intervals) 8 | { 9 | /* This returns all levels. attsurf() should probably handle this case. 
*/ 10 | int * level = levels_from_percentiles(NULL, nruns, nruns); 11 | eaf_t **eaf = attsurf(data, nobj, cumsizes, nruns, level, nruns); 12 | free (level); 13 | 14 | eaf_polygon_t * rects = eaf_compute_rectangles(eaf, nobj, nruns); 15 | eaf_free(eaf, nruns); 16 | 17 | const int division = nruns / 2; 18 | int nrow = (int) vector_int_size(&rects->col); 19 | // Two points per row + color 20 | const int ncol = 2 * nobj + 1; 21 | double *result = malloc(sizeof(double) * nrow * ncol); 22 | const double * p_xy = vector_objective_begin(&rects->xy); 23 | for (int k = 0; k < nrow; k++) { 24 | for (int i = 0; i < ncol - 1; i++, p_xy++) 25 | result[k * ncol + i] = *p_xy; 26 | double color = vector_int_at(&rects->col, k); 27 | // Each color is within [0, nruns / 2] or [-nruns / 2, 0] 28 | result[k * ncol + ncol - 1] = intervals * color / (double) division; 29 | } 30 | vector_objective_dtor (&rects->xy); 31 | vector_int_dtor (&rects->col); 32 | free(rects); 33 | *eaf_npoints = nrow; 34 | // FIXME: This may return duplicated rows, remove them. 35 | return result; 36 | } 37 | 38 | double * 39 | eafdiff_compute_matrix(int *eaf_npoints, double * data, int nobj, 40 | const int *cumsizes, int nruns, int intervals) 41 | { 42 | // FIXME: This assumes that half of the runs come from each side of the 43 | // difference but we could make this a parameter. 44 | const int nsets1 = nruns / 2; 45 | /* This returns all levels. attsurf() should probably handle this case. */ 46 | int * level = levels_from_percentiles(NULL, nruns, nruns); 47 | eaf_t **eaf = attsurf(data, nobj, cumsizes, nruns, level, nruns); 48 | free (level); 49 | 50 | const int nrow = eaf_totalpoints (eaf, nruns); 51 | const int ncol = nobj + 1; 52 | double *result = malloc(sizeof(double) * nrow * ncol); 53 | int pos = 0; 54 | for (int k = 0; k < nruns; k++) { 55 | int npoints = (int) eaf[k]->size; 56 | // FIXME: Find the most efficient order of the loop. 
57 | for (int i = 0; i < npoints; i++) { 58 | for (int j = 0; j < nobj; j++) { 59 | result[pos * ncol + j] = eaf[k]->data[i * nobj + j]; 60 | } 61 | result[pos * ncol + nobj] = eafdiff_percentile(eaf[k], i, nsets1, nruns, intervals); 62 | pos++; 63 | } 64 | } 65 | eaf_free(eaf, nruns); 66 | *eaf_npoints = nrow; 67 | return result; 68 | } 69 | -------------------------------------------------------------------------------- /c/gcc.mk: -------------------------------------------------------------------------------- 1 | # -*- Makefile-gmake -*- 2 | WERROR= 3 | ifdef WERROR 4 | WERROR_FLAG:=-Werror 5 | endif 6 | WARN_CFLAGS = -pedantic -Wall -Wextra -Wvla -Wconversion -Wno-sign-conversion -Wstrict-prototypes -Wundef $(WERROR_FLAG) 7 | ifeq ($(DEBUG), 0) 8 | SANITIZERS ?= 9 | OPT_CFLAGS ?= -DNDEBUG -O3 -flto 10 | # Options -funroll-loops -ffast-math -msse -mfpmath=sse improve performance but are not portable. 11 | # Options -fstandard-precision=fast -ftree-vectorize are not well supported 12 | # in some versions/architectures. 13 | else 14 | SANITIZERS ?= -fsanitize=undefined -fsanitize=address -fsanitize=float-cast-overflow -fsanitize=float-divide-by-zero 15 | OPT_CFLAGS ?= -g3 -O0 16 | endif 17 | 18 | ifdef march 19 | MARCH=$(march) 20 | endif 21 | ifndef MARCH 22 | MARCH=native 23 | endif 24 | ifneq ($(MARCH),none) 25 | MARCH_FLAGS = -march=$(MARCH) 26 | gcc-guess-march = $(strip $(shell $(CC) $(CFLAGS) $(OPT_CFLAGS) $(MARCH_FLAGS) -x c -S -\#\#\# - < /dev/null 2>&1 | \ 27 | grep -m 1 -e cc1 | grep -o -e "march=[^'\"]*" | head -n 1 | sed 's,march=,,')) 28 | ifeq ($(gcc-guess-march),) 29 | gcc-guess-march=unknown 30 | endif 31 | endif 32 | -------------------------------------------------------------------------------- /c/gitversion.mk: -------------------------------------------------------------------------------- 1 | ## Do we have git? 2 | ifeq ($(shell sh -c 'which git 1> /dev/null 2>&1 && echo y'),y) 3 | ## Is this a working copy? 
4 | ifeq ($(shell sh -c "LC_ALL=C git rev-parse --is-inside-work-tree 2>&1 | grep -F true"),true) 5 | $(shell sh -c "git describe --dirty --first-parent --always --exclude '*' > git_version") 6 | endif 7 | endif 8 | ## Set version information: 9 | REVISION=.$(shell sh -c 'cat git_version 2> /dev/null') 10 | -------------------------------------------------------------------------------- /c/hv.h: -------------------------------------------------------------------------------- 1 | /************************************************************************* 2 | 3 | hv.h 4 | 5 | --------------------------------------------------------------------- 6 | 7 | Copyright (c) 2005, 2006 8 | Carlos M. Fonseca 9 | Manuel Lopez-Ibanez 10 | Luis Paquete 11 | 12 | This program is free software (software libre); you can redistribute 13 | it and/or modify it under the terms of the GNU General Public License 14 | as published by the Free Software Foundation; either version 2 of the 15 | License, or (at your option) any later version. 16 | 17 | This program is distributed in the hope that it will be useful, but 18 | WITHOUT ANY WARRANTY; without even the implied warranty of 19 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 | General Public License for more details. 
21 | 22 | You should have received a copy of the GNU General Public License 23 | along with this program; if not, you can obtain a copy of the GNU 24 | General Public License at: 25 | http://www.gnu.org/copyleft/gpl.html 26 | or by writing to: 27 | Free Software Foundation, Inc., 59 Temple Place, 28 | Suite 330, Boston, MA 02111-1307 USA 29 | 30 | ---------------------------------------------------------------------- 31 | 32 | 33 | *************************************************************************/ 34 | #ifndef HV_H_ 35 | #define HV_H_ 36 | 37 | #ifdef __cplusplus 38 | extern "C" { 39 | #endif 40 | 41 | double fpli_hv(const double *data, int d, int n, const double *ref); 42 | void hv_contributions (double *hvc, double *points, int dim, int size, const double * ref); 43 | #ifdef __cplusplus 44 | } 45 | #endif 46 | 47 | #endif 48 | -------------------------------------------------------------------------------- /c/hv_contrib.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "common.h" 6 | #include "hv.h" 7 | 8 | /* Given a list of points, compute the hypervolume of each set that can be 9 | obtained by removing just one point. 10 | 11 | It does not actually compute the contribution but HV_total - HV_i, 12 | where HV_total is the total HV and HV_i is the contribution of the 13 | point, that is, it actually computes the HV minus the point i. 14 | */ 15 | static double * 16 | hv_1point_diffs (double *hvc, double *points, int dim, int size, const double * ref, 17 | const bool * uev) 18 | { 19 | bool keep_uevs = uev != NULL; 20 | if (hvc == NULL) 21 | hvc = MOOCORE_MALLOC(size, double); 22 | 23 | double * tmp = MOOCORE_MALLOC(dim, double); 24 | for (int i = 0; i < size; i++) { 25 | memcpy (tmp, points + i * dim, sizeof(double) * dim); 26 | memcpy (points + i * dim, ref, sizeof(double) * dim); 27 | hvc[i] = (keep_uevs && uev[i]) 28 | ? 
0.0 29 | : fpli_hv(points, dim, size, ref); 30 | memcpy (points + i * dim, tmp, sizeof(double) * dim); 31 | } 32 | free(tmp); 33 | return hvc; 34 | } 35 | 36 | void 37 | hv_contributions (double *hvc, double *points, int dim, int size, const double * ref) 38 | { 39 | const double tolerance = sqrt(DBL_EPSILON); 40 | double hv_total = fpli_hv(points, dim, size, ref); 41 | hv_1point_diffs(hvc, points, dim, size, ref, NULL); 42 | for (int i = 0; i < size; i++) { 43 | hvc[i] = hv_total - hvc[i]; 44 | // Handle very small values. 45 | hvc[i] = (fabs(hvc[i]) >= tolerance) ? hvc[i] : 0.0; 46 | assert(hvc[i] >= 0); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /c/libhv.mk: -------------------------------------------------------------------------------- 1 | # -*- Makefile-gmake -*- 2 | LIBHV_SRCS = hv.c hv3dplus.c hv4d.c 3 | LIBHV_HDRS = hv.h hv_priv.h 4 | LIBHV_OBJS = $(LIBHV_SRCS:.c=.o) 5 | HV_LIB = fpli_hv.a 6 | 7 | $(HV_LIB): $(LIBHV_OBJS) libutil.o 8 | @$(RM) $@ 9 | $(QUIET_AR)$(AR) rcs $@ $^ 10 | 11 | ## Dependencies: 12 | $(LIBHV_OBJS): $(LIBHV_HDRS) 13 | -------------------------------------------------------------------------------- /c/libutil.c: -------------------------------------------------------------------------------- 1 | /* Functions that behave differently for library code and command-line code. */ 2 | 3 | #include "common.h" 4 | 5 | #ifndef R_PACKAGE 6 | void fatal_error(const char *format,...) 7 | { 8 | va_list ap; 9 | va_start(ap, format); 10 | vfprintf(stderr, format, ap); 11 | va_end(ap); 12 | exit(EXIT_FAILURE); 13 | } 14 | 15 | void errprintf(const char *format,...) 16 | { 17 | va_list ap; 18 | fprintf(stderr, "error: "); 19 | va_start(ap,format); 20 | vfprintf(stderr, format, ap); 21 | va_end(ap); 22 | fprintf(stderr, "\n"); 23 | } 24 | 25 | void warnprintf(const char *format,...) 
26 | { 27 | va_list ap; 28 | fprintf(stderr, "warning: "); 29 | va_start(ap,format); 30 | vfprintf(stderr, format, ap); 31 | va_end(ap); 32 | fprintf(stderr, "\n"); 33 | } 34 | 35 | #endif // R_PACKAGE 36 | -------------------------------------------------------------------------------- /c/mt19937/LICENSE.md: -------------------------------------------------------------------------------- 1 | # MT19937 2 | 3 | Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) 4 | 5 | The rk_random and rk_seed functions algorithms and the original design of 6 | the Mersenne Twister RNG: 7 | 8 | Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, 9 | All rights reserved. 10 | 11 | Redistribution and use in source and binary forms, with or without 12 | modification, are permitted provided that the following conditions 13 | are met: 14 | 15 | 1. Redistributions of source code must retain the above copyright 16 | notice, this list of conditions and the following disclaimer. 17 | 18 | 2. Redistributions in binary form must reproduce the above copyright 19 | notice, this list of conditions and the following disclaimer in the 20 | documentation and/or other materials provided with the distribution. 21 | 22 | 3. The names of its contributors may not be used to endorse or promote 23 | products derived from this software without specific prior written 24 | permission. 25 | 26 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 27 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 28 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 29 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER 30 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 31 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 32 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 33 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 34 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 35 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 36 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 | 38 | Original algorithm for the implementation of rk_interval function from 39 | Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by 40 | Magnus Jonsson. 41 | 42 | Constants used in the rk_double implementation by Isaku Wada. 43 | 44 | Permission is hereby granted, free of charge, to any person obtaining a 45 | copy of this software and associated documentation files (the 46 | "Software"), to deal in the Software without restriction, including 47 | without limitation the rights to use, copy, modify, merge, publish, 48 | distribute, sublicense, and/or sell copies of the Software, and to 49 | permit persons to whom the Software is furnished to do so, subject to 50 | the following conditions: 51 | 52 | The above copyright notice and this permission notice shall be included 53 | in all copies or substantial portions of the Software. 54 | 55 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 56 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 57 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 58 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 59 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 60 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 61 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
62 | -------------------------------------------------------------------------------- /c/mt19937/mt19937.h: -------------------------------------------------------------------------------- 1 | /* From https://github.com/numpy/numpy/blob/467be385f57cfdf373ba076836054fee3a8218ca/numpy/random/src/mt19937/mt19937.h 2 | */ 3 | #pragma once 4 | #include 5 | 6 | #if defined(_WIN32) && !defined (__MINGW32__) 7 | #define inline __forceinline 8 | #endif 9 | 10 | #define RK_STATE_LEN 624 11 | 12 | #define N 624 13 | #define M 397 14 | #define MATRIX_A 0x9908b0dfUL 15 | #define UPPER_MASK 0x80000000UL 16 | #define LOWER_MASK 0x7fffffffUL 17 | 18 | typedef struct s_mt19937_state { 19 | uint32_t key[RK_STATE_LEN]; 20 | int pos; 21 | } mt19937_state; 22 | 23 | extern void mt19937_seed(mt19937_state *state, uint32_t seed); 24 | 25 | extern void mt19937_gen(mt19937_state *state); 26 | 27 | /* Slightly optimized reference implementation of the Mersenne Twister */ 28 | static inline uint32_t mt19937_next(mt19937_state *state) { 29 | uint32_t y; 30 | 31 | if (state->pos == RK_STATE_LEN) { 32 | // Move to function to help inlining 33 | mt19937_gen(state); 34 | } 35 | y = state->key[state->pos++]; 36 | 37 | /* Tempering */ 38 | y ^= (y >> 11); 39 | y ^= (y << 7) & 0x9d2c5680UL; 40 | y ^= (y << 15) & 0xefc60000UL; 41 | y ^= (y >> 18); 42 | 43 | return y; 44 | } 45 | 46 | extern void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key, 47 | int key_length); 48 | 49 | static inline uint64_t mt19937_next64(mt19937_state *state) { 50 | return (uint64_t)mt19937_next(state) << 32 | mt19937_next(state); 51 | } 52 | 53 | static inline uint32_t mt19937_next32(mt19937_state *state) { 54 | return mt19937_next(state); 55 | } 56 | 57 | static inline double mt19937_next_double(mt19937_state *state) { 58 | int32_t a = mt19937_next(state) >> 5, b = mt19937_next(state) >> 6; 59 | return (a * 67108864.0 + b) / 9007199254740992.0; 60 | } 61 | 62 | void mt19937_jump(mt19937_state *state); 63 | 
-------------------------------------------------------------------------------- /c/rng.h: -------------------------------------------------------------------------------- 1 | #include "mt19937/mt19937.h" 2 | 3 | typedef mt19937_state rng_state; 4 | 5 | #include "common.h" 6 | 7 | static inline rng_state * 8 | rng_new(uint32_t seed) 9 | { 10 | rng_state * rng = malloc(sizeof(rng_state)); 11 | mt19937_seed(rng, seed); 12 | return rng; 13 | } 14 | 15 | static inline void 16 | rng_free(rng_state * rng) 17 | { 18 | free(rng); 19 | } 20 | 21 | /* Returns a value between [0, 1) */ 22 | static inline double 23 | rng_random(rng_state * rng) 24 | { 25 | return mt19937_next_double(rng); 26 | } 27 | 28 | static inline double 29 | rng_uniform(rng_state * rng, double low, double high) 30 | { 31 | assert(rng != NULL); 32 | if (low >= high) 33 | return low; 34 | return low + (high - low) * rng_random(rng); 35 | } 36 | 37 | 38 | double rng_standard_normal(rng_state *rng); 39 | void rng_bivariate_normal_fill(rng_state * rng, 40 | double mu1, double mu2, 41 | double sigma1, double sigma2, double rho, 42 | double *out, int n); 43 | -------------------------------------------------------------------------------- /c/sort.h: -------------------------------------------------------------------------------- 1 | #ifndef SORT_H_ 2 | # define SORT_H_ 3 | 4 | #include "common.h" 5 | 6 | // ---------- Relational functions (return bool) ----------------------------- 7 | 8 | 9 | /* 10 | x < y, i.e., x is strictly lower than y in all dimensions. Assumes minimization. 
11 | */ 12 | 13 | static inline bool 14 | strongly_dominates(const double * restrict x, const double * restrict y, dimension_t dim) 15 | { 16 | ASSUME(dim >= 2); 17 | for (dimension_t d = 0; d < dim; d++) 18 | if (x[d] >= y[d]) 19 | return false; 20 | return true; 21 | } 22 | 23 | static inline bool 24 | weakly_dominates(const double * restrict x, const double * restrict y, const dimension_t dim) 25 | { 26 | ASSUME(dim >= 2); 27 | /* The code below is a vectorized version of this code: 28 | for (dimension_t d = 0; d < dim; d++) 29 | if (x[d] > y[d]) 30 | return false; 31 | return true; 32 | */ 33 | // GCC 15 is not yet able to infer this from attribute ASSUME(). 34 | bool dominated = (x[0] <= y[0]) & (x[1] <= y[1]); 35 | for (dimension_t d = 2; d < dim; d++) 36 | dominated &= (x[d] <= y[d]); 37 | return dominated; 38 | } 39 | 40 | static inline bool 41 | lexicographic_less_3d(const double * restrict a, const double * restrict b) 42 | { 43 | return a[2] < b[2] || (a[2] == b[2] && (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0]))); 44 | } 45 | 46 | // ---------- Comparison functions (e.g, qsort). Return 'int' ---------------- 47 | 48 | // General type for comparison functions used in qsort(). 
49 | typedef int (*cmp_fun_t)(const void *, const void *); 50 | 51 | static inline int 52 | cmp_double_asc_rev(const void * restrict p1, const void * restrict p2, dimension_t dim) 53 | { 54 | const double *x1 = *((const double **)p1); 55 | const double *x2 = *((const double **)p2); 56 | for (int i = dim - 1; i >= 0; i--) { 57 | if (x1[i] < x2[i]) 58 | return -1; 59 | if (x1[i] > x2[i]) 60 | return 1; 61 | } 62 | return 0; 63 | } 64 | 65 | static inline int 66 | cmp_double_asc_rev_3d(const void * restrict p1, const void * restrict p2) 67 | { 68 | return cmp_double_asc_rev(p1, p2, 3); 69 | } 70 | 71 | static inline int 72 | cmp_double_asc_rev_4d(const void * restrict p1, const void * restrict p2) 73 | { 74 | return cmp_double_asc_rev(p1, p2, 4); 75 | } 76 | 77 | static inline int 78 | cmp_double_asc_only_3d(const void * restrict p1, const void * restrict p2) 79 | { 80 | const double x1 = *(*(const double **)p1 + 2); 81 | const double x2 = *(*(const double **)p2 + 2); 82 | return (x1 < x2) ? -1 : (x1 > x2 ? 1 : 0); 83 | } 84 | 85 | static inline int 86 | cmp_double_asc_only_4d(const void * restrict p1, const void * restrict p2) 87 | { 88 | const double x1 = *(*(const double **)p1 + 3); 89 | const double x2 = *(*(const double **)p2 + 3); 90 | return (x1 < x2) ? -1 : (x1 > x2 ? 1 : 0); 91 | } 92 | 93 | static inline int 94 | cmp_double_asc_y_des_x(const void * restrict p1, const void * restrict p2) 95 | { 96 | const double x1 = *(const double *)p1; 97 | const double x2 = *(const double *)p2; 98 | const double y1 = *((const double *)p1+1); 99 | const double y2 = *((const double *)p2+1); 100 | return (y1 < y2) ? -1: ((y1 > y2) ? 1 : (x1 > x2 ? 
-1 : 1)); 101 | } 102 | 103 | static inline int 104 | cmp_doublep_x_asc_y_asc(const void * restrict p1, const void * restrict p2) 105 | { 106 | const double x1 = **(const double **)p1; 107 | const double x2 = **(const double **)p2; 108 | const double y1 = *(*(const double **)p1 + 1); 109 | const double y2 = *(*(const double **)p2 + 1); 110 | return (x1 < x2) ? -1 : ((x1 > x2) ? 1 : 111 | ((y1 < y2) ? -1 : ((y1 > y2) ? 1 : 0))); 112 | } 113 | 114 | 115 | #endif /* !SORT_H_ */ 116 | -------------------------------------------------------------------------------- /c/svnversion.mk: -------------------------------------------------------------------------------- 1 | ## Do we have svnversion? 2 | ifeq ($(shell sh -c 'which svnversion 1> /dev/null 2>&1 && echo y'),y) 3 | ## Is this a working copy? 4 | ifeq ($(shell sh -c 'LC_ALL=C svnversion -n . | grep -q ^[0-9] && echo y'),y) 5 | $(shell sh -c 'svnversion -n . > svn_version') 6 | endif 7 | endif 8 | ## Set version information: 9 | REVISION = $(shell sh -c 'cat svn_version 2> /dev/null') 10 | -------------------------------------------------------------------------------- /c/timer.h: -------------------------------------------------------------------------------- 1 | /************************************************************************* 2 | 3 | Simple timer functions. 4 | 5 | --------------------------------------------------------------------- 6 | 7 | Copyright (c) 2005, 2006, 2007 Manuel Lopez-Ibanez 8 | TeX: \copyright 2005, 2006, 2007 Manuel L{\'o}pez-Ib{\'a}{\~n}ez 9 | 10 | This program is free software (software libre); you can redistribute 11 | it and/or modify it under the terms of the GNU General Public License 12 | as published by the Free Software Foundation; either version 2 of the 13 | License, or (at your option) any later version. 
14 | 15 | This program is distributed in the hope that it will be useful, but 16 | WITHOUT ANY WARRANTY; without even the implied warranty of 17 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 | General Public License for more details. 19 | 20 | You should have received a copy of the GNU General Public License 21 | along with this program; if not, you can obtain a copy of the GNU 22 | General Public License at: 23 | http://www.gnu.org/copyleft/gpl.html 24 | or by writing to: 25 | Free Software Foundation, Inc., 59 Temple Place, 26 | Suite 330, Boston, MA 02111-1307 USA 27 | 28 | ---------------------------------------------------------------------- 29 | *************************************************************************/ 30 | #ifndef TIMER_H_ 31 | #define TIMER_H_ 32 | 33 | typedef enum type_timer {REAL_TIME, VIRTUAL_TIME} TIMER_TYPE; 34 | void Timer_start(void); 35 | double Timer_elapsed_virtual(void); 36 | double Timer_elapsed_real(void); 37 | double Timer_elapsed(TIMER_TYPE type); 38 | void Timer_stop(void); 39 | void Timer_continue(void); 40 | 41 | #endif // TIMER_H_ 42 | -------------------------------------------------------------------------------- /c/whv.h: -------------------------------------------------------------------------------- 1 | #ifndef WHV_H 2 | #define WHV_H 3 | #include 4 | double rect_weighted_hv2d(double *data, int n, double * rectangles, int rectangles_nrow, const double *reference); 5 | #endif // WHV_H 6 | -------------------------------------------------------------------------------- /c/whv_hype.h: -------------------------------------------------------------------------------- 1 | #ifndef WHV_HYPE_H 2 | #define WHV_HYPE_H 3 | #include 4 | 5 | double whv_hype_unif(const double *points, int npoints, 6 | const double *ideal, const double *ref, 7 | int nsamples, uint32_t seed); 8 | double whv_hype_expo(const double *points, int npoints, 9 | const double *ideal, const double *ref, 10 | int nsamples, uint32_t 
seed, double mu); 11 | double whv_hype_gaus(const double *points, int npoints, 12 | const double *ideal, const double *ref, 13 | int nsamples, uint32_t seed, const double *mu); 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | fixes: 2 | - "python/src/moocore/libmoocore/::c/" # Python package symlink 3 | - "src/moocore/libmoocore/::c/" # Python package symlink 4 | - "r/src/libmoocore/::c/" # R package symlink 5 | - "src/libmoocore/::c/" # R package symlink 6 | - ".tox/cov*/lib/*/site-packages/::python/src/" 7 | 8 | comment: 9 | layout: "header, diff, flags, components" # show component info in the PR comment 10 | 11 | flag_management: 12 | default_rules: # the rules that will be followed for any flag added, generally 13 | carryforward: true 14 | 15 | component_management: 16 | default_rules: # default rules that will be inherited by all components 17 | statuses: 18 | - type: project # in this case every component that doesn't have a status defined will have a project type one 19 | target: auto 20 | branches: 21 | - "!main" 22 | individual_components: 23 | - component_id: tests # this is an identifier that should not be changed 24 | name: tests # this is a display name, and can be changed freely 25 | paths: 26 | - python/tests/** 27 | - python/examples/** 28 | statuses: 29 | - type: project 30 | target: 100% 31 | informational: false 32 | - type: patch 33 | 34 | - component_id: src 35 | name: src 36 | paths: 37 | - python/src/** 38 | statuses: 39 | - type: project 40 | target: auto 41 | threshold: 1% 42 | informational: true 43 | - type: patch 44 | -------------------------------------------------------------------------------- /python/.gitattributes: -------------------------------------------------------------------------------- 1 | # Set the default behavior, in case people don't have core.autocrlf set. 
2 | * text=auto 3 | 4 | # Explicitly declare text files you want to always be normalized and converted 5 | # to native line endings on checkout. 6 | *.c text 7 | *.h text 8 | *.md text 9 | *.py text 10 | -------------------------------------------------------------------------------- /python/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | #R related aritfacts 10 | .RData 11 | .Rhistory 12 | 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | share/python-wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .nox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | *.py,cover 55 | .hypothesis/ 56 | .pytest_cache/ 57 | cover/ 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | local_settings.py 66 | db.sqlite3 67 | db.sqlite3-journal 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | doc/_build/ 78 | doc/jupyter_execute/ 79 | doc/source/reference/generated/ 80 | doc/source/auto_examples 81 | doc/source/sg_execution_times.rst 82 | 83 | # PyBuilder 84 | .pybuilder/ 85 | target/ 86 | 87 | # Jupyter Notebook 88 | .ipynb_checkpoints 89 | 90 | # IPython 91 | profile_default/ 92 | ipython_config.py 93 | 94 | # pyenv 95 | # For a library or package, you might want to ignore these files since the code is 96 | # intended to run in multiple environments; otherwise, check them in: 97 | # .python-version 98 | 99 | # pipenv 100 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 101 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 102 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 103 | # install all needed dependencies. 104 | #Pipfile.lock 105 | 106 | # poetry 107 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 108 | # This is especially recommended for binary packages to ensure reproducibility, and is more 109 | # commonly ignored for libraries. 
110 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 111 | #poetry.lock 112 | 113 | # pdm 114 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 115 | #pdm.lock 116 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 117 | # in version control. 118 | # https://pdm.fming.dev/#use-with-ide 119 | .pdm.toml 120 | 121 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 122 | __pypackages__/ 123 | 124 | # Celery stuff 125 | celerybeat-schedule 126 | celerybeat.pid 127 | 128 | # SageMath parsed files 129 | *.sage.py 130 | 131 | # Environments 132 | .env 133 | .venv 134 | env/ 135 | venv/ 136 | ENV/ 137 | env.bak/ 138 | venv.bak/ 139 | 140 | # Spyder project settings 141 | .spyderproject 142 | .spyproject 143 | 144 | # Rope project settings 145 | .ropeproject 146 | 147 | # mkdocs documentation 148 | /site 149 | 150 | # mypy 151 | .mypy_cache/ 152 | .dmypy.json 153 | dmypy.json 154 | 155 | # Pyre type checker 156 | .pyre/ 157 | 158 | # pytype static type analyzer 159 | .pytype/ 160 | 161 | # Cython debug symbols 162 | cython_debug/ 163 | 164 | # PyCharm 165 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 166 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 167 | # and can be added to the global gitignore or merged into this file. For a more nuclear 168 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
169 | #.idea/ 170 | -------------------------------------------------------------------------------- /python/Makefile: -------------------------------------------------------------------------------- 1 | # This file should be almost identical to 2 | # https://github.com/multi-objective/mooplot/blob/main/python/Makefile 3 | .PHONY : install dev-install build test doc fastdoc clean docdeps pre-commit 4 | 5 | install: build 6 | python3 -m pip install . --disable-pip-version-check 7 | 8 | dev-install: build 9 | python3 -m pip install -e . --disable-pip-version-check 10 | 11 | build: 12 | python3 -m build 13 | 14 | test: 15 | $(MAKE) -C src/moocore/libmoocore/ clean 16 | tox 17 | 18 | pre-commit: 19 | pre-commit autoupdate 20 | pre-commit run -a 21 | 22 | docdeps: 23 | python3 -m pip install -r requirements_dev.txt --disable-pip-version-check --quiet 24 | 25 | show: 26 | $(MAKE) -C doc show 27 | 28 | doc: 29 | $(MAKE) -C doc clean html 30 | 31 | fastdoc: 32 | $(MAKE) -C doc clean html-noplot 33 | 34 | clean: 35 | $(MAKE) -C doc clean 36 | $(MAKE) -C src/moocore/libmoocore/ clean 37 | find . 
-name '__pycache__' | xargs $(RM) -r 38 | $(RM) -r .pytest_cache .tox .ruff_cache build src/*.egg-info/ doc/source/auto_examples 39 | $(RM) .coverage coverage.xml c_coverage.xml dist/* src/moocore/*.so 40 | -------------------------------------------------------------------------------- /python/README.md: -------------------------------------------------------------------------------- 1 | **moocore**: Core Algorithms for Multi-Objective Optimization 2 | ============================================================= 3 | 4 | 5 | [![PyPI - Version](https://img.shields.io/pypi/v/moocore)][py-moocore-pypi] 6 | [![PyPI - Downloads](https://img.shields.io/pypi/dm/moocore?color=blue)][py-moocore-pypi] 7 | [![Python build status][py-build-badge]][py-build-link] 8 | [![coverage][py-coverage-badge]][py-coverage-link] 9 | 10 | 11 | [ [**Homepage**][py-moocore-homepage] ] 12 | [ [**GitHub**][py-moocore-github] ] 13 | 14 | 15 | **Contributors:** 16 | [Manuel López-Ibáñez](https://lopez-ibanez.eu), 17 | Fergus Rooney. 18 | 19 | --------------------------------------- 20 | 21 | Introduction 22 | ============ 23 | 24 | The goal of **moocore** is to collect fast implementations of core mathematical functions and algorithms for multi-objective optimization. These functions include: 25 | 26 | * Identifying and filtering dominated vectors. 27 | * Quality metrics such as (weighted) hypervolume, epsilon, IGD, etc. 28 | * Computation of the Empirical Attainment Function. The empirical attainment function (EAF) describes the probabilistic 29 | distribution of the outcomes obtained by a stochastic algorithm in the 30 | objective space. 
31 | 32 | **Keywords**: empirical attainment function, summary attainment surfaces, EAF 33 | differences, multi-objective optimization, bi-objective optimization, 34 | performance measures, performance assessment 35 | 36 | 37 | Install 38 | ------- 39 | 40 | You can install the latest released using `pip`: 41 | 42 | ```bash 43 | python3 -m pip install moocore 44 | ``` 45 | 46 | 47 | Or to build the latest development version from github: 48 | 49 | ```bash 50 | python3 -m pip install 'git+https://github.com/multi-objective/moocore.git#egg=moocore&subdirectory=python' 51 | ``` 52 | 53 | You can also install binary development wheels for your operating system. See the list of wheels here (https://github.com/multi-objective/moocore/tree/wheels), click in the wheel you wish to install then copy the **View Raw** link. For example, 54 | 55 | ```bash 56 | python3 -m pip install https://github.com/multi-objective/moocore/raw/refs/heads/wheels/moocore-0.1.5.dev0-py3-none-macosx_10_9_universal2.whl 57 | ``` 58 | 59 | If the URL does not have the word `raw` then you are not using the raw link. 
60 | 61 | 62 | R package 63 | --------- 64 | 65 | There is also a `moocore` package for R: https://multi-objective.github.io/moocore/r 66 | 67 | 68 | [py-build-badge]: https://github.com/multi-objective/moocore/actions/workflows/python.yml/badge.svg?event=push 69 | [py-build-link]: https://github.com/multi-objective/moocore/actions/workflows/python.yml 70 | [py-coverage-badge]: https://codecov.io/gh/multi-objective/moocore/branch/main/graph/badge.svg?flag=python 71 | [py-coverage-link]: https://app.codecov.io/gh/multi-objective/moocore/tree/main/python 72 | [py-moocore-github]: https://github.com/multi-objective/moocore/tree/main/python#readme 73 | [py-moocore-homepage]: https://multi-objective.github.io/moocore/python 74 | [py-moocore-pypi]: https://pypi.org/project/moocore/ 75 | -------------------------------------------------------------------------------- /python/benchmarks/bench.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import numpy as np 3 | import pandas as pd 4 | import matplotlib.pyplot as plt 5 | import moocore 6 | import timeit 7 | import cpuinfo 8 | 9 | timeit.template = """ 10 | def inner(_it, _timer{init}): 11 | {setup} 12 | _dt = float('inf') 13 | for _i in _it: 14 | _t0 = _timer() 15 | retval = {stmt} 16 | _t1 = _timer() 17 | _dt = min(_dt, _t1 - _t0) 18 | return _dt, retval 19 | """ 20 | 21 | 22 | def read_data(filename): 23 | if not filename.startswith("http"): 24 | filename = pathlib.Path(filename).expanduser() 25 | x = np.loadtxt(filename) 26 | x = moocore.filter_dominated(x) 27 | return x 28 | 29 | 30 | def read_datasets_and_filter_dominated(filename): 31 | filename = pathlib.Path(filename).expanduser() 32 | x = moocore.read_datasets(filename)[:, :-1] 33 | x = moocore.filter_dominated(x) 34 | return x 35 | 36 | 37 | def get_range(lenx, start, stop, step): 38 | return np.arange(start, min(stop, lenx) + 1, step) 39 | 40 | 41 | def get_package_version(package): 42 | match package: 
43 | case "moocore": 44 | from moocore import __version__ as version 45 | case "botorch": 46 | from botorch import __version__ as version 47 | case "pymoo": 48 | from pymoo import __version__ as version 49 | case "jMetalPy": 50 | from jmetal import __version__ as version 51 | case "DEAP_er": 52 | # Requires version >= 0.2.0 53 | from deap_er import __version__ as version 54 | case _: 55 | raise ValueError(f"unknown package {package}") 56 | 57 | return version 58 | 59 | 60 | class Bench: 61 | cpu_model = cpuinfo.get_cpu_info()["brand_raw"] 62 | 63 | def __init__(self, name, n, bench): 64 | self.name = name 65 | self.n = n 66 | self.bench = bench 67 | self.times = {k: [] for k in bench.keys()} 68 | self.versions = { 69 | what: f"{what} ({get_package_version(what)})" 70 | for what in bench.keys() 71 | } 72 | 73 | def keys(self): 74 | return self.bench.keys() 75 | 76 | def __call__(self, what, maxrow, *args): 77 | fun = self.bench[what] 78 | duration, value = timeit.Timer(lambda: fun(*args)).timeit(number=3) 79 | self.times[what] += [duration] 80 | print(f"{self.name}:{maxrow}:{what}:{duration}") 81 | return value 82 | 83 | def plots(self, title, file_prefix): 84 | for what in self.keys(): 85 | self.times[what] = np.asarray(self.times[what]) 86 | 87 | df = ( 88 | pd.DataFrame(dict(n=self.n, **self.times)) 89 | .set_index("n") 90 | .rename(columns=self.versions) 91 | ) 92 | df.plot( 93 | grid=True, 94 | logy=True, 95 | style="o-", 96 | title="", 97 | xticks=df.index, 98 | ylabel="CPU time (seconds)", 99 | ) 100 | plt.title(f"({self.cpu_model})", fontsize=10) 101 | plt.suptitle(f"{title} for {self.name}", fontsize=12) 102 | plt.savefig(f"{file_prefix}_bench-{self.name}-time.png") 103 | 104 | reltimes = {} 105 | for what in self.keys(): 106 | if what == "moocore": 107 | continue 108 | reltimes["Rel_" + what] = self.times[what] / self.times["moocore"] 109 | 110 | df = ( 111 | pd.DataFrame(dict(n=self.n, **reltimes)) 112 | .set_index("n") 113 | 
.rename(columns=self.versions) 114 | ) 115 | df.plot( 116 | grid=True, 117 | style="o-", 118 | title="", 119 | xticks=df.index, 120 | ylabel="Time relative to moocore", 121 | ) 122 | plt.title(f"({self.cpu_model})", fontsize=10) 123 | plt.suptitle(f"{title} for {self.name}", fontsize=12) 124 | plt.savefig(f"{file_prefix}_bench-{self.name}-reltime.png") 125 | -------------------------------------------------------------------------------- /python/benchmarks/bench_epsilon.py: -------------------------------------------------------------------------------- 1 | """Additive Epsilon Indicator Benchmarks 2 | ======================================= 3 | 4 | This example benchmarks the additive epsilon indicator implementation in ``moocore`` against other implementations. 5 | 6 | """ 7 | 8 | from bench import Bench, read_data 9 | 10 | import numpy as np 11 | import moocore 12 | import pathlib 13 | import matplotlib.pyplot as plt 14 | 15 | from jmetal.core.quality_indicator import EpsilonIndicator as jmetal_EPS 16 | 17 | path_to_data = "../../testsuite/data/" 18 | if not pathlib.Path(path_to_data).expanduser().exists(): 19 | path_to_data = ( 20 | "https://github.com/multi-objective/testsuite/raw/refs/heads/main/data/" 21 | ) 22 | 23 | files = { 24 | "rmnk_10D_random_search": ( 25 | path_to_data + "rmnk_0.0_10_16_1_0_random_search_1.txt.xz", 26 | path_to_data + "rmnk_0.0_10_16_1_0_ref.txt.xz", 27 | ), 28 | } 29 | 30 | title = "eps+ computation" 31 | file_prefix = "eps" 32 | 33 | names = files.keys() 34 | for name in names: 35 | x = read_data(files[name][0]) 36 | ref = read_data(files[name][1]) 37 | n = np.arange(200, min(len(x), 1000) + 1, 200) 38 | 39 | bench = Bench( 40 | name=name, 41 | n=n, 42 | bench={ 43 | "moocore": lambda z, ref=ref: moocore.epsilon_additive(z, ref=ref), 44 | "jMetalPy": lambda z, eps=jmetal_EPS(ref): eps.compute(z), 45 | }, 46 | ) 47 | 48 | values = {} 49 | for maxrow in n: 50 | z = x[:maxrow, :] 51 | for what in bench.keys(): 52 | values[what] = bench(what, maxrow, z)
53 | 54 | # Check values 55 | for what in bench.keys(): 56 | if what == "moocore": 57 | continue 58 | a = values["moocore"] 59 | b = values[what] 60 | assert np.isclose(a, b), ( 61 | f"In {name}, maxrow={maxrow}, {what}={b} not equal to moocore={a}" 62 | ) 63 | 64 | del values 65 | bench.plots(file_prefix=file_prefix, title=title) 66 | 67 | plt.show() 68 | -------------------------------------------------------------------------------- /python/benchmarks/bench_hv.py: -------------------------------------------------------------------------------- 1 | """Hypervolume Computation Benchmarks 2 | ======================================= 3 | 4 | This example benchmarks the hypervolume implementation in ``moocore`` against other implementations. 5 | 6 | """ 7 | 8 | from bench import Bench, read_datasets_and_filter_dominated, get_range 9 | 10 | import numpy as np 11 | import moocore 12 | import pathlib 13 | import matplotlib.pyplot as plt 14 | 15 | from botorch.utils.multi_objective.hypervolume import Hypervolume as botorch_HV 16 | import torch 17 | from pymoo.indicators.hv import Hypervolume as pymoo_HV 18 | from jmetal.core.quality_indicator import HyperVolume as jmetal_HV 19 | 20 | # See https://github.com/multi-objective/testsuite/tree/main/data 21 | path_to_data = "../../testsuite/data/" 22 | assert pathlib.Path(path_to_data).expanduser().exists() 23 | 24 | files = { 25 | "DTLZLinearShape.3d": dict( 26 | file=path_to_data + "DTLZLinearShape.3d.front.1000pts.10", 27 | range=(500, 3000, 500), 28 | ), 29 | "DTLZLinearShape.4d": dict( 30 | file=path_to_data + "DTLZLinearShape.4d.front.1000pts.10", 31 | range=(300, 1500, 200), 32 | ), 33 | } 34 | 35 | 36 | title = "HV computation" 37 | file_prefix = "hv" 38 | names = files.keys() 39 | for name in names: 40 | x = read_datasets_and_filter_dominated(files[name]["file"]) 41 | ref = np.ones(x.shape[1]) 42 | n = get_range(len(x), *files[name]["range"]) 43 | 44 | bench = Bench( 45 | name=name, 46 | n=n, 47 | bench={ 48 | 
"moocore": moocore.Hypervolume(ref=ref), 49 | "pymoo": lambda z, hv=pymoo_HV(ref_point=ref): hv(z), 50 | "jMetalPy": lambda z, hv=jmetal_HV(ref): hv.compute(z), 51 | "botorch": lambda z, 52 | hv=botorch_HV(ref_point=torch.from_numpy(-ref)): hv.compute(z), 53 | }, 54 | ) 55 | 56 | values = {} 57 | for maxrow in n: 58 | z = x[:maxrow, :] 59 | for what in bench.keys(): 60 | if what == "botorch": 61 | zz = torch.from_numpy(-z) 62 | else: 63 | zz = z 64 | values[what] = bench(what, maxrow, zz) 65 | 66 | # Check values 67 | for what in bench.keys(): 68 | if what == "moocore": 69 | continue 70 | a = values["moocore"] 71 | b = values[what] 72 | assert np.isclose(a, b), ( 73 | f"In {name}, maxrow={maxrow}, {what}={b} not equal to moocore={a}" 74 | ) 75 | 76 | del values 77 | bench.plots(file_prefix=file_prefix, title=title) 78 | 79 | plt.show() 80 | -------------------------------------------------------------------------------- /python/benchmarks/bench_igdplus.py: -------------------------------------------------------------------------------- 1 | """Hypervolume Computation Benchmarks 2 | ======================================= 3 | 4 | This example benchmarks the hypervolume implementation in ``moocore`` against other implementations. 
5 | 6 | """ 7 | 8 | from bench import Bench, read_data 9 | 10 | import numpy as np 11 | import moocore 12 | import pathlib 13 | import matplotlib.pyplot as plt 14 | 15 | from pymoo.indicators.igd_plus import IGDPlus as pymoo_IGDplus 16 | 17 | path_to_data = "../../testsuite/data/" 18 | if not pathlib.Path(path_to_data).expanduser().exists(): 19 | path_to_data = ( 20 | "https://github.com/multi-objective/testsuite/raw/refs/heads/main/data/" 21 | ) 22 | 23 | files = { 24 | "ran.40000pts.3d": ( 25 | path_to_data + "ran.40000pts.3d.1.xz", 26 | path_to_data + "ran.40001pts.3d.1.xz", 27 | ), 28 | } 29 | 30 | title = "IGD+ computation" 31 | file_prefix = "igd_plus" 32 | names = files.keys() 33 | for name in names: 34 | x = read_data(files[name][0]) 35 | ref = read_data(files[name][1]) 36 | n = np.arange(100, min(len(x), 1300) + 1, 200) 37 | 38 | bench = Bench( 39 | name=name, 40 | n=n, 41 | bench={ 42 | "moocore": lambda z, ref=ref: moocore.igd_plus(z, ref=ref), 43 | "pymoo": lambda z, ind=pymoo_IGDplus(ref): ind(z), 44 | }, 45 | ) 46 | 47 | values = {} 48 | for maxrow in n: 49 | z = x[:maxrow, :] 50 | for what in bench.keys(): 51 | values[what] = bench(what, maxrow, z) 52 | 53 | # Check values 54 | for what in bench.keys(): 55 | if what == "moocore": 56 | continue 57 | a = values["moocore"] 58 | b = values[what] 59 | assert np.isclose(a, b), ( 60 | f"In {name}, maxrow={maxrow}, {what}={b} not equal to moocore={a}" 61 | ) 62 | 63 | del values 64 | bench.plots(file_prefix=file_prefix, title=title) 65 | 66 | plt.show() 67 | -------------------------------------------------------------------------------- /python/benchmarks/bench_ndom.py: -------------------------------------------------------------------------------- 1 | """Dominance Filtering Benchmarks 2 | ============================== 3 | 4 | This example benchmarks the hypervolume implementation in ``moocore`` against other implementations. 
5 | 6 | """ 7 | 8 | from bench import Bench, get_range 9 | 10 | import numpy as np 11 | import moocore 12 | import matplotlib.pyplot as plt 13 | 14 | 15 | import torch 16 | from botorch.utils.multi_objective.pareto import ( 17 | is_non_dominated as botorch_is_non_dominated, 18 | ) 19 | from pymoo.util.nds.non_dominated_sorting import ( 20 | find_non_dominated as pymoo_find_non_dominated, 21 | ) 22 | 23 | # See https://github.com/multi-objective/testsuite/tree/main/data 24 | files = { 25 | "test2D-200k": dict(file="test2D-200k.inp.xz", range=(1000, 10000, 1000)), 26 | "ran3d-10k": dict(file="ran.1000pts.3d.10", range=(1000, 5000, 1000)), 27 | } 28 | 29 | 30 | title = "is_non_dominated()" 31 | file_prefix = "ndom" 32 | 33 | names = files.keys() 34 | for name in names: 35 | x = moocore.get_dataset(files[name]["file"])[:, :-1] 36 | n = get_range(len(x), *files[name]["range"]) 37 | 38 | bench = Bench( 39 | name=name, 40 | n=n, 41 | bench={ 42 | "moocore": lambda z: np.nonzero( 43 | moocore.is_nondominated(z, maximise=True) 44 | )[0], 45 | "pymoo": lambda z: pymoo_find_non_dominated(-z), 46 | "botorch": lambda z: np.nonzero( 47 | np.asarray(botorch_is_non_dominated(z)) 48 | )[0], 49 | }, 50 | ) 51 | 52 | values = {} 53 | for maxrow in n: 54 | z = x[:maxrow, :] 55 | for what in bench.keys(): 56 | if what == "botorch": 57 | # Exclude the conversion to torch from the timing. 
58 | zz = torch.from_numpy(z) 59 | else: 60 | zz = z 61 | values[what] = bench(what, maxrow, zz) 62 | 63 | # Check values 64 | for what in bench.keys(): 65 | if what == "moocore": 66 | continue 67 | a = values["moocore"] 68 | b = values[what] 69 | assert np.allclose(a, b), ( 70 | f"In {name}, maxrow={maxrow}, {what}={b} not equal to moocore={a}" 71 | ) 72 | 73 | del values 74 | bench.plots(file_prefix=file_prefix, title=title) 75 | 76 | plt.show() 77 | -------------------------------------------------------------------------------- /python/benchmarks/python-requirements.txt: -------------------------------------------------------------------------------- 1 | cpuinfo 2 | numpy 3 | pandas 4 | matplotlib 5 | botorch 6 | pymoo 7 | jmetal 8 | -------------------------------------------------------------------------------- /python/doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= LANG=C sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = _build 10 | 11 | # For merging a documentation archive into a git checkout of numpy/doc 12 | # Turn a tag like v1.18.0 into 1.18 13 | # Use sed -n -e 's/patttern/match/p' to return a blank value if no match 14 | # TAG ?= $(shell git describe --tag | sed -n -e's,v\([1-9]\.[0-9]*\)\.[0-9].*,\1,p') 15 | 16 | root_dir:=$(abspath $(shell dirname $(firstword $(MAKEFILE_LIST)))) 17 | 18 | .PHONY: help Makefile 19 | 20 | # Put it first so that "make" without argument is like "make help". 
21 | help: 22 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 23 | 24 | show: 25 | @python3 -c "import webbrowser; webbrowser.open_new_tab('file://$(root_dir)/$(BUILDDIR)/html/index.html')" 26 | 27 | html-noplot: 28 | @$(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) "$(SOURCEDIR)" "$(BUILDDIR)/html" 29 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 30 | 31 | clean: 32 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" -WT --keep-going -d $(BUILDDIR)/doctrees $(SPHINXOPTS) $(O) 33 | $(RM) -r "./$(SOURCEDIR)/reference/generated" 34 | 35 | # Catch-all target: route all unknown targets to Sphinx using the new 36 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 37 | %: Makefile 38 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" -WT --keep-going -d $(BUILDDIR)/doctrees $(SPHINXOPTS) $(O) 39 | -------------------------------------------------------------------------------- /python/doc/conftest.py: -------------------------------------------------------------------------------- 1 | # ruff: noqa: D100, D103 2 | import pytest 3 | import moocore 4 | 5 | 6 | @pytest.fixture(autouse=True) 7 | def add_namespace(doctest_namespace): 8 | doctest_namespace["moocore"] = moocore 9 | -------------------------------------------------------------------------------- /python/doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. 
Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /python/doc/source/_static/css/custom.css: -------------------------------------------------------------------------------- 1 | 2 | html { 3 | --pst-sidebar-secondary: 17rem; 4 | 5 | } 6 | .bd-page-width { 7 | max-width: 100rem; 8 | } 9 | 10 | .intro-card { 11 | padding: 30px 10px 20px 10px; 12 | } 13 | 14 | .intro-card .sd-card-img-top { 15 | margin: 10px; 16 | height: 52px; 17 | background: none !important; 18 | } 19 | 20 | /* https://sphinx-gallery.github.io/stable/advanced.html#using-sphinx-gallery-sidebar-components */ 21 | .sphx-glr-download-link-note, /* Download link note in header */ 22 | .binder-badge, /* Binder launch badge in footer */ 23 | .lite-badge, /* Lite launch badge in footer */ 24 | .sphx-glr-download-jupyter, /* Download Jupyter notebook link in footer */ 25 | .sphx-glr-download-python, /* Download Python script link in footer */ 26 | .sphx-glr-download-zip /* Download zipped link in footer */ 27 | { 28 | display: none; 29 | } 30 | -------------------------------------------------------------------------------- /python/doc/source/_static/eps_bench-rmnk_10D_random_search-time.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/doc/source/_static/eps_bench-rmnk_10D_random_search-time.png -------------------------------------------------------------------------------- 
/python/doc/source/_static/hv_bench-DTLZLinearShape.3d-time.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/doc/source/_static/hv_bench-DTLZLinearShape.3d-time.png -------------------------------------------------------------------------------- /python/doc/source/_static/hv_bench-DTLZLinearShape.4d-time.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/doc/source/_static/hv_bench-DTLZLinearShape.4d-time.png -------------------------------------------------------------------------------- /python/doc/source/_static/igd_plus_bench-ran.40000pts.3d-time.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/doc/source/_static/igd_plus_bench-ran.40000pts.3d-time.png -------------------------------------------------------------------------------- /python/doc/source/_static/ndom_bench-ran3d-10k-time.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/doc/source/_static/ndom_bench-ran3d-10k-time.png -------------------------------------------------------------------------------- /python/doc/source/_static/ndom_bench-test2D-200k-time.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/doc/source/_static/ndom_bench-test2D-200k-time.png -------------------------------------------------------------------------------- /python/doc/source/_templates/autosummary/class.rst: 
-------------------------------------------------------------------------------- 1 | {{ objname | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoclass:: {{ objname }} 6 | :members: 7 | :inherited-members: 8 | :special-members: __call__ 9 | 10 | .. minigallery:: {{ module }}.{{ objname }} {% for meth in methods %}{{ module }}.{{ objname }}.{{ meth }} {% endfor %} 11 | :add-heading: Gallery examples 12 | :heading-level: - 13 | -------------------------------------------------------------------------------- /python/doc/source/_templates/autosummary/function.rst: -------------------------------------------------------------------------------- 1 | {{ objname | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autofunction:: {{ objname }} 6 | 7 | .. _sphx_glr_backref_{{fullname}}: 8 | 9 | .. minigallery:: {{fullname}} 10 | :add-heading: Gallery examples 11 | :heading-level: - 12 | -------------------------------------------------------------------------------- /python/doc/source/contribute/index.rst: -------------------------------------------------------------------------------- 1 | .. _contribute: 2 | 3 | ================================================ 4 | Contribute 5 | ================================================ 6 | 7 | Contributions are welcome !!! 8 | 9 | Ways to Contribute to the Development of moocore 10 | ================================================ 11 | 12 | There are a lot of ways you can contribute: 13 | 14 | 1. Contribute code: 15 | The code is available at https://github.com/multi-objective/moocore/ . 16 | Feel free to fork the project and create a pull request. 17 | 18 | 2. Comments/suggestions on the :ref:`api_reference`: 19 | What functionality is missing? What would be useful APIs of existing functionality? What code/algorithms would be useful to implement? 20 | 21 | 3. 
Improvements to the documentation: 22 | A goal of ``moocore`` is to have up-to-date, detailed and informative documentation such as :ref:`unary_quality_metrics`. 23 | 24 | 4. Interesting :ref:`auto_examples` 25 | For example, tutorials about multi-objective optimization, interesting applications, etc. 26 | 27 | 5. Adding other languages, such as MATLAB, Java, Julia, Rust, etc. 28 | There is also a `moocore R package `_. 29 | And the C code can be used as a library. 30 | -------------------------------------------------------------------------------- /python/doc/source/my_unsrt_style.py: -------------------------------------------------------------------------------- 1 | # ruff: noqa: D100, D101, D102 2 | from pybtex.style.formatting.unsrt import Style as UnsrtStyle 3 | from pybtex.style.template import sentence, href, join, optional, field 4 | from pybtex.plugin import register_plugin 5 | 6 | 7 | class MyUnsrtStyle(UnsrtStyle): 8 | def format_web_refs(self, e): 9 | # based on urlbst output.web.refs 10 | return sentence[ 11 | optional[ 12 | self.format_url(e), 13 | optional[" (visited on ", field("urldate"), ")"], 14 | ], 15 | optional[self.format_eprint(e)], 16 | optional[self.format_pubmed(e)], 17 | optional[self.format_doi(e)], 18 | optional[self.format_bibtex(e)], 19 | ] 20 | 21 | def format_bibtex(self, e): 22 | url = join[ 23 | "https://iridia-ulb.github.io/references/index_bib.html#", e.key 24 | ] 25 | return href[url, "[BibTeX]"] 26 | 27 | 28 | register_plugin("pybtex.style.formatting", "my_unsrt", MyUnsrtStyle) 29 | -------------------------------------------------------------------------------- /python/doc/source/reference/functions.dominance.rst: -------------------------------------------------------------------------------- 1 | Dominance relation 2 | ================== 3 | 4 | .. currentmodule:: moocore 5 | 6 | Dominance relations 7 | ------------------- 8 | 9 | .. 
autosummary:: 10 | :toctree: generated/ 11 | 12 | is_nondominated 13 | is_nondominated_within_sets 14 | filter_dominated 15 | filter_dominated_within_sets 16 | pareto_rank 17 | -------------------------------------------------------------------------------- /python/doc/source/reference/functions.eaf.rst: -------------------------------------------------------------------------------- 1 | Empirical Attainment Function (EAF) 2 | =================================== 3 | 4 | .. currentmodule:: moocore 5 | 6 | EAF computation 7 | --------------- 8 | .. autosummary:: 9 | :toctree: generated/ 10 | 11 | eaf 12 | eafdiff 13 | largest_eafdiff 14 | 15 | Vorob'ev 16 | -------- 17 | .. autosummary:: 18 | :toctree: generated/ 19 | 20 | vorob_t 21 | vorob_dev 22 | -------------------------------------------------------------------------------- /python/doc/source/reference/functions.io.rst: -------------------------------------------------------------------------------- 1 | Read/write/transform data 2 | ========================= 3 | 4 | .. currentmodule:: moocore 5 | 6 | Read data 7 | --------- 8 | .. autosummary:: 9 | :toctree: generated/ 10 | 11 | read_datasets 12 | ReadDatasetsError 13 | get_dataset 14 | get_dataset_path 15 | 16 | Transform data 17 | -------------- 18 | .. autosummary:: 19 | :toctree: generated/ 20 | 21 | normalise 22 | apply_within_sets 23 | 24 | 25 | -------------------------------------------------------------------------------- /python/doc/source/reference/functions.rst: -------------------------------------------------------------------------------- 1 | .. _functions: 2 | 3 | ****************** 4 | Functions by topic 5 | ****************** 6 | 7 | In this chapter functions docstrings are presented, grouped by functionality. 8 | Many docstrings contain example code, which demonstrates basic usage 9 | of the function. 
The examples assume that moocore is imported with:: 10 | 11 | >>> import moocore 12 | 13 | A convenient way to execute examples is the ``%doctest_mode`` mode of 14 | IPython, which allows for pasting of multi-line examples and preserves 15 | indentation. 16 | 17 | .. toctree:: 18 | :maxdepth: 1 19 | 20 | functions.io 21 | functions.dominance 22 | functions.metrics 23 | functions.eaf 24 | -------------------------------------------------------------------------------- /python/doc/source/reference/index.rst: -------------------------------------------------------------------------------- 1 | .. module:: mooocore 2 | 3 | .. _api_reference: 4 | 5 | ############# 6 | API reference 7 | ############# 8 | 9 | :Release: |version| 10 | :Date: |today| 11 | 12 | This reference manual details functions, modules, and objects 13 | included in moocore, describing what they are and what they do. 14 | For learning how to use moocore, see the :ref:`complete documentation `. 15 | 16 | .. toctree:: 17 | :maxdepth: 2 18 | 19 | functions 20 | -------------------------------------------------------------------------------- /python/doc/source/whatsnew/index.rst: -------------------------------------------------------------------------------- 1 | .. _whatsnew: 2 | 3 | ********** 4 | What's new 5 | ********** 6 | 7 | Version 0.1.7 (04/06/2025) 8 | -------------------------- 9 | 10 | - :func:`~moocore.hypervolume` now uses the HV3D\ :sup:`+` algorithm for the 3D case and the HV4D\ :sup:`+` algorithm for the 4D case. 11 | For dimensions larger than 4, the recursive algorithm uses HV4D\ :sup:`+` as the base case, which is significantly faster. 12 | - :func:`~moocore.read_datasets` is significantly faster for large files. 13 | - :func:`~moocore.is_nondominated` and :func:`~moocore.filter_dominated` are 14 | faster for 3D inputs. 15 | - New function: :func:`~moocore.hv_contributions`. 
16 | - New online datasets: ``test2D-200k.inp.xz`` and ``ran.1000pts.3d.10`` (see 17 | :func:`~moocore.get_dataset`). 18 | 19 | Version 0.1.6 (14/05/2025) 20 | -------------------------- 21 | 22 | - New function: :func:`~moocore.largest_eafdiff`. 23 | - New class: :class:`~moocore.RelativeHypervolume`. 24 | - New dataset ``tpls50x20_1_MWT.csv``. 25 | - Extended example :ref:`sphx_glr_auto_examples_plot_metrics.py`. 26 | - ``vorobT()`` and ``vorobDev()`` were renamed to :func:`~moocore.vorob_t` and 27 | :func:`~moocore.vorob_dev` to follow Python convention. 28 | - :func:`~moocore.get_dataset_path` and :func:`~moocore.get_dataset` can download large datasets from a remote repository. 29 | 30 | Version 0.1.4 (30/10/2024) 31 | -------------------------- 32 | 33 | - Improved example :ref:`sphx_glr_auto_examples_plot_pandas.py` to work in Pandas version >= 2. 34 | - Changed behavior of :func:`~moocore.apply_within_sets`. The previous behavior could lead to subtle bugs. 35 | 36 | 37 | Version 0.1.3 (28/10/2024) 38 | -------------------------- 39 | 40 | - New: :class:`~moocore.Hypervolume`: Object-oriented API for hypervolume indicator. 41 | - New: :func:`~moocore.apply_within_sets`: Utility function to apply operations to individual datasets. 42 | - New: :func:`~moocore.is_nondominated_within_sets`: Utility function to identify nondominated points within sets. 43 | - New example using :class:`pandas.DataFrame` in :ref:`sphx_glr_auto_examples_plot_pandas.py`. 44 | - Fix bug in :func:`~moocore.normalise` when the input is :class:`pandas.DataFrame` or some other non-contiguous array. 45 | 46 | 47 | Version 0.1.2 (18/09/2024) 48 | -------------------------- 49 | 50 | - New: :func:`~moocore.hv_approx` 51 | - Documentation improvements. 52 | - New gallery examples. 53 | -------------------------------------------------------------------------------- /python/examples/README.rst: -------------------------------------------------------------------------------- 1 | .. 
_auto_examples: 2 | 3 | Examples 4 | ======== 5 | 6 | These are longer and more detailed examples than those accompanying the 7 | documentation of each function. These examples may require additional packages 8 | to run. 9 | -------------------------------------------------------------------------------- /python/examples/plot_hv_approx.py: -------------------------------------------------------------------------------- 1 | r"""Comparing methods for approximating the hypervolume 2 | =================================================== 3 | 4 | This example shows how to approximate the hypervolume metric of the 5 | ``CPFs.txt`` dataset using both HypE, :func:`moocore.whv_hype()`, and DZ2019, 6 | :func:`moocore.hv_approx()` for several values of the number of samples between 7 | :math:`10^1` and :math:`10^5`. 8 | 9 | """ 10 | 11 | import numpy as np 12 | import moocore 13 | 14 | # %% 15 | # 16 | # First calculate the exact hypervolume. 17 | 18 | ref = 2.1 19 | x = moocore.get_dataset("CPFs.txt")[:, :-1] 20 | x = moocore.filter_dominated(x) 21 | x = moocore.normalise(x, to_range=[1, 2]) 22 | true_hv = moocore.hypervolume(x, ref=ref) 23 | 24 | # %% 25 | # 26 | # Next, we approximate the hypervolume using :math:`\{10^1, 10^2, \ldots, 27 | # 10^5\}` random samples to show the higher samples reduce the approximation 28 | # error. Since the approximation is stochastic, we perform 10 repetitions of 29 | # each computation. 
30 | 31 | nreps = 10 32 | nsamples_exp = 5 33 | rng1 = np.random.default_rng(42) 34 | rng2 = np.random.default_rng(42) 35 | hype = {} 36 | dz = {} 37 | for i in range(1, nsamples_exp + 1): 38 | hype[i] = [] 39 | dz[i] = [] 40 | for r in range(nreps): 41 | res = moocore.whv_hype(x, ref=ref, ideal=0, nsamples=10**i, seed=rng1) 42 | hype[i].append(res) 43 | res = moocore.hv_approx(x, ref=ref, nsamples=10**i, seed=rng2) 44 | dz[i].append(res) 45 | 46 | print( 47 | f"True HV : {true_hv:.5f}", 48 | f"Mean HYPE : {np.mean(hype[5]):.5f} [{np.min(hype[5]):.5f}, {np.max(hype[5]):.5f}]", 49 | f"Mean DZ2019: {np.mean(dz[5]):.5f} [{np.min(dz[5]):.5f}, {np.max(dz[5]):.5f}]", 50 | sep="\n", 51 | ) 52 | 53 | 54 | # %% 55 | # 56 | # Next, we plot the results. 57 | 58 | import pandas as pd 59 | 60 | hype = pd.DataFrame(hype) 61 | dz = pd.DataFrame(dz) 62 | hype["Method"] = "HypE" 63 | dz["Method"] = "DZ2019" 64 | df = ( 65 | pd.concat([hype, dz]) 66 | .reset_index(names="rep") 67 | .melt(id_vars=["rep", "Method"], var_name="samples") 68 | ) 69 | df["samples"] = 10 ** df["samples"] 70 | df["value"] = np.abs(df["value"] - true_hv) / true_hv 71 | 72 | import matplotlib.pyplot as plt 73 | import seaborn as sns 74 | 75 | ax = sns.lineplot(x="samples", y="value", hue="Method", data=df, marker="o") 76 | ax.set(xscale="log", yscale="log", ylabel="Relative error") 77 | plt.show() 78 | -------------------------------------------------------------------------------- /python/examples/plot_pandas.py: -------------------------------------------------------------------------------- 1 | """========================= 2 | Using moocore with Pandas 3 | ========================= 4 | 5 | This example shows how to use ``moocore`` functions with Pandas (https://pandas.pydata.org/). 
This example requires pandas version >= 2.0.0 6 | 7 | """ 8 | 9 | import moocore 10 | import pandas as pd 11 | 12 | print(f"pandas version: {pd.__version__}") 13 | 14 | # %% 15 | # First, we create a toy Pandas :class:`~pandas.DataFrame`. 16 | 17 | df = pd.DataFrame( 18 | dict( 19 | obj1=[1, 2, 3, 4, 5], 20 | obj2=[5, 4, 3, 2, 1], 21 | obj3=[100, 200, 200, 300, 100], 22 | algo=2 * ["foo"] + 2 * ["bar"] + ["foo"], 23 | ) 24 | ) 25 | df 26 | 27 | # %% 28 | # Normalize it (only replace the objective columns!). 29 | 30 | obj_cols = ["obj1", "obj2", "obj3"] 31 | df[obj_cols] = moocore.normalise(df[obj_cols], to_range=[1, 2]) 32 | df 33 | 34 | # %% 35 | # Calculate the hypervolume for each ``algo`` using :meth:`~pandas.DataFrame.groupby` and :meth:`~pandas.core.groupby.DataFrameGroupBy.apply`. 36 | 37 | ref = 2.1 38 | hv = ( 39 | df.groupby("algo")[obj_cols] 40 | .apply(moocore.hypervolume, ref=ref) 41 | .reset_index(name="hv") 42 | ) 43 | hv 44 | 45 | # %% 46 | # Or we can just use: 47 | 48 | hv = moocore.apply_within_sets( 49 | df[obj_cols], df["algo"], moocore.hypervolume, ref=ref 50 | ) 51 | hv 52 | 53 | 54 | # %% 55 | # :func:`moocore.apply_within_sets()` processes each group in 56 | # order, even if the elements of the same group are not contiguous. That is, it 57 | # processes the groups like :meth:`pandas.Series.unique` and not like 58 | # :class:`set` or :func:`numpy.unique()`. 59 | 60 | df["algo"].unique() 61 | 62 | # %% 63 | # If we have multiple columns that we want to use to define the sets, such as ``algo`` and ``run``: 64 | 65 | df = pd.DataFrame( 66 | dict( 67 | obj1=[1, 2, 3, 4, 5, 6, 5, 4, 3, 1], 68 | obj2=[6, 5, 4, 3, 2, 1, 5, 4, 5, 6], 69 | obj3=[1, 2, 3, 4, 5, 6, 6, 7, 5, 2], 70 | algo=["a"] * 3 + ["b"] * 3 + ["a", "b"] * 2, 71 | run=[1, 1, 2, 1, 1, 2, 2, 2, 1, 1], 72 | ) 73 | ) 74 | obj_cols = ["obj1", "obj2", "obj3"] 75 | df 76 | 77 | # %% 78 | # We can still use :meth:`~pandas.DataFrame.groupby` but we may need to reset and clean-up the index. 
79 | 80 | df.groupby(["algo", "run"])[obj_cols].apply( 81 | moocore.filter_dominated 82 | ).reset_index(level=["algo", "run"]) 83 | 84 | # %% 85 | # Or we can combine the multiple columns as one to define the sets: 86 | # 87 | sets = df["algo"].astype(str) + "-" + df["run"].astype(str) 88 | sets 89 | 90 | # %% 91 | # then identify nondominated rows within each set: 92 | # 93 | is_nondom = moocore.is_nondominated_within_sets(df[obj_cols], sets=sets) 94 | is_nondom 95 | 96 | # %% 97 | # And use the boolean vector above to filter rows: 98 | # 99 | df[is_nondom] 100 | -------------------------------------------------------------------------------- /python/requirements_dev.txt: -------------------------------------------------------------------------------- 1 | setuptools>=70.1,<74 # Sync with .pre-commit-config.yaml 2 | cffi >= 1.15.1 3 | numpy >= 1.23.0 4 | 5 | pre-commit >= 3.3.2 6 | ruff >= 0.11.2 7 | 8 | tox >= 4.6.2 # Sync with tox.ini 9 | pytest >= 7 # Sync with tox.ini 10 | pytest-cov >= 4.1.0 11 | virtualenv >= 20 12 | build 13 | 14 | sphinx >= 6.0 15 | pydata_sphinx_theme>=0.16.0 16 | jupyter 17 | ipykernel 18 | kaleido 19 | sphinx-gallery>=0.19.0 20 | sphinxcontrib-napoleon 21 | sphinxcontrib-bibtex 22 | sphinx-autodoc-typehints 23 | sphinx-copybutton 24 | sphinx-design 25 | jupyterlab 26 | ipywidgets 27 | 28 | # Gallery examples 29 | pandas >=2.0.0 30 | seaborn 31 | -------------------------------------------------------------------------------- /python/setup.py: -------------------------------------------------------------------------------- 1 | # ruff: noqa: D100, D101, D102, D103, N801 2 | from setuptools import setup 3 | from wheel.bdist_wheel import bdist_wheel as _bdist_wheel 4 | 5 | 6 | class bdist_wheel_abi_none(_bdist_wheel): 7 | def finalize_options(self): 8 | _bdist_wheel.finalize_options(self) 9 | self.root_is_pure = False 10 | 11 | def get_tag(self): 12 | python, abi, plat = _bdist_wheel.get_tag(self) 13 | return "py3", "none", plat 14 | 15 | 16 
| setup( 17 | cffi_modules=["src/moocore/_ffi_build.py:ffibuilder"], 18 | cmdclass={"bdist_wheel": bdist_wheel_abi_none}, 19 | ) 20 | -------------------------------------------------------------------------------- /python/src/conftest.py: -------------------------------------------------------------------------------- 1 | # ruff: noqa: D100, D101, D102, D103 2 | import pytest 3 | import moocore 4 | 5 | 6 | @pytest.fixture(autouse=True) 7 | def add_doctest_imports(doctest_namespace) -> None: 8 | doctest_namespace["moocore"] = moocore 9 | -------------------------------------------------------------------------------- /python/src/moocore/__init__.py: -------------------------------------------------------------------------------- 1 | # ruff: noqa: D104 2 | from ._moocore import ( 3 | Hypervolume, 4 | ReadDatasetsError, 5 | RelativeHypervolume, 6 | apply_within_sets, 7 | avg_hausdorff_dist, 8 | eaf, 9 | eafdiff, 10 | epsilon_additive, 11 | epsilon_mult, 12 | filter_dominated, 13 | filter_dominated_within_sets, 14 | hv_approx, 15 | hv_contributions, 16 | hypervolume, 17 | igd, 18 | igd_plus, 19 | is_nondominated, 20 | is_nondominated_within_sets, 21 | largest_eafdiff, 22 | normalise, 23 | pareto_rank, 24 | read_datasets, 25 | total_whv_rect, 26 | vorob_dev, 27 | vorob_t, 28 | whv_hype, 29 | whv_rect, 30 | ) 31 | 32 | from ._datasets import ( 33 | get_dataset, 34 | get_dataset_path, 35 | ) 36 | 37 | from importlib.metadata import version as _metadata_version 38 | 39 | __version__ = _metadata_version("moocore") 40 | # Remove symbols imported for internal use 41 | del _metadata_version 42 | 43 | 44 | __all__ = [ 45 | "Hypervolume", 46 | "ReadDatasetsError", 47 | "RelativeHypervolume", 48 | "apply_within_sets", 49 | "avg_hausdorff_dist", 50 | "eaf", 51 | "eafdiff", 52 | "epsilon_additive", 53 | "epsilon_mult", 54 | "filter_dominated", 55 | "filter_dominated_within_sets", 56 | "get_dataset", 57 | "get_dataset_path", 58 | "hv_approx", 59 | "hv_contributions", 60 | 
"hypervolume", 61 | "igd", 62 | "igd_plus", 63 | "is_nondominated", 64 | "is_nondominated_within_sets", 65 | "largest_eafdiff", 66 | "normalise", 67 | "pareto_rank", 68 | "read_datasets", 69 | "total_whv_rect", 70 | "vorob_dev", 71 | "vorob_t", 72 | "whv_hype", 73 | "whv_rect", 74 | ] 75 | -------------------------------------------------------------------------------- /python/src/moocore/_ffi_build.py: -------------------------------------------------------------------------------- 1 | """C library compilation config. 2 | 3 | This script is part of the compilation of the C library using CFFi. 4 | 5 | Every time a new C function is created, its prototype must be added to the `ffibuilder.cdef` function call 6 | 7 | The header files required must be placed in the first argument of `ffibuilder.set_source`, and any additional `.C` files must be added to the `sources` argument of `ffibuilder.set_source` 8 | 9 | """ 10 | 11 | import os 12 | import platform 13 | from cffi import FFI 14 | 15 | libmoocore_h = "src/moocore/libmoocore.h" 16 | sources_path = "src/moocore/libmoocore/" 17 | headers = """ 18 | #include "io.h" 19 | #include "hv.h" 20 | #include "igd.h" 21 | #include "nondominated.h" 22 | #include "epsilon.h" 23 | #include "eaf.h" 24 | #include "whv.h" 25 | #include "whv_hype.h" 26 | """ 27 | sources = [ 28 | "avl.c", 29 | "eaf.c", 30 | "eaf3d.c", 31 | "eafdiff.c", 32 | "hv.c", 33 | "hv3dplus.c", 34 | "hv4d.c", 35 | "hv_contrib.c", 36 | "io.c", 37 | "libutil.c", # For fatal_error() 38 | "mt19937/mt19937.c", 39 | "pareto.c", 40 | "rng.c", 41 | "whv.c", 42 | "whv_hype.c", 43 | ] 44 | sources = [sources_path + f for f in sources] 45 | 46 | 47 | is_windows = platform.system() == "Windows" 48 | 49 | 50 | def get_config(): 51 | from distutils.core import Distribution 52 | from distutils.sysconfig import get_config_vars 53 | 54 | get_config_vars() # workaround for a bug of distutils, e.g. 
on OS/X 55 | config = Distribution().get_command_obj("config") 56 | return config 57 | 58 | 59 | def uses_msvc(): 60 | config = get_config() 61 | return config.try_compile('#ifndef _MSC_VER\n#error "not MSVC"\n#endif') 62 | 63 | 64 | # Try to detect cross-compilation. 65 | def _get_target_platform(arch_flags, default): 66 | flags = [f for f in arch_flags.split(" ") if f.strip() != ""] 67 | try: 68 | pos = flags.index("-arch") 69 | 70 | return flags[pos + 1].lower() 71 | except ValueError: 72 | pass 73 | 74 | return default 75 | 76 | 77 | MSVC_CFLAGS = ["/GL", "/O2", "/GS-", "/wd4996"] 78 | MSVC_LDFLAGS = ["/LTCG"] 79 | GCC_CFLAGS = ["-flto", "-O3"] 80 | 81 | extra_compile_args = [] 82 | extra_link_args = [] 83 | if is_windows and uses_msvc(): 84 | extra_compile_args.extend(MSVC_CFLAGS) 85 | extra_link_args.extend(MSVC_LDFLAGS) 86 | else: 87 | extra_compile_args.extend(GCC_CFLAGS) 88 | extra_link_args.extend(GCC_CFLAGS) 89 | target_platform = _get_target_platform( 90 | os.environ.get("ARCHFLAGS", ""), platform.machine() 91 | ) 92 | # Optimized version requires SSE2 extensions. They have been around since 93 | # 2001 so we try to compile it on every recent-ish x86. 
94 | sse2 = target_platform in ("i686", "x86", "x86_64", "AMD64") 95 | if sse2: 96 | extra_compile_args.append("-msse2") 97 | 98 | cflags = os.environ.get("CFLAGS", "") 99 | if cflags != "": 100 | extra_compile_args.extend(cflags.split()) 101 | ldflags = os.environ.get("LDFLAGS", "") 102 | if ldflags != "": 103 | extra_link_args.extend(ldflags.split()) 104 | 105 | ffibuilder = FFI() 106 | file_path = os.path.dirname(os.path.realpath(__file__)) 107 | libmoocore_path = os.path.join(file_path, "libmoocore") 108 | 109 | with open(libmoocore_h) as f: 110 | ffibuilder.cdef(f.read()) 111 | 112 | 113 | ffibuilder.set_source( 114 | "moocore._libmoocore", 115 | headers, 116 | sources=sources, 117 | include_dirs=[libmoocore_path], 118 | extra_compile_args=extra_compile_args, 119 | extra_link_args=extra_compile_args, 120 | ) 121 | 122 | if __name__ == "__main__": 123 | ffibuilder.compile(verbose=True) 124 | -------------------------------------------------------------------------------- /python/src/moocore/_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ._libmoocore import ffi 3 | 4 | 5 | def get1_and_is_copied(x, x_): 6 | x_copied = id(x) != id(x_) 7 | return x, x_copied 8 | 9 | 10 | def asarray_maybe_copy(x, dtype=float): 11 | """Convert to numpy array of dtype=float and detect copies.""" 12 | return get1_and_is_copied(np.asarray(x, dtype=dtype), x) 13 | 14 | 15 | def unique_nosort(array, **kwargs): 16 | """Return unique values without sorting them. 17 | 18 | See https://github.com/numpy/numpy/issues/7265 19 | 20 | """ 21 | uniq, index = np.unique(array, return_index=True, **kwargs) 22 | return uniq[index.argsort()] 23 | 24 | 25 | def np2d_to_double_array(x): 26 | nrows = ffi.cast("int", x.shape[0]) 27 | ncols = ffi.cast("int", x.shape[1]) 28 | # FIXME: This may cause an unexpected copy. Make this an assert and force 29 | # the caller to enforce it if needed. 
def np2d_to_double_array(x):
    """Convert a 2D numpy array into a CFFI ``double []`` buffer.

    Returns a tuple ``(buffer, nrows, ncols)`` where ``nrows``/``ncols``
    are already cast to C ``int``.
    """
    nrows = ffi.cast("int", x.shape[0])
    ncols = ffi.cast("int", x.shape[1])
    # FIXME: This may cause an unexpected copy. Make this an assert and force
    # the caller to enforce it if needed.
    x = np.ascontiguousarray(x)
    x = ffi.from_buffer("double []", x)
    return x, nrows, ncols


def np1d_to_double_array(x):
    """Convert a 1D numpy array into a CFFI ``double []`` buffer.

    Returns a tuple ``(buffer, size)`` with ``size`` cast to C ``int``.
    """
    size = ffi.cast("int", x.shape[0])
    x = np.ascontiguousarray(x)
    x = ffi.from_buffer("double []", x)
    return x, size


def np1d_to_int_array(x):
    """Convert a 1D numpy array into a CFFI ``int []`` buffer.

    Returns a tuple ``(buffer, size)`` with ``size`` cast to C ``int``.
    """
    size = ffi.cast("int", x.shape[0])
    # Pass the dtype type (np.intc) rather than an np.intc() instance:
    # equivalent, but avoids constructing a throwaway scalar.
    x = np.ascontiguousarray(x, dtype=np.intc)
    x = ffi.from_buffer("int []", x)
    return x, size


def atleast_1d_of_length_n(x, n):
    """Return ``x`` as a 1D array of length ``n``.

    A scalar or single-element input is broadcast with :func:`numpy.full`.

    Raises
    ------
    ValueError
        If ``x`` has more than one element and its length is not ``n``.
    """
    x = np.atleast_1d(x)
    if len(x) == 1:
        return np.full(n, x[0])
    if x.shape[0] == n:
        return x
    raise ValueError(
        f"1D array must have length {n} but it has length {x.shape[0]}"
    )


def is_integer_value(n):
    """Return True if ``n`` holds an integral value (e.g. ``5`` or ``5.0``)."""
    if isinstance(n, int):
        return True
    if n is None:
        return False
    # FIXME: When we bump to Python 3.12, we can use float().is_integer()
    # OverflowError: int(float("inf")) would otherwise propagate; an
    # infinite value is not an integer, so report False instead of raising.
    try:
        return int(n) == n
    except (ValueError, TypeError, OverflowError):
        return False
-------------------------------------------------------------------------------- /python/src/moocore/libmoocore.h: -------------------------------------------------------------------------------- 1 | /* From stdlib.h */ 2 | void free(void *); 3 | int read_datasets(const char * filename, double **data_p, int *ncols_p, int *datasize_p); 4 | double fpli_hv(const double *data, int d, int n, const double *ref); 5 | void hv_contributions (double *hvc, double *points, int dim, int size, const double * ref); 6 | double IGD (const double *data, int nobj, int npoints, const double *ref, int ref_size, const bool * maximise); 7 | double IGD_plus (const double *data, int nobj, int npoints, const double *ref, int ref_size, const bool * maximise); 8 | double avg_Hausdorff_dist (const double *data, int nobj, int npoints, const double *ref, int ref_size, const bool * maximise, unsigned int p); 9 | double epsilon_additive (const double *data, int nobj, int npoints, const double *ref, int ref_size, const bool * maximise); 10 | double epsilon_mult (const double *data, int nobj, int npoints, const double *ref, int ref_size, const bool * maximise); 11 | bool * is_nondominated (const double * data, int nobj, int npoint, const bool * maximise, bool keep_weakly); 12 | int * pareto_rank (const double *points, int dim, int size); 13 | void agree_normalise (double *data, int nobj, int npoint, const bool * maximise, 14 | const double lower_range, const double upper_range, const double *lbound, const double *ubound); 15 | double * eaf_compute_matrix (int *eaf_npoints, double * data, int nobj, const int *cumsizes, 16 | int nruns, const double * percentile, int nlevels); 17 | double * eafdiff_compute_rectangles(int *eaf_npoints, double * data, int nobj, 18 | const int *cumsizes, int nruns, int intervals); 19 | double * 20 | eafdiff_compute_matrix(int *eaf_npoints, double * data, int nobj, 21 | const int *cumsizes, int nruns, int intervals); 22 | 23 | /* whv_hype.h */ 24 | double 
# ruff: noqa: D100, D101, D102, D103
import pytest
import moocore


@pytest.fixture(autouse=True, scope="module")
def test_datapath(request):
    """Return a resolver for data files used by the running test script.

    The returned callable looks for ``file_path`` under the test script's
    ``test_data`` directory; if no such file exists there, it falls back to
    the datasets bundled with ``moocore``.
    """

    def resolve(file_path: str):
        local = request.path.parent / "test_data" / file_path
        return local if local.is_file() else moocore.get_dataset_path(file_path)

    return resolve
# Run from this folder
# Rscript R_generate_expected_output.R
library(eaf)

# Raw benchmark datasets read in "objectives + set number" form.
dat1 <- read_datasets("../input1.dat")
dat_sphere <- read_datasets("../spherical-250-10-3d.txt")
dat_uniform <- read_datasets("../uniform-250-10-3d.txt")
wrots_l10 <- read_datasets("../wrots_l10w100_dat.xz")
wrots_l100 <- read_datasets("../wrots_l100w10_dat.xz")
ALG_1_dat <- read_datasets("../ALG_1_dat.xz")

# These datasets are already in the form "Data + set number"
diff_points_100_1 <- read.table("../100_diff_points_1.txt")
diff_points_100_2 <- read.table("../100_diff_points_2.txt")

# get_eaf test fetch results
eaf_dat1 <- eafs(dat1[,1:2], dat1[,3])
eaf_dat_sphere <- eafs(dat_sphere[,1:3], dat_sphere[,4])
eaf_dat_uniform <- eafs(dat_uniform[,1:3], dat_uniform[,4])
eaf_wrots_l10 <- eafs(wrots_l10[,1:2], wrots_l10[,3])
eaf_wrots_l100 <- eafs(wrots_l100[,1:2], wrots_l100[,3])
eaf_alg_1_dat <- eafs(ALG_1_dat[,1:2], ALG_1_dat[,3])

# get_eaf test with percentiles fetch results
eaf_dat1_pct <- eafs(dat1[,1:2], dat1[,3], percentiles=c(0,50,100))
eaf_dat_sphere_pct <- eafs(dat_sphere[,1:3], dat_sphere[,4], percentiles=c(0,50,100))
eaf_dat_uniform_pct <- eafs(dat_uniform[,1:3], dat_uniform[,4], percentiles=c(0,50,100))
eaf_wrots_l10_pct <- eafs(wrots_l10[,1:2], wrots_l10[,3], percentiles=c(0,50,100))
eaf_wrots_l100_pct <- eafs(wrots_l100[,1:2], wrots_l100[,3], percentiles=c(0,50,100))
# BUG FIX: this previously computed percentiles of the already-computed EAF
# (eaf_alg_1_dat) instead of the raw dataset, unlike every other *_pct line.
eaf_alg_1_dat_pct <- eafs(ALG_1_dat[,1:2], ALG_1_dat[,3], percentiles=c(0,50,100))

# get_diff_eaf tests
eaf_diff_point12 <- eafdiff(diff_points_100_1, diff_points_100_2)

# get_diff_eaf tests with intervals
eaf_diff_point12_int3 <- eafdiff(diff_points_100_1, diff_points_100_2, intervals=3)

# Write results
write.table(dat1, "read_datasets/dat1_read_datasets.txt", row.names = FALSE, col.names = FALSE)
write.table(dat_sphere, "read_datasets/spherical_read_datasets.txt", row.names = FALSE, col.names = FALSE)
write.table(dat_uniform, "read_datasets/uniform_read_datasets.txt", row.names = FALSE, col.names = FALSE)
write.table(wrots_l10, "read_datasets/wrots_l10_read_datasets.txt", row.names = FALSE, col.names = FALSE)
write.table(wrots_l100, "read_datasets/wrots_l100_read_datasets.txt", row.names = FALSE, col.names = FALSE)
write.table(ALG_1_dat, "read_datasets/ALG_1_dat_read_datasets.txt", row.names = FALSE, col.names = FALSE)
# corresponds to get_eaf function
# NOTE(review): the expected_output tree in the repository uses "eaf/" with
# files named "*_eaf.txt", not "get_eaf/" with "*_get_eaf.txt" — confirm
# whether these output paths are stale before regenerating.
write.table(eaf_dat1, "get_eaf/dat1_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_dat_sphere, "get_eaf/spherical_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_dat_uniform, "get_eaf/uniform_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_wrots_l10, "get_eaf/wrots_l10_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_wrots_l100, "get_eaf/wrots_l100_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_alg_1_dat, "get_eaf/ALG_1_dat_get_eaf.txt", row.names = FALSE, col.names = FALSE)
# EAF with percentile values (0,50,100)
write.table(eaf_dat1_pct, "get_eaf/pct_dat1_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_dat_sphere_pct, "get_eaf/pct_spherical_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_dat_uniform_pct, "get_eaf/pct_uniform_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_wrots_l10_pct, "get_eaf/pct_wrots_l10_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_wrots_l100_pct, "get_eaf/pct_wrots_l100_get_eaf.txt", row.names = FALSE, col.names = FALSE)
write.table(eaf_alg_1_dat_pct, "get_eaf/pct_ALG_1_dat_get_eaf.txt", row.names = FALSE, col.names = FALSE)

# get_diff_eaf tests
write.table(eaf_diff_point12, "get_diff_eaf/points12_get_diff_eaf.txt", row.names = FALSE, col.names = FALSE)

# get_diff_eaf tests with intervals
write.table(eaf_diff_point12_int3, "get_diff_eaf/int3_points12_get_diff_eaf.txt", row.names = FALSE, col.names = FALSE)
0.974686764000143 7.65893643549851 50 12 | 1.06855706598576 7.49376946248667 50 13 | 1.54506255017944 6.71024290266456 50 14 | 1.59648879949154 5.98825094023213 50 15 | 2.16315952106685 4.73944349565763 50 16 | 2.85891341397281 4.49240941089634 50 17 | 3.3403539670376 2.8937744445502 50 18 | 4.61023931520997 2.87955366508796 50 19 | 4.96525837197324 2.29231997983118 50 20 | 7.04694466937728 1.83484357690552 50 21 | 9.73980549839088 1.00153569238064 50 22 | 1.13096306363143 9.72645436275669 100 23 | 2.71891213524363 8.84691922988203 100 24 | 3.3403539670376 7.49376946248667 100 25 | 4.43498452265863 6.94327480946511 100 26 | 4.96525837197324 6.20957074206312 100 27 | 7.92511294874715 3.92669597935785 100 28 | -------------------------------------------------------------------------------- /python/tests/test_data/expected_output/eaf/pct_spherical_eaf.txt.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/tests/test_data/expected_output/eaf/pct_spherical_eaf.txt.xz -------------------------------------------------------------------------------- /python/tests/test_data/expected_output/eaf/pct_uniform_eaf.txt.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/tests/test_data/expected_output/eaf/pct_uniform_eaf.txt.xz -------------------------------------------------------------------------------- /python/tests/test_data/expected_output/eaf/pct_wrots_l100_eaf.txt.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/tests/test_data/expected_output/eaf/pct_wrots_l100_eaf.txt.xz -------------------------------------------------------------------------------- 
/python/tests/test_data/expected_output/eaf/pct_wrots_l10_eaf.txt.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/tests/test_data/expected_output/eaf/pct_wrots_l10_eaf.txt.xz -------------------------------------------------------------------------------- /python/tests/test_data/expected_output/eaf/spherical_eaf.txt.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/tests/test_data/expected_output/eaf/spherical_eaf.txt.xz -------------------------------------------------------------------------------- /python/tests/test_data/expected_output/eaf/uniform_eaf.txt.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/tests/test_data/expected_output/eaf/uniform_eaf.txt.xz -------------------------------------------------------------------------------- /python/tests/test_data/expected_output/eaf/wrots_l100_eaf.txt.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/tests/test_data/expected_output/eaf/wrots_l100_eaf.txt.xz -------------------------------------------------------------------------------- /python/tests/test_data/expected_output/eaf/wrots_l10_eaf.txt.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/python/tests/test_data/expected_output/eaf/wrots_l10_eaf.txt.xz -------------------------------------------------------------------------------- 
# ruff: noqa: D100, D101, D102, D103
import pytest
import moocore
from numpy.testing import assert_array_equal, assert_allclose

# Skip this whole module when pandas is not installed.
pd = pytest.importorskip("pandas")


def test_normalise_pandas():
    # normalise() must accept a DataFrame slice and return values scaled to
    # [0, 1] per column, leaving non-objective columns ("Method") untouched.
    df = pd.DataFrame(
        dict(
            Bus=[1, 2, 3, 4, 5],
            TA=[8, 3, 5, 7, 4],
            Time=[22292, 47759, 49860, 88740, 92086],
            Method=2 * ["No Delay"] + 3 * ["Implicit"],
        )
    )
    obj_cols = ["Bus", "TA", "Time"]
    df[obj_cols] = moocore.normalise(df[obj_cols])

    # Expected values: (x - min) / (max - min) per objective column.
    df_true = pd.DataFrame(
        dict(
            Bus=[0.0, 0.25, 0.5, 0.75, 1.0],
            TA=[1.0, 0.0, 0.4, 0.8, 0.2],
            Time=[0.0, 0.3648881, 0.39499097, 0.95205892, 1.0],
            Method=2 * ["No Delay"] + 3 * ["Implicit"],
        )
    )

    pd.testing.assert_frame_equal(df, df_true)


def test_example_pandas():
    """Corresponds to ``examples/plot_pandas.py``."""
    df = pd.DataFrame(
        dict(
            obj1=[1, 2, 3, 4, 5],
            obj2=[5, 4, 3, 2, 1],
            obj3=[100, 200, 200, 300, 100],
            algo=2 * ["foo"] + 2 * ["bar"] + ["foo"],
        )
    )
    obj_cols = ["obj1", "obj2", "obj3"]

    # Normalise objectives into [1, 2] so a single scalar reference works.
    df[obj_cols] = moocore.normalise(df[obj_cols], to_range=[1, 2])

    ref = 2.1
    # Per-algorithm hypervolume via pandas groupby-apply; groupby sorts keys,
    # hence the ["bar", "foo"] order in the expected frame.
    hv = (
        df.groupby("algo")[obj_cols]
        .apply(moocore.hypervolume, ref=ref)
        .reset_index(name="hv")
    )
    pd.testing.assert_frame_equal(
        hv, pd.DataFrame(dict(algo=["bar", "foo"], hv=[0.22475, 0.34350]))
    )

    # apply_within_sets preserves first-appearance order of the sets
    # ("foo" before "bar"), unlike groupby above.
    hv = moocore.apply_within_sets(
        df[obj_cols], df["algo"], moocore.hypervolume, ref=ref
    )
    assert_allclose(hv, [0.3435, 0.22475])

    df = pd.DataFrame(
        dict(
            algo=["a"] * 3 + ["b"] * 3 + ["a", "b"] * 2,
            run=[1, 1, 2, 1, 1, 2, 2, 2, 1, 1],
            obj1=[1, 2, 3, 4, 5, 6, 5, 4, 3, 1],
            obj2=[6, 5, 4, 3, 2, 1, 5, 4, 5, 6],
            obj3=[1, 2, 3, 4, 5, 6, 6, 7, 5, 2],
        )
    )
    # Filtering dominated rows per (algo, run) group; df.iloc[...] lists the
    # surviving rows in the order groupby-apply emits them.
    pd.testing.assert_frame_equal(
        df.groupby(["algo", "run"])[obj_cols]
        .apply(moocore.filter_dominated)
        .reset_index(level=["algo", "run"]),
        df.iloc[[0, 1, 2, 3, 4, 9, 5, 7], :],
    )

    # Same computation without groupby: sets are encoded as "algo-run"
    # strings and a boolean mask is returned in the original row order.
    sets = df["algo"].astype(str) + "-" + df["run"].astype(str)
    is_nondom = moocore.is_nondominated_within_sets(df[obj_cols], sets=sets)
    assert_array_equal(
        is_nondom,
        [True, True, True, True, True, True, False, True, False, True],
    )
| numpy-v2 6 | py{312, 311, 310} 7 | 8 | [testenv] 9 | description = Run unit tests 10 | package = wheel 11 | wheel_build_env = .pkg 12 | deps = 13 | numpy<2 14 | pandas>=2 15 | pytest>=7 16 | cov: coverage[toml] 17 | cov: gcovr 18 | commands = 19 | pytest --doctest-modules --doctest-continue-on-failure --import-mode=importlib {envsitepackagesdir}/moocore tests 20 | 21 | [testenv:numpy-v2] 22 | description = Run unit tests with Numpy v2 23 | base_python = py312 24 | package = wheel 25 | wheel_build_env = .pkg 26 | deps = 27 | numpy>=2 28 | pandas>=2 29 | pytest>=7 30 | cov: coverage[toml] 31 | cov: gcovr 32 | commands = 33 | pytest --doctest-modules --doctest-continue-on-failure --import-mode=importlib {envsitepackagesdir}/moocore tests 34 | 35 | [testenv:report] 36 | skip_install = true 37 | deps = 38 | coverage[toml] 39 | commands = 40 | coverage report -m 41 | 42 | [testenv:cov] 43 | description = Run coverage 44 | package = wheel 45 | wheel_build_env = .pkg-cov 46 | commands = 47 | coverage run --source={envsitepackagesdir}/moocore,tests -m pytest --doctest-modules --import-mode=importlib {envsitepackagesdir}/moocore tests 48 | coverage report -m 49 | coverage xml 50 | gcovr --print-summary --delete -r {toxinidir} {toxinidir} --xml=c_coverage.xml --exclude '.*/moocore\._libmoocore.c' --fail-under-line 1 51 | 52 | [testenv:docs] 53 | description = Build documentation 54 | deps = 55 | -r{toxinidir}/requirements_dev.txt 56 | extras = 57 | doc 58 | commands = 59 | sphinx-build -M html ./doc/source ./doc/_build/ -WT --keep-going -d ./doc/_build/doctrees 60 | 61 | [testenv:type] 62 | deps = 63 | mypy 64 | commands = 65 | mypy src 66 | 67 | [pkgenv] 68 | setenv = 69 | .pkg-cov: CFLAGS={env:CFLAGS:--coverage -UNDEBUG -DDEBUG=1} 70 | .pkg-cov: LDFLAGS=--coverage 71 | 72 | [gh-actions] 73 | python = 74 | 3.10: py310 75 | 3.11: py311 76 | 3.12: py312 77 | -------------------------------------------------------------------------------- /r/.Rbuildignore: 
-------------------------------------------------------------------------------- 1 | .+\.diff$ 2 | .+\.o$ 3 | .+\.so$ 4 | TAGS 5 | \.RData 6 | \.Rhistory 7 | \.auctex-auto 8 | \.git 9 | ^.Rbuildignore.backup$ 10 | ^.Rbuildignore.cran$ 11 | ^CRAN-RELEASE$ 12 | ^CRAN-SUBMISSION$ 13 | ^DEVEL-README$ 14 | ^Makefile 15 | ^TODO 16 | ^\.github 17 | ^\.lintr 18 | ^\.pre-commit-config\.yaml$ 19 | ^\.travis 20 | ^_pkgdown.yml 21 | ^codecov.yml 22 | ^config.status 23 | ^do.R$ 24 | ^docs$ 25 | ^eaf-Ex.R 26 | ^git_version 27 | ^inst/.+\.svn 28 | ^inst/.+~ 29 | ^man-roxygen 30 | ^pkgdown 31 | ^src/libmoocore/Hypervolume_MEX.c 32 | ^svn_version 33 | ^testing 34 | ^v2 35 | ^vignettes/articles$ 36 | ^web 37 | ^windows$ 38 | cran-comments.md 39 | -------------------------------------------------------------------------------- /r/.Rbuildignore.cran: -------------------------------------------------------------------------------- 1 | tests/testthat/_snaps/ 2 | -------------------------------------------------------------------------------- /r/.gitignore: -------------------------------------------------------------------------------- 1 | *.a 2 | *.o 3 | .Rbuildignore.backup 4 | .RData 5 | .Rhistory 6 | .Rproj.user 7 | .auctex-auto 8 | .httr-oauth 9 | R/TAGS 10 | autom4te.cache 11 | config.status 12 | docs 13 | git_version 14 | inst/doc 15 | revdep/checks 16 | revdep/library 17 | src/*.o 18 | src/*.so 19 | src/dominatedsets 20 | src/eaf 21 | src/epsilon 22 | src/hv 23 | src/igd 24 | src/ndsort 25 | src/nondominated 26 | -------------------------------------------------------------------------------- /r/DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: moocore 2 | Type: Package 3 | Title: Core Mathematical Functions for Multi-Objective Optimization 4 | Version: 0.1.7.900 5 | Authors@R: c(person("Manuel", "López-Ibáñez", role = c("aut", "cre"), 6 | email = "manuel.lopez-ibanez@manchester.ac.uk", 7 | comment = c(ORCID = 
"0000-0001-9974-1295")), 8 | person("Carlos", "Fonseca", role = "ctb"), 9 | person("Luís", "Paquete", role = "ctb"), 10 | person(c("Andreia", "P."), "Guerreiro", role = "ctb"), 11 | person("Mickaël", "Binois", role = "ctb"), 12 | person(c("Michael", "H."), "Buselli", role = "cph", comment = "AVL-tree library"), 13 | person("Wessel", "Dankers", role = "cph", comment = "AVL-tree library"), 14 | person("NumPy Developers", role = "cph", comment = "RNG and ziggurat constants"), 15 | person("Jean-Sebastien", "Roy", role = "cph", comment = "mt19937 library"), 16 | person("Makoto", "Matsumoto", role = "cph", comment = "mt19937 library"), 17 | person("Takuji", "Nishimura", role = "cph", comment = "mt19937 library")) 18 | Description: Fast implementation of mathematical operations and performance metrics for multi-objective optimization, including filtering and ranking of dominated vectors according to Pareto optimality, computation of the empirical attainment function, V.G. da Fonseca, C.M. Fonseca, A.O. Hall (2001) , hypervolume metric, C.M. Fonseca, L. Paquete, M. López-Ibáñez (2006) , epsilon indicator, inverted generational distance, and Vorob'ev threshold, expectation and deviation, M. Binois, D. Ginsbourger, O. Roustant (2015) , among others. 
19 | Depends: R (>= 4.0) 20 | Imports: 21 | matrixStats, 22 | Rdpack 23 | Suggests: 24 | doctest (>= 0.2.0), 25 | knitr, 26 | spelling, 27 | testthat (>= 3.0.0), 28 | withr 29 | License: LGPL (>= 2) 30 | Copyright: file COPYRIGHTS 31 | BugReports: https://github.com/multi-objective/moocore/issues 32 | URL: https://multi-objective.github.io/moocore/r/, https://github.com/multi-objective/moocore/tree/main/r 33 | LazyLoad: true 34 | LazyData: true 35 | Encoding: UTF-8 36 | UseLTO: true 37 | RoxygenNote: 7.3.2 38 | Roxygen: list(markdown = TRUE, 39 | roclets = c("collate", "rd", "namespace", 40 | "doctest::dt_roclet")) 41 | SystemRequirements: GNU make 42 | RdMacros: Rdpack 43 | Config/testthat/edition: 3 44 | Language: en-GB 45 | Config/Needs/website: rmarkdown, tidyr, ggplot2 46 | -------------------------------------------------------------------------------- /r/NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | export(as_double_matrix) 4 | export(attsurf2df) 5 | export(avg_hausdorff_dist) 6 | export(choose_eafdiff) 7 | export(compute_eaf_call) 8 | export(compute_eafdiff_call) 9 | export(eaf) 10 | export(eaf_as_list) 11 | export(eafdiff) 12 | export(epsilon_additive) 13 | export(epsilon_mult) 14 | export(filter_dominated) 15 | export(hv_contributions) 16 | export(hypervolume) 17 | export(igd) 18 | export(igd_plus) 19 | export(is_nondominated) 20 | export(largest_eafdiff) 21 | export(normalise) 22 | export(pareto_rank) 23 | export(rbind_datasets) 24 | export(read_datasets) 25 | export(total_whv_rect) 26 | export(transform_maximise) 27 | export(vorob_dev) 28 | export(vorob_t) 29 | export(whv_hype) 30 | export(whv_rect) 31 | export(write_datasets) 32 | importFrom(Rdpack,reprompt) 33 | importFrom(matrixStats,colRanges) 34 | importFrom(utils,modifyList) 35 | importFrom(utils,tail) 36 | importFrom(utils,write.table) 37 | useDynLib(moocore, .registration = TRUE) 38 | 
-------------------------------------------------------------------------------- /r/NEWS.md: -------------------------------------------------------------------------------- 1 | # moocore 0.1.7 2 | 3 | * `hypervolume()` now uses the HV3D+ algorithm for the 3D case and the HV4D+ algorithm for the 4D case. 4 | For dimensions larger than 4, the recursive algorithm uses HV4D+ as the base case, which is significantly faster. 5 | 6 | * `read_datasets()` is significantly faster for large files. 7 | 8 | * `is_nondominated()` and `filter_dominated()` are faster for 3D inputs. 9 | 10 | # moocore 0.1.6 11 | 12 | * Fix parallel build in CRAN. 13 | 14 | # moocore 0.1.5 15 | 16 | * Rename `vorobT()` and `vorobDev()` to `vorob_t()` and `vorob_dev()` to be 17 | consistent with other function names. 18 | 19 | # moocore 0.1.2 20 | 21 | * Fix more warnings and problems that only show in CRAN. 22 | 23 | # moocore 0.1.1 24 | 25 | * Fix problems that only show in CRAN. 26 | 27 | # moocore 0.1.0 28 | 29 | * Initial version uploaded to CRAN. 30 | -------------------------------------------------------------------------------- /r/R/moocore-package.R: -------------------------------------------------------------------------------- 1 | #' @keywords internal 2 | "_PACKAGE" 3 | 4 | ## usethis namespace: start 5 | #' @importFrom matrixStats colRanges 6 | #' @importFrom Rdpack reprompt 7 | #' @importFrom utils modifyList write.table tail 8 | #' @useDynLib moocore, .registration = TRUE 9 | ## usethis namespace: end 10 | NULL 11 | 12 | #' Results of Hybrid GA on Vanzyl and Richmond water networks 13 | #' 14 | #'@format 15 | #' A list with two data frames, each of them with three columns, as 16 | #' produced by [read_datasets()]. 17 | #' \describe{ 18 | #' \item{`$vanzyl`}{data frame of results on Vanzyl network} 19 | #' \item{`$richmond`}{data frame of results on Richmond 20 | #' network. The second column is filled with `NA`} 21 | #' } 22 | #' 23 | #'@source \insertRef{LopezIbanezPhD}{moocore}. 
24 | #' 25 | #' @examples 26 | #'data(HybridGA) 27 | #'print(HybridGA$vanzyl) 28 | #'print(HybridGA$richmond) 29 | #' @keywords datasets 30 | "HybridGA" 31 | 32 | #' Results of SPEA2 when minimising electrical cost and maximising the 33 | #' minimum idle time of pumps on Richmond water network. 34 | #' 35 | #' @format 36 | #' A data frame as produced by [read_datasets()]. The second 37 | #' column measures time in seconds and corresponds to a maximisation problem. 38 | #' 39 | #' @source \insertRef{LopezIbanezPhD}{moocore} 40 | #' 41 | #' @examples 42 | #' data(SPEA2minstoptimeRichmond) 43 | #' str(SPEA2minstoptimeRichmond) 44 | #' @keywords datasets 45 | "SPEA2minstoptimeRichmond" 46 | 47 | #' Results of SPEA2 with relative time-controlled triggers on Richmond water 48 | #' network. 49 | #' 50 | #' @format 51 | #' A data frame as produced by [read_datasets()]. 52 | #' 53 | #' @source \insertRef{LopezIbanezPhD}{moocore} 54 | #' 55 | #'@examples 56 | #' data(SPEA2relativeRichmond) 57 | #' str(SPEA2relativeRichmond) 58 | #' @keywords datasets 59 | "SPEA2relativeRichmond" 60 | 61 | #' Results of SPEA2 with relative time-controlled triggers on Vanzyl's 62 | #' water network. 63 | #' 64 | #' @inherit SPEA2relativeRichmond format source 65 | #' 66 | #'@examples 67 | #' data(SPEA2relativeVanzyl) 68 | #' str(SPEA2relativeVanzyl) 69 | #' @keywords datasets 70 | "SPEA2relativeVanzyl" 71 | 72 | 73 | #' Conditional Pareto fronts obtained from Gaussian processes simulations. 74 | #' 75 | #' The data has the only goal of providing an example of use of [vorob_t()] and 76 | #' [vorob_dev()]. It has been obtained by fitting two Gaussian processes on 20 77 | #' observations of a bi-objective problem, before generating conditional 78 | #' simulation of both GPs at different locations and extracting non-dominated 79 | #' values of coupled simulations. 80 | #' 81 | #' @format 82 | #' A data frame with 2967 observations on the following 3 variables. 
83 | #' \describe{ 84 | #' \item{`f1`}{first objective values.} 85 | #' \item{`f2`}{second objective values.} 86 | #' \item{`set`}{indices of corresponding conditional Pareto fronts.} 87 | #' } 88 | #' 89 | #'@source 90 | #' \insertRef{BinGinRou2015gaupar}{moocore} 91 | #' 92 | #'@examples 93 | #' data(CPFs) 94 | #' vorob_t(CPFs, reference = c(2, 200)) 95 | #'@keywords datasets 96 | "CPFs" 97 | 98 | #' Various strategies of Two-Phase Local Search applied to the Permutation 99 | #' Flowshop Problem with Makespan and Weighted Tardiness objectives. 100 | #' 101 | #' @format 102 | #' A data frame with 1511 observations of 4 variables: 103 | #' \describe{ 104 | #' \item{`algorithm`}{TPLS search strategy} 105 | #' \item{`Makespan`}{first objective values.} 106 | #' \item{`WeightedTardiness`}{second objective values.} 107 | #' \item{`run`}{index of the run.} 108 | #' } 109 | #' 110 | #'@source 111 | #' 112 | #' \insertRef{DubLopStu2011amai}{moocore} 113 | #' 114 | #' @examples 115 | #' data(tpls50x20_1_MWT) 116 | #' str(tpls50x20_1_MWT) 117 | #' @keywords datasets 118 | "tpls50x20_1_MWT" 119 | -------------------------------------------------------------------------------- /r/R/normalise.R: -------------------------------------------------------------------------------- 1 | #' Normalise points 2 | #' 3 | #' Normalise points per coordinate to a range, e.g., `c(1,2)`, where the 4 | #' minimum value will correspond to 1 and the maximum to 2. If bounds are 5 | #' given, they are used for the normalisation. 6 | #' 7 | #' @inheritParams is_nondominated 8 | #' 9 | #' @param to_range `numerical(2)`\cr Normalise values to this range. If the objective is 10 | #' maximised, it is normalised to `c(to_range[1], to_range[0])` 11 | #' instead. 12 | #' 13 | #' @param lower,upper `numerical()`\cr Bounds on the values. If `NA`, the maximum and minimum 14 | #' values of each coordinate are used. 
15 | #' 16 | #' @return `matrix()`\cr A numerical matrix 17 | #' 18 | #' @author Manuel \enc{López-Ibáñez}{Lopez-Ibanez} 19 | #' 20 | #' @examples 21 | #' 22 | #' data(SPEA2minstoptimeRichmond) 23 | #' # The second objective must be maximized 24 | #' head(SPEA2minstoptimeRichmond[, 1:2]) 25 | #' 26 | #' head(normalise(SPEA2minstoptimeRichmond[, 1:2], maximise = c(FALSE, TRUE))) 27 | #' 28 | #' head(normalise(SPEA2minstoptimeRichmond[, 1:2], to_range = c(0,1), maximise = c(FALSE, TRUE))) 29 | #' 30 | #' @export 31 | normalise <- function(x, to_range = c(1, 2), lower = NA, upper = NA, maximise = FALSE) 32 | { 33 | x <- as_double_matrix(x) 34 | nobjs <- ncol(x) 35 | lower <- rep_len(as.double(lower), nobjs) 36 | upper <- rep_len(as.double(upper), nobjs) 37 | # Handle NA 38 | no.lower <- is.na(lower) 39 | no.upper <- is.na(upper) 40 | minmax <- colRanges(x) 41 | lower[no.lower] <- minmax[no.lower, 1L] 42 | upper[no.upper] <- minmax[no.upper, 2L] 43 | maximise <- rep_len(as.logical(maximise), nobjs) 44 | 45 | if (length(to_range) != 2L) 46 | stop("'to_range' must be a vector of length 2") 47 | 48 | x <- t.default(x) 49 | .Call(normalise_C, 50 | x, # This is modified by normalise_C 51 | as.double(to_range), 52 | lower, upper, maximise) 53 | # FIXME: Transposing in C may avoid a copy. 54 | t.default(x) 55 | } 56 | -------------------------------------------------------------------------------- /r/R/rbind_datasets.R: -------------------------------------------------------------------------------- 1 | #' Combine datasets `x` and `y` by row taking care of making all sets unique. 2 | #' 3 | #' @param x,y `matrix`|`data.frame()`\cr Each dataset has at least three 4 | #' columns, the last one is the set of each point. See also 5 | #' [read_datasets()]. 6 | #' 7 | #' @return `matrix()|`data.frame()`\cr A dataset. 
8 | #' @examples 9 | #' x <- data.frame(f1 = 5:10, f2 = 10:5, set = 1:6) 10 | #' y <- data.frame(f1 = 15:20, f2 = 20:15, set = 1:6) 11 | #' rbind_datasets(x,y) 12 | #' @export 13 | rbind_datasets <- function(x, y) 14 | { 15 | setcol <- ncol(x) 16 | stopifnot(setcol > 2L) 17 | stopifnot(ncol(x) == ncol(y)) 18 | # FIXME: We could relax this condition by re-encoding the column. 19 | stopifnot(min(x[,setcol]) == 1L) 20 | stopifnot(min(y[,setcol]) == 1L) 21 | # We have to make all sets unique. 22 | y[,setcol] <- y[,setcol] + max(x[,setcol]) 23 | rbind(x, y) 24 | } 25 | 26 | 27 | combine_cumsizes_sets <- function(sets_x, sets_y) 28 | { 29 | cumsizes <- cumsum(unique_counts(sets_x)) 30 | c(cumsizes, cumsizes[length(cumsizes)] + cumsum(unique_counts(sets_y))) 31 | } 32 | -------------------------------------------------------------------------------- /r/R/utils.R: -------------------------------------------------------------------------------- 1 | range_df_list <- function(x, col) 2 | { 3 | # FIXME: How to make this faster? 4 | do.call(range, lapply(x, `[`, , col)) 5 | } 6 | 7 | get_ideal <- function(x, maximise) 8 | { 9 | # FIXME: Is there a better way to do this? 10 | minmax <- colRanges(as.matrix(x)) 11 | lower <- minmax[,1L] 12 | upper <- minmax[,2L] 13 | ifelse(maximise, upper, lower) 14 | } 15 | 16 | nunique <- function(x) length(unique.default(x)) 17 | 18 | # FIXME: There must be something faster than table 19 | unique_counts <- function(x) as.vector(table(x)) 20 | 21 | #' Transform matrix according to maximise parameter 22 | #' 23 | #' @inheritParams is_nondominated 24 | #' 25 | #' @return `x` transformed such that every column where `maximise` is `TRUE` is multiplied by `-1`. 
26 | #' 27 | #' @examples 28 | #' x <- data.frame(f1=1:10, f2=101:110) 29 | #' rownames(x) <- letters[1:10] 30 | #' transform_maximise(x, maximise=c(FALSE,TRUE)) 31 | #' transform_maximise(x, maximise=TRUE) 32 | #' x <- as.matrix(x) 33 | #' transform_maximise(x, maximise=c(FALSE,TRUE)) 34 | #' transform_maximise(x, maximise=TRUE) 35 | #' 36 | #' @export 37 | transform_maximise <- function(x, maximise) 38 | { 39 | if (any(maximise)) { 40 | if (all(maximise)) 41 | return(-x) 42 | if (length(maximise) != ncol(x)) 43 | stop("length of maximise must be either 1 or ncol(x)") 44 | x[,maximise] <- -x[,maximise, drop=FALSE] 45 | } 46 | x 47 | } 48 | 49 | #' Convert input to a matrix with `"double"` storage mode ([base::storage.mode()]). 50 | #' 51 | #' @param x `data.frame()`|`matrix()`\cr A numerical data frame or matrix with at least 1 row and 2 columns. 52 | #' @return `x` is coerced to a numerical `matrix()`. 53 | #' @export 54 | as_double_matrix <- function(x) 55 | { 56 | name <- deparse(substitute(x)) # FIXME: Only do this if there is an error. 
57 | if (length(dim(x)) != 2L) 58 | stop("'", name, "' must be a data.frame or a matrix") 59 | if (nrow(x) < 1L) 60 | stop("not enough points (rows) in '", name, "'") 61 | if (ncol(x) < 2L) 62 | stop("'", name, "' must have at least 2 columns") 63 | x <- as.matrix(x) 64 | if (!is.numeric(x)) 65 | stop("'", name, "' must be numeric") 66 | if (storage.mode(x) != "double") 67 | storage.mode(x) <- "double" 68 | x 69 | } 70 | 71 | is_wholenumber <- function(x, tol = .Machine$double.eps^0.5) 72 | is.finite(x) && abs(x - round(x)) < tol 73 | 74 | as_integer <- function(x) 75 | { 76 | if (!is_wholenumber(x)) { 77 | stop("'", deparse(substitute(x)), "' is not an integer: ", x) 78 | } 79 | as.integer(x) 80 | } 81 | -------------------------------------------------------------------------------- /r/R/vorob.R: -------------------------------------------------------------------------------- 1 | #' Vorob'ev computations 2 | #' 3 | #' Compute Vorob'ev threshold, expectation and deviation. Also, displaying the 4 | #' symmetric deviation function is possible. The symmetric deviation 5 | #' function is the probability for a given target in the objective space to 6 | #' belong to the symmetric difference between the Vorob'ev expectation and a 7 | #' realization of the (random) attained set. 8 | #' 9 | #' @inheritParams eaf 10 | #' @inheritParams hypervolume 11 | #' 12 | #' @return `vorob_t` returns a list with elements `threshold`, 13 | #' `ve`, and `avg_hyp` (average hypervolume) 14 | #' @rdname Vorob 15 | #' @author Mickael Binois 16 | #' @doctest 17 | #' data(CPFs) 18 | #' res <- vorob_t(CPFs, reference = c(2, 200)) 19 | #' @expect equal(44.140625) 20 | #' res$threshold 21 | #' @expect equal(8943.3332) 22 | #' res$avg_hyp 23 | #' # Now print Vorob'ev deviation 24 | #' vd <- vorob_dev(CPFs, ve = res$ve, reference = c(2, 200)) 25 | #' @expect equal(3017.1299) 26 | #' vd 27 | #' @references 28 | #' \insertRef{BinGinRou2015gaupar}{moocore} 29 | #' 30 | #' C. 
Chevalier (2013), Fast uncertainty reduction strategies relying on 31 | #' Gaussian process models, University of Bern, PhD thesis. 32 | #' 33 | #' \insertRef{Molchanov2005theory}{moocore} 34 | #' 35 | #' @concept eaf 36 | #' @export 37 | vorob_t <- function(x, sets, reference, maximise = FALSE) 38 | { 39 | if (missing(sets)) { 40 | sets <- x[, ncol(x)] 41 | x <- x[, -ncol(x), drop=FALSE] 42 | } 43 | x <- as_double_matrix(x) 44 | 45 | if (any(maximise)) { 46 | x <- transform_maximise(x, maximise) 47 | if (all(maximise)) { 48 | reference <- -reference 49 | } else { 50 | reference[maximise] <- -reference[maximise] 51 | } 52 | } 53 | 54 | # First step: compute average hypervolume over conditional Pareto fronts 55 | avg_hyp <- mean(sapply(split.data.frame(x, sets), hypervolume, reference = reference)) 56 | 57 | prev_hyp <- diff <- Inf # hypervolume of quantile at previous step 58 | a <- 0 59 | b <- 100 60 | setcol <- ncol(x) + 1L 61 | while (diff != 0) { 62 | c <- (a + b) / 2 63 | ve <- eaf(x, sets = sets, percentiles = c)[,-setcol] 64 | tmp <- hypervolume(ve, reference = reference) 65 | if (tmp > avg_hyp) a <- c else b <- c 66 | diff <- prev_hyp - tmp 67 | prev_hyp <- tmp 68 | } 69 | ve <- transform_maximise(ve, maximise) 70 | list(threshold = c, ve = ve, avg_hyp = avg_hyp) 71 | } 72 | 73 | #' @concept eaf 74 | #' @rdname Vorob 75 | #' @param ve `matrix()`\cr Vorob'ev expectation, e.g., as returned by [vorob_t()]. 76 | #' @return `vorob_dev` returns the Vorob'ev deviation. 77 | #' @export 78 | vorob_dev <- function(x, sets, reference, ve = NULL, maximise = FALSE) 79 | { 80 | # FIXME: Does it make sense to call this function with 'x' different than the 81 | # one used to calculate ve? If not, then we should merge them and avoid a lot 82 | # of redundant work. 
83 | if (missing(sets)) { 84 | sets <- x[, ncol(x)] 85 | x <- x[, -ncol(x), drop=FALSE] 86 | } 87 | if (is.null(ve)) { 88 | ve <- vorob_t(x, sets, reference = reference, maximise = maximise)$ve 89 | } else { 90 | x <- as_double_matrix(x) 91 | } 92 | 93 | if (any(maximise)) { 94 | x <- transform_maximise(x, maximise) 95 | ve <- transform_maximise(ve, maximise) 96 | if (all(maximise)) { 97 | reference <- -reference 98 | } else { 99 | reference[maximise] <- -reference[maximise] 100 | } 101 | } 102 | setcol <- ncol(x) 103 | # Hypervolume of the symmetric difference between A and B: 104 | # 2 * H(AUB) - H(A) - H(B) 105 | h2 <- hypervolume(ve, reference = reference) 106 | x_split <- split.data.frame(x, sets) 107 | h1 <- mean(sapply(x_split, hypervolume, reference = reference)) 108 | 109 | hv_union_ve <- function(y) 110 | hypervolume(rbind(y, ve), reference = reference) 111 | 112 | vd <- 2 * sum(sapply(x_split, hv_union_ve)) 113 | nruns <- length(x_split) 114 | ((vd / nruns) - h1 - h2) 115 | } 116 | -------------------------------------------------------------------------------- /r/R/zzz.R: -------------------------------------------------------------------------------- 1 | .onLoad <- function(lib, pkg){ 2 | Rdpack::Rdpack_bibstyles(package = pkg, authors = "LongNames") 3 | invisible(NULL) 4 | } 5 | -------------------------------------------------------------------------------- /r/_pkgdown.yml: -------------------------------------------------------------------------------- 1 | url: https://multi-objective.github.io/moocore/r/ 2 | 3 | repo: 4 | url: 5 | home: https://github.com/multi-objective/moocore/ 6 | source: https://github.com/multi-objective/moocore/blob/HEAD/r/ 7 | 8 | template: 9 | bootstrap: 5 10 | light-switch: true 11 | bslib: 12 | primary: "#0054AD" 13 | border-radius: 0.5rem 14 | btn-border-radius: 0.25rem 15 | danger: "#A6081A" 16 | # This is needed until https://github.com/r-lib/pkgdown/issues/2704 is fixed. 
17 | includes: 18 | in_header: | 19 | 20 | 21 | 22 | 23 | 24 | development: 25 | mode: unreleased 26 | version_tooltip: "Development version" 27 | 28 | navbar: 29 | structure: 30 | right: [search, cran, github, lightswitch] 31 | components: 32 | cran: 33 | icon: fab fa-r-project 34 | href: https://cloud.r-project.org/package=moocore 35 | aria-label: View on CRAN 36 | 37 | reference: 38 | - title: Pareto dominance 39 | contents: has_concept("dominance") 40 | - title: Multi-objective performance assessment metrics 41 | contents: has_concept("metrics") 42 | - title: Computing the Empirical Attainment Function 43 | contents: has_concept("eaf") 44 | - title: Read/Write/Transform datasets 45 | contents: 46 | - read_datasets 47 | - write_datasets 48 | - normalise 49 | - rbind_datasets 50 | - transform_maximise 51 | - as_double_matrix 52 | - title: Datasets 53 | contents: has_keyword("datasets") 54 | -------------------------------------------------------------------------------- /r/cleanup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | rm -f config.* confdefs.h tests/testthat/*.pdf \ 3 | src/*.o src/*.so src/symbols.rds \ 4 | inst/doc/*.blg inst/doc/*.bbl *-Ex.R 5 | 6 | rm -rf autom4te.cache 7 | find . 
-name '*.orig' -o -name '.Rhistory' -o -name '*.Rout' -print0 | xargs -0 rm -f 8 | -------------------------------------------------------------------------------- /r/data/CPFs.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/data/CPFs.rda -------------------------------------------------------------------------------- /r/data/HybridGA.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/data/HybridGA.rda -------------------------------------------------------------------------------- /r/data/SPEA2minstoptimeRichmond.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/data/SPEA2minstoptimeRichmond.rda -------------------------------------------------------------------------------- /r/data/SPEA2relativeRichmond.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/data/SPEA2relativeRichmond.rda -------------------------------------------------------------------------------- /r/data/SPEA2relativeVanzyl.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/data/SPEA2relativeVanzyl.rda -------------------------------------------------------------------------------- /r/data/tpls50x20_1_MWT.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/data/tpls50x20_1_MWT.rda 
-------------------------------------------------------------------------------- /r/do.R: -------------------------------------------------------------------------------- 1 | argv <- commandArgs(trailingOnly = TRUE) 2 | if (length(argv) != 2L) 3 | stop("Missing filename") 4 | built_path <- argv[2L] 5 | 6 | if (argv[1] == "submit") { 7 | cli::cat_rule("Submitting", col = "red") 8 | xfun::submit_cran(built_path) 9 | } else if (argv[1] == "info") { 10 | size <- file.info(built_path)$size 11 | cli::cat_rule("Info", col = "cyan") 12 | cli::cli_inform(c(i = "Path {.file {built_path}}", i = "File size: {prettyunits::pretty_bytes(size)}")) 13 | cli::cat_line() 14 | } else { 15 | stop("Unknown command") 16 | } 17 | -------------------------------------------------------------------------------- /r/inst/WORDLIST: -------------------------------------------------------------------------------- 1 | Bartz 2 | Beielstein 3 | Binois 4 | Biobjective 5 | Bischl 6 | Chiarandini 7 | Cleanups 8 | Codecov 9 | Coloring 10 | DIMACS 11 | EAF 12 | EAFs 13 | Flowshop 14 | GD 15 | GPs 16 | Ginsbourger 17 | Hausdorff 18 | Hypervolume 19 | IGD 20 | IGDX 21 | IRIDIA 22 | Ibanez 23 | Ibáñez 24 | López 25 | Makespan 26 | Metaheuristics 27 | Mickael 28 | Mickaël 29 | NumPy 30 | ORCID 31 | Paquete 32 | Preuss 33 | Quemy 34 | Roustant 35 | Roxygen 36 | SPEA 37 | Stützle 38 | TPLS 39 | TR 40 | Tabu 41 | TabuCol 42 | Vanzyl 43 | Vanzyl's 44 | Vorob'ev 45 | Vorob'ev 46 | behavior 47 | behaviors 48 | bibtex 49 | biobjective 50 | cleanups 51 | color 52 | colormap 53 | colors 54 | doi 55 | hyperrectangle 56 | hyperrectangles 57 | hypervolume 58 | hypervolumes 59 | libre 60 | matrixStats 61 | metaheuristic 62 | nondominated 63 | orthotopes 64 | polytope 65 | -------------------------------------------------------------------------------- /r/inst/extdata/ALG_1_dat.xz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/inst/extdata/ALG_1_dat.xz -------------------------------------------------------------------------------- /r/inst/extdata/ALG_2_dat.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/inst/extdata/ALG_2_dat.xz -------------------------------------------------------------------------------- /r/inst/extdata/example1_dat: -------------------------------------------------------------------------------- 1 | 5136906 5252884 2 | 5142568 5219868 3 | 5167616 5217238 4 | 5169726 5210654 5 | 5188260 5201808 6 | 5207548 5195594 7 | 5208824 5191646 8 | 5231722 5183804 9 | 10 | 5135414 5268372 11 | 5147846 5210542 12 | 5174600 5209152 13 | 5181262 5179358 14 | 15 | 5146734 5264400 16 | 5149830 5258196 17 | 5151682 5217482 18 | 5162856 5203302 19 | 5186380 5201066 20 | 5208800 5190706 21 | 5220492 5185632 22 | 5248376 5183246 23 | 24 | 5134240 5232272 25 | 5160938 5205572 26 | 5172032 5205298 27 | 5175168 5192196 28 | 5193370 5186240 29 | 5208364 5184852 30 | 31 | 5153534 5295574 32 | 5155512 5219180 33 | 5162270 5211316 34 | 5177516 5207258 35 | 5193688 5206734 36 | 5196258 5198842 37 | 5196400 5194080 38 | 5200046 5187082 39 | 40 | 5128224 5266830 41 | 5147362 5262574 42 | 5155716 5198006 43 | 5169034 5186656 44 | 45 | 5128176 5238886 46 | 5153066 5238024 47 | 5155408 5204124 48 | 5185294 5200040 49 | 5191528 5192334 50 | 5223828 5189078 51 | 52 | 5137952 5233566 53 | 5158466 5229466 54 | 5158568 5215710 55 | 5162240 5209706 56 | 5168678 5202144 57 | 5175072 5196856 58 | 5221198 5186816 59 | 60 | 5135300 5276110 61 | 5145614 5247358 62 | 5146024 5226582 63 | 5154280 5223756 64 | 5165138 5217210 65 | 5165272 5215804 66 | 5168218 5206596 67 | 5199178 5202584 68 | 5210042 5194988 69 | 5226770 5191850 70 | 71 | 5143188 5235016 72 | 5144532 5209802 73 
| 5164108 5188228 74 | 5213378 5185264 75 | -------------------------------------------------------------------------------- /r/inst/extdata/rest.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/inst/extdata/rest.xz -------------------------------------------------------------------------------- /r/inst/extdata/tpls.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/inst/extdata/tpls.xz -------------------------------------------------------------------------------- /r/man/CPFs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/moocore-package.R 3 | \docType{data} 4 | \name{CPFs} 5 | \alias{CPFs} 6 | \title{Conditional Pareto fronts obtained from Gaussian processes simulations.} 7 | \format{ 8 | A data frame with 2967 observations on the following 3 variables. 9 | \describe{ 10 | \item{\code{f1}}{first objective values.} 11 | \item{\code{f2}}{second objective values.} 12 | \item{\code{set}}{indices of corresponding conditional Pareto fronts.} 13 | } 14 | } 15 | \source{ 16 | \insertRef{BinGinRou2015gaupar}{moocore} 17 | } 18 | \usage{ 19 | CPFs 20 | } 21 | \description{ 22 | The data has the only goal of providing an example of use of \code{\link[=vorob_t]{vorob_t()}} and 23 | \code{\link[=vorob_dev]{vorob_dev()}}. It has been obtained by fitting two Gaussian processes on 20 24 | observations of a bi-objective problem, before generating conditional 25 | simulation of both GPs at different locations and extracting non-dominated 26 | values of coupled simulations. 
27 | } 28 | \examples{ 29 | data(CPFs) 30 | vorob_t(CPFs, reference = c(2, 200)) 31 | } 32 | \keyword{datasets} 33 | -------------------------------------------------------------------------------- /r/man/HybridGA.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/moocore-package.R 3 | \docType{data} 4 | \name{HybridGA} 5 | \alias{HybridGA} 6 | \title{Results of Hybrid GA on Vanzyl and Richmond water networks} 7 | \format{ 8 | A list with two data frames, each of them with three columns, as 9 | produced by \code{\link[=read_datasets]{read_datasets()}}. 10 | \describe{ 11 | \item{\verb{$vanzyl}}{data frame of results on Vanzyl network} 12 | \item{\verb{$richmond}}{data frame of results on Richmond 13 | network. The second column is filled with \code{NA}} 14 | } 15 | } 16 | \source{ 17 | \insertRef{LopezIbanezPhD}{moocore}. 18 | } 19 | \usage{ 20 | HybridGA 21 | } 22 | \description{ 23 | Results of Hybrid GA on Vanzyl and Richmond water networks 24 | } 25 | \examples{ 26 | data(HybridGA) 27 | print(HybridGA$vanzyl) 28 | print(HybridGA$richmond) 29 | } 30 | \keyword{datasets} 31 | -------------------------------------------------------------------------------- /r/man/SPEA2minstoptimeRichmond.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/moocore-package.R 3 | \docType{data} 4 | \name{SPEA2minstoptimeRichmond} 5 | \alias{SPEA2minstoptimeRichmond} 6 | \title{Results of SPEA2 when minimising electrical cost and maximising the 7 | minimum idle time of pumps on Richmond water network.} 8 | \format{ 9 | A data frame as produced by \code{\link[=read_datasets]{read_datasets()}}. The second 10 | column measures time in seconds and corresponds to a maximisation problem. 
11 | } 12 | \source{ 13 | \insertRef{LopezIbanezPhD}{moocore} 14 | } 15 | \usage{ 16 | SPEA2minstoptimeRichmond 17 | } 18 | \description{ 19 | Results of SPEA2 when minimising electrical cost and maximising the 20 | minimum idle time of pumps on Richmond water network. 21 | } 22 | \examples{ 23 | data(SPEA2minstoptimeRichmond) 24 | str(SPEA2minstoptimeRichmond) 25 | } 26 | \keyword{datasets} 27 | -------------------------------------------------------------------------------- /r/man/SPEA2relativeRichmond.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/moocore-package.R 3 | \docType{data} 4 | \name{SPEA2relativeRichmond} 5 | \alias{SPEA2relativeRichmond} 6 | \title{Results of SPEA2 with relative time-controlled triggers on Richmond water 7 | network.} 8 | \format{ 9 | A data frame as produced by \code{\link[=read_datasets]{read_datasets()}}. 10 | } 11 | \source{ 12 | \insertRef{LopezIbanezPhD}{moocore} 13 | } 14 | \usage{ 15 | SPEA2relativeRichmond 16 | } 17 | \description{ 18 | Results of SPEA2 with relative time-controlled triggers on Richmond water 19 | network. 20 | } 21 | \examples{ 22 | data(SPEA2relativeRichmond) 23 | str(SPEA2relativeRichmond) 24 | } 25 | \keyword{datasets} 26 | -------------------------------------------------------------------------------- /r/man/SPEA2relativeVanzyl.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/moocore-package.R 3 | \docType{data} 4 | \name{SPEA2relativeVanzyl} 5 | \alias{SPEA2relativeVanzyl} 6 | \title{Results of SPEA2 with relative time-controlled triggers on Vanzyl's 7 | water network.} 8 | \format{ 9 | An object of class \code{data.frame} with 107 rows and 3 columns. 
10 | } 11 | \source{ 12 | \insertRef{LopezIbanezPhD}{moocore} 13 | } 14 | \usage{ 15 | SPEA2relativeVanzyl 16 | } 17 | \description{ 18 | Results of SPEA2 with relative time-controlled triggers on Vanzyl's 19 | water network. 20 | } 21 | \examples{ 22 | data(SPEA2relativeVanzyl) 23 | str(SPEA2relativeVanzyl) 24 | } 25 | \keyword{datasets} 26 | -------------------------------------------------------------------------------- /r/man/Vorob.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/vorob.R 3 | \name{vorob_t} 4 | \alias{vorob_t} 5 | \alias{vorob_dev} 6 | \title{Vorob'ev computations} 7 | \usage{ 8 | vorob_t(x, sets, reference, maximise = FALSE) 9 | 10 | vorob_dev(x, sets, reference, ve = NULL, maximise = FALSE) 11 | } 12 | \arguments{ 13 | \item{x}{\code{matrix()}|\code{data.frame()}\cr Matrix or data frame of numerical 14 | values, where each row gives the coordinates of a point. If \code{sets} is 15 | missing, the last column of \code{x} gives the sets.} 16 | 17 | \item{sets}{\code{integer()}\cr A vector that indicates the set of each point in \code{x}. If 18 | missing, the last column of \code{x} is used instead.} 19 | 20 | \item{reference}{\code{numeric()}\cr Reference point as a vector of numerical 21 | values.} 22 | 23 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 24 | instead of minimised. Either a single logical value that applies to all 25 | objectives or a vector of logical values, with one value per objective.} 26 | 27 | \item{ve}{\code{matrix()}\cr Vorob'ev expectation, e.g., as returned by \code{\link[=vorob_t]{vorob_t()}}.} 28 | } 29 | \value{ 30 | \code{vorob_t} returns a list with elements \code{threshold}, 31 | \code{ve}, and \code{avg_hyp} (average hypervolume) 32 | 33 | \code{vorob_dev} returns the Vorob'ev deviation. 
34 | } 35 | \description{ 36 | Compute Vorob'ev threshold, expectation and deviation. Also, displaying the 37 | symmetric deviation function is possible. The symmetric deviation 38 | function is the probability for a given target in the objective space to 39 | belong to the symmetric difference between the Vorob'ev expectation and a 40 | realization of the (random) attained set. 41 | } 42 | \references{ 43 | \insertRef{BinGinRou2015gaupar}{moocore} 44 | 45 | C. Chevalier (2013), Fast uncertainty reduction strategies relying on 46 | Gaussian process models, University of Bern, PhD thesis. 47 | 48 | \insertRef{Molchanov2005theory}{moocore} 49 | } 50 | \author{ 51 | Mickael Binois 52 | } 53 | \concept{eaf} 54 | \examples{ 55 | data(CPFs) 56 | res <- vorob_t(CPFs, reference = c(2, 200)) 57 | res$threshold 58 | res$avg_hyp 59 | # Now print Vorob'ev deviation 60 | vd <- vorob_dev(CPFs, ve = res$ve, reference = c(2, 200)) 61 | vd 62 | } 63 | -------------------------------------------------------------------------------- /r/man/as_double_matrix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{as_double_matrix} 4 | \alias{as_double_matrix} 5 | \title{Convert input to a matrix with \code{"double"} storage mode (\code{\link[base:mode]{base::storage.mode()}}).} 6 | \usage{ 7 | as_double_matrix(x) 8 | } 9 | \arguments{ 10 | \item{x}{\code{data.frame()}|\code{matrix()}\cr A numerical data frame or matrix with at least 1 row and 2 columns.} 11 | } 12 | \value{ 13 | \code{x} is coerced to a numerical \code{matrix()}. 14 | } 15 | \description{ 16 | Convert input to a matrix with \code{"double"} storage mode (\code{\link[base:mode]{base::storage.mode()}}). 
17 | } 18 | -------------------------------------------------------------------------------- /r/man/attsurf2df.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/eaf.R 3 | \name{attsurf2df} 4 | \alias{attsurf2df} 5 | \title{Convert a list of attainment surfaces to a single EAF \code{data.frame}.} 6 | \usage{ 7 | attsurf2df(x) 8 | } 9 | \arguments{ 10 | \item{x}{\code{list()}\cr List of \code{data.frames} or matrices. The names of the list 11 | give the percentiles of the attainment surfaces. This is the format 12 | returned by \code{\link[=eaf_as_list]{eaf_as_list()}}.} 13 | } 14 | \value{ 15 | \code{data.frame()}\cr Data frame with as many columns as objectives and an additional column \code{percentiles}. 16 | } 17 | \description{ 18 | Convert a list of attainment surfaces to a single EAF \code{data.frame}. 19 | } 20 | \examples{ 21 | 22 | data(SPEA2relativeRichmond) 23 | attsurfs <- eaf_as_list(eaf(SPEA2relativeRichmond, percentiles = c(0,50,100))) 24 | str(attsurfs) 25 | eaf_df <- attsurf2df(attsurfs) 26 | str(eaf_df) 27 | } 28 | \seealso{ 29 | \code{\link[=eaf_as_list]{eaf_as_list()}} 30 | } 31 | \concept{eaf} 32 | -------------------------------------------------------------------------------- /r/man/choose_eafdiff.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/largest_eafdiff.R 3 | \name{choose_eafdiff} 4 | \alias{choose_eafdiff} 5 | \title{Interactively choose according to empirical attainment function differences} 6 | \usage{ 7 | choose_eafdiff(x, left = stop("'left' must be either TRUE or FALSE")) 8 | } 9 | \arguments{ 10 | \item{x}{\code{matrix()}\cr Matrix of rectangles representing EAF differences 11 | returned by \code{\link[=eafdiff]{eafdiff()}} with \code{rectangles=TRUE}.} 12 | 13 | 
\item{left}{\code{logical(1)}\cr With \code{left=TRUE} return the rectangles with 14 | positive differences, otherwise return those with negative differences but 15 | differences are converted to positive.} 16 | } 17 | \value{ 18 | \code{matrix()} where the first 4 columns give the coordinates of two 19 | corners of each rectangle and the last column. In both cases, the last 20 | column gives the positive differences in favor of the chosen side. 21 | } 22 | \description{ 23 | Interactively choose according to empirical attainment function differences 24 | } 25 | \examples{ 26 | \donttest{ 27 | extdata_dir <- system.file(package="moocore", "extdata") 28 | A1 <- read_datasets(file.path(extdata_dir, "wrots_l100w10_dat")) 29 | A2 <- read_datasets(file.path(extdata_dir, "wrots_l10w100_dat")) 30 | # Choose A1 31 | rectangles <- eafdiff(A1, A2, intervals = 5, rectangles = TRUE) 32 | rectangles <- choose_eafdiff(rectangles, left = TRUE) 33 | reference <- c(max(A1[, 1], A2[, 1]), max(A1[, 2], A2[, 2])) 34 | x <- split.data.frame(A1[,1:2], A1[,3]) 35 | hv_A1 <- sapply(split.data.frame(A1[, 1:2], A1[, 3]), 36 | hypervolume, reference=reference) 37 | hv_A2 <- sapply(split.data.frame(A2[, 1:2], A2[, 3]), 38 | hypervolume, reference=reference) 39 | print(fivenum(hv_A1)) 40 | print(fivenum(hv_A2)) 41 | whv_A1 <- sapply(split.data.frame(A1[, 1:2], A1[, 3]), 42 | whv_rect, rectangles=rectangles, reference=reference) 43 | whv_A2 <- sapply(split.data.frame(A2[, 1:2], A2[, 3]), 44 | whv_rect, rectangles=rectangles, reference=reference) 45 | print(fivenum(whv_A1)) 46 | print(fivenum(whv_A2)) 47 | } 48 | 49 | } 50 | \concept{eaf} 51 | -------------------------------------------------------------------------------- /r/man/compute_eaf_call.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/eaf.R 3 | \name{compute_eaf_call} 4 | \alias{compute_eaf_call} 5 | \title{Same as 
\code{\link[=eaf]{eaf()}} but performs no checks and does not transform the input or 6 | the output. This function should be used by other packages that want to 7 | avoid redundant checks and transformations.} 8 | \usage{ 9 | compute_eaf_call(x, cumsizes, percentiles) 10 | } 11 | \arguments{ 12 | \item{x}{\code{matrix()}|\code{data.frame()}\cr Matrix or data frame of numerical 13 | values, where each row gives the coordinates of a point. If \code{sets} is 14 | missing, the last column of \code{x} gives the sets.} 15 | 16 | \item{cumsizes}{\code{integer()}\cr Cumulative size of the different sets of points in \code{x}.} 17 | 18 | \item{percentiles}{\code{numeric()}\cr Vector indicating which percentiles are computed. 19 | \code{NULL} computes all.} 20 | } 21 | \value{ 22 | \code{data.frame()}\cr A data frame containing the exact representation of 23 | EAF. The last column gives the percentile that corresponds to each 24 | point. If groups is not \code{NULL}, then an additional column indicates to 25 | which group the point belongs. 26 | } 27 | \description{ 28 | Same as \code{\link[=eaf]{eaf()}} but performs no checks and does not transform the input or 29 | the output. This function should be used by other packages that want to 30 | avoid redundant checks and transformations. 31 | } 32 | \seealso{ 33 | \code{\link[=as_double_matrix]{as_double_matrix()}} \code{\link[=transform_maximise]{transform_maximise()}} 34 | } 35 | \concept{eaf} 36 | -------------------------------------------------------------------------------- /r/man/compute_eafdiff_call.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/eafdiff.R 3 | \name{compute_eafdiff_call} 4 | \alias{compute_eafdiff_call} 5 | \title{Same as \code{\link[=eafdiff]{eafdiff()}} but performs no checks and does not transform the input 6 | or the output. 
This function should be used by other packages that want to 7 | avoid redundant checks and transformations.} 8 | \usage{ 9 | compute_eafdiff_call(x, y, cumsizes_x, cumsizes_y, intervals, ret) 10 | } 11 | \arguments{ 12 | \item{x, y}{\code{matrix}|\code{data.frame()}\cr Data frames corresponding to the input data of 13 | left and right sides, respectively. Each data frame has at least three 14 | columns, the last one is the set of each point. See also 15 | \code{\link[=read_datasets]{read_datasets()}}.} 16 | 17 | \item{cumsizes_x, cumsizes_y}{Cumulative size of the different sets of points in \code{x} and \code{y}.} 18 | 19 | \item{intervals}{\code{integer(1)}\cr The absolute range of the differences 20 | \eqn{[0, 1]} is partitioned into the number of intervals provided.} 21 | 22 | \item{ret}{(\code{"points"|"rectangles"|"polygons"})\cr The format of the returned EAF differences.} 23 | } 24 | \value{ 25 | With \code{rectangle=FALSE}, a \code{data.frame} containing points where there 26 | is a transition in the value of the EAF differences. With 27 | \code{rectangle=TRUE}, a \code{matrix} where the first 4 columns give the 28 | coordinates of two corners of each rectangle. In both cases, the last 29 | column gives the difference in terms of sets in \code{x} minus sets in \code{y} that 30 | attain each point (i.e., negative values are differences in favour \code{y}). 31 | } 32 | \description{ 33 | Same as \code{\link[=eafdiff]{eafdiff()}} but performs no checks and does not transform the input 34 | or the output. This function should be used by other packages that want to 35 | avoid redundant checks and transformations. 
36 | } 37 | \seealso{ 38 | \code{\link[=as_double_matrix]{as_double_matrix()}} \code{\link[=transform_maximise]{transform_maximise()}} 39 | } 40 | \concept{eaf} 41 | -------------------------------------------------------------------------------- /r/man/eaf.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/eaf.R 3 | \name{eaf} 4 | \alias{eaf} 5 | \title{Exact computation of the EAF in 2D or 3D} 6 | \usage{ 7 | eaf(x, sets, percentiles = NULL, maximise = FALSE, groups = NULL) 8 | } 9 | \arguments{ 10 | \item{x}{\code{matrix()}|\code{data.frame()}\cr Matrix or data frame of numerical 11 | values, where each row gives the coordinates of a point. If \code{sets} is 12 | missing, the last column of \code{x} gives the sets.} 13 | 14 | \item{sets}{\code{integer()}\cr A vector that indicates the set of each point in \code{x}. If 15 | missing, the last column of \code{x} is used instead.} 16 | 17 | \item{percentiles}{\code{numeric()}\cr Vector indicating which percentiles are computed. 18 | \code{NULL} computes all.} 19 | 20 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 21 | instead of minimised. Either a single logical value that applies to all 22 | objectives or a vector of logical values, with one value per objective.} 23 | 24 | \item{groups}{\code{factor()}\cr Indicates that the EAF must be computed separately for data 25 | belonging to different groups.} 26 | } 27 | \value{ 28 | \code{data.frame()}\cr A data frame containing the exact representation of 29 | EAF. The last column gives the percentile that corresponds to each 30 | point. If groups is not \code{NULL}, then an additional column indicates to 31 | which group the point belongs. 32 | } 33 | \description{ 34 | This function computes the EAF given a set of 2D or 3D points and a vector \code{set} 35 | that indicates to which set each point belongs. 
36 | } 37 | \note{ 38 | There are several examples of data sets in 39 | \code{system.file(package="moocore","extdata")}. The current implementation 40 | only supports two and three dimensional points. 41 | } 42 | \examples{ 43 | extdata_path <- system.file(package="moocore", "extdata") 44 | 45 | x <- read_datasets(file.path(extdata_path, "example1_dat")) 46 | # Compute full EAF (sets is the last column) 47 | str(eaf(x)) 48 | 49 | # Compute only best, median and worst 50 | str(eaf(x[,1:2], sets = x[,3], percentiles = c(0, 50, 100))) 51 | 52 | x <- read_datasets(file.path(extdata_path, "spherical-250-10-3d.txt")) 53 | y <- read_datasets(file.path(extdata_path, "uniform-250-10-3d.txt")) 54 | x <- rbind(data.frame(x, groups = "spherical"), 55 | data.frame(y, groups = "uniform")) 56 | # Compute only median separately for each group 57 | z <- eaf(x[,1:3], sets = x[,4], groups = x[,5], percentiles = 50) 58 | str(z) 59 | } 60 | \references{ 61 | \insertRef{Grunert01}{moocore} 62 | 63 | \insertRef{FonGueLopPaq2011emo}{moocore} 64 | 65 | \insertRef{LopVerDreDoe2025}{moocore} 66 | } 67 | \seealso{ 68 | \code{\link[=read_datasets]{read_datasets()}} 69 | } 70 | \author{ 71 | Manuel \enc{López-Ibáñez}{Lopez-Ibanez} 72 | } 73 | \concept{eaf} 74 | -------------------------------------------------------------------------------- /r/man/eaf_as_list.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/eaf.R 3 | \name{eaf_as_list} 4 | \alias{eaf_as_list} 5 | \title{Convert an EAF data frame to a list of data frames, where each element 6 | of the list is one attainment surface. 
The function \code{\link[=attsurf2df]{attsurf2df()}} can be 7 | used to convert the list into a single data frame.} 8 | \usage{ 9 | eaf_as_list(eaf) 10 | } 11 | \arguments{ 12 | \item{eaf}{\code{data.frame()}|\code{matrix()}\cr Data frame or matrix that represents the EAF.} 13 | } 14 | \value{ 15 | \code{list()}\cr A list of data frames. Each \code{data.frame} represents one attainment surface. 16 | } 17 | \description{ 18 | Convert an EAF data frame to a list of data frames, where each element 19 | of the list is one attainment surface. The function \code{\link[=attsurf2df]{attsurf2df()}} can be 20 | used to convert the list into a single data frame. 21 | } 22 | \examples{ 23 | extdata_path <- system.file(package="moocore", "extdata") 24 | x <- read_datasets(file.path(extdata_path, "example1_dat")) 25 | attsurfs <- eaf_as_list(eaf(x, percentiles = c(0, 50, 100))) 26 | str(attsurfs) 27 | } 28 | \seealso{ 29 | \code{\link[=eaf]{eaf()}} \code{\link[=attsurf2df]{attsurf2df()}} 30 | } 31 | \concept{eaf} 32 | -------------------------------------------------------------------------------- /r/man/eafdiff.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/eafdiff.R 3 | \name{eafdiff} 4 | \alias{eafdiff} 5 | \title{Compute empirical attainment function differences} 6 | \usage{ 7 | eafdiff(x, y, intervals = NULL, maximise = FALSE, rectangles = FALSE) 8 | } 9 | \arguments{ 10 | \item{x, y}{\code{matrix}|\code{data.frame()}\cr Data frames corresponding to the input data of 11 | left and right sides, respectively. Each data frame has at least three 12 | columns, the last one is the set of each point. 
See also 13 | \code{\link[=read_datasets]{read_datasets()}}.} 14 | 15 | \item{intervals}{\code{integer(1)}\cr The absolute range of the differences 16 | \eqn{[0, 1]} is partitioned into the number of intervals provided.} 17 | 18 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 19 | instead of minimised. Either a single logical value that applies to all 20 | objectives or a vector of logical values, with one value per objective.} 21 | 22 | \item{rectangles}{\code{logical(1)}\cr If TRUE, the output is in the form of rectangles of the same color.} 23 | } 24 | \value{ 25 | With \code{rectangle=FALSE}, a \code{data.frame} containing points where there 26 | is a transition in the value of the EAF differences. With 27 | \code{rectangle=TRUE}, a \code{matrix} where the first 4 columns give the 28 | coordinates of two corners of each rectangle. In both cases, the last 29 | column gives the difference in terms of sets in \code{x} minus sets in \code{y} that 30 | attain each point (i.e., negative values are differences in favour \code{y}). 31 | } 32 | \description{ 33 | Calculate the differences between the empirical attainment functions of two 34 | data sets. 35 | } 36 | \details{ 37 | This function calculates the differences between the EAFs of two 38 | data sets. 
39 | } 40 | \seealso{ 41 | \code{\link[=read_datasets]{read_datasets()}} 42 | } 43 | \concept{eaf} 44 | \examples{ 45 | A1 <- read_datasets(text=' 46 | 3 2 47 | 2 3 48 | 49 | 2.5 1 50 | 1 2 51 | 52 | 1 2 53 | ') 54 | 55 | A2 <- read_datasets(text=' 56 | 4 2.5 57 | 3 3 58 | 2.5 3.5 59 | 60 | 3 3 61 | 2.5 3.5 62 | 63 | 2 1 64 | ') 65 | d <- eafdiff(A1, A2) 66 | str(d) 67 | d 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | d <- eafdiff(A1, A2, rectangles = TRUE) 78 | str(d) 79 | d 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | } 92 | -------------------------------------------------------------------------------- /r/man/hv_contributions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hv.R 3 | \name{hv_contributions} 4 | \alias{hv_contributions} 5 | \title{Hypervolume contribution of a set of points} 6 | \usage{ 7 | hv_contributions(x, reference, maximise = FALSE) 8 | } 9 | \arguments{ 10 | \item{x}{\code{matrix()}|\code{data.frame()}\cr Matrix or data frame of numerical 11 | values, where each row gives the coordinates of a point.} 12 | 13 | \item{reference}{\code{numeric()}\cr Reference point as a vector of numerical 14 | values.} 15 | 16 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 17 | instead of minimised. Either a single logical value that applies to all 18 | objectives or a vector of logical values, with one value per objective.} 19 | } 20 | \value{ 21 | \code{numeric()}\cr A numerical vector 22 | } 23 | \description{ 24 | Computes the hypervolume contribution of each point given a set of points 25 | with respect to a given reference point assuming minimization of all 26 | objectives. Dominated points have zero contribution. Duplicated points have 27 | zero contribution even if not dominated, because removing one of them does 28 | not change the hypervolume dominated by the remaining set. 
29 | } 30 | \examples{ 31 | 32 | data(SPEA2minstoptimeRichmond) 33 | # The second objective must be maximized 34 | # We calculate the hypervolume contribution of each point of the union of all sets. 35 | hv_contributions(SPEA2minstoptimeRichmond[, 1:2], reference = c(250, 0), 36 | maximise = c(FALSE, TRUE)) 37 | 38 | # Duplicated points show zero contribution above, even if not 39 | # dominated. However, filter_dominated removes all duplicates except 40 | # one. Hence, there are more points below with nonzero contribution. 41 | hv_contributions(filter_dominated(SPEA2minstoptimeRichmond[, 1:2], maximise = c(FALSE, TRUE)), 42 | reference = c(250, 0), maximise = c(FALSE, TRUE)) 43 | 44 | } 45 | \references{ 46 | \insertRef{FonPaqLop06:hypervolume}{moocore} 47 | 48 | \insertRef{BeuFonLopPaqVah09:tec}{moocore} 49 | } 50 | \seealso{ 51 | \code{\link[=hypervolume]{hypervolume()}} 52 | } 53 | \author{ 54 | Manuel \enc{López-Ibáñez}{Lopez-Ibanez} 55 | } 56 | \concept{metrics} 57 | -------------------------------------------------------------------------------- /r/man/largest_eafdiff.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/largest_eafdiff.R 3 | \name{largest_eafdiff} 4 | \alias{largest_eafdiff} 5 | \title{Identify largest EAF differences} 6 | \usage{ 7 | largest_eafdiff(x, maximise = FALSE, intervals = 5L, reference, ideal = NULL) 8 | } 9 | \arguments{ 10 | \item{x}{\code{list()}\cr A list of matrices or data frames with at least 3 columns (last column indicates the set).} 11 | 12 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 13 | instead of minimised. 
Either a single logical value that applies to all 14 | objectives or a vector of logical values, with one value per objective.} 15 | 16 | \item{intervals}{\code{integer(1)}\cr The absolute range of the differences 17 | \eqn{[0, 1]} is partitioned into the number of intervals provided.} 18 | 19 | \item{reference}{\code{numeric()}\cr Reference point as a vector of numerical 20 | values.} 21 | 22 | \item{ideal}{\code{numeric()}\cr Ideal point as a vector of numerical values. If 23 | \code{NULL}, it is calculated as minimum (or maximum if maximising that 24 | objective) of each objective in the input data.} 25 | } 26 | \value{ 27 | \code{list()}\cr A list with two components \code{pair} and \code{value}. 28 | } 29 | \description{ 30 | Given a list of datasets, return the indexes of the pair with the largest 31 | EAF differences according to the method proposed by \citet{DiaLop2020ejor}. 32 | } 33 | \examples{ 34 | # FIXME: This example is too large, we need a smaller one. 35 | data(tpls50x20_1_MWT) 36 | nadir <- apply(tpls50x20_1_MWT[,2:3], 2L, max) 37 | x <- largest_eafdiff(split.data.frame(tpls50x20_1_MWT[,2:4], tpls50x20_1_MWT[, 1L]), 38 | reference = nadir) 39 | str(x) 40 | 41 | } 42 | \references{ 43 | \insertAllCited{} 44 | } 45 | \concept{eaf} 46 | -------------------------------------------------------------------------------- /r/man/macros/macros.Rd: -------------------------------------------------------------------------------- 1 | \newcommand{\citep}{\insertCite{#1}{moocore}}% 2 | \newcommand{\citet}{\insertCite{#1;textual}{moocore}}% 3 | -------------------------------------------------------------------------------- /r/man/moocore-package.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/moocore-package.R 3 | \docType{package} 4 | \name{moocore-package} 5 | \alias{moocore} 6 | \alias{moocore-package} 7 | \title{moocore: Core 
Mathematical Functions for Multi-Objective Optimization} 8 | \description{ 9 | Fast implementation of mathematical operations and performance metrics for multi-objective optimization, including filtering and ranking of dominated vectors according to Pareto optimality, computation of the empirical attainment function, V.G. da Fonseca, C.M. Fonseca, A.O. Hall (2001) \doi{10.1007/3-540-44719-9_15}, hypervolume metric, C.M. Fonseca, L. Paquete, M. López-Ibáñez (2006) \doi{10.1109/CEC.2006.1688440}, epsilon indicator, inverted generational distance, and Vorob'ev threshold, expectation and deviation, M. Binois, D. Ginsbourger, O. Roustant (2015) \doi{10.1016/j.ejor.2014.07.032}, among others. 10 | } 11 | \seealso{ 12 | Useful links: 13 | \itemize{ 14 | \item \url{https://multi-objective.github.io/moocore/r/} 15 | \item \url{https://github.com/multi-objective/moocore/tree/main/r} 16 | \item Report bugs at \url{https://github.com/multi-objective/moocore/issues} 17 | } 18 | 19 | } 20 | \author{ 21 | \strong{Maintainer}: Manuel López-Ibáñez \email{manuel.lopez-ibanez@manchester.ac.uk} (\href{https://orcid.org/0000-0001-9974-1295}{ORCID}) 22 | 23 | Other contributors: 24 | \itemize{ 25 | \item Carlos Fonseca [contributor] 26 | \item Luís Paquete [contributor] 27 | \item Andreia P. Guerreiro [contributor] 28 | \item Mickaël Binois [contributor] 29 | \item Michael H. 
Buselli (AVL-tree library) [copyright holder] 30 | \item Wessel Dankers (AVL-tree library) [copyright holder] 31 | \item NumPy Developers (RNG and ziggurat constants) [copyright holder] 32 | \item Jean-Sebastien Roy (mt19937 library) [copyright holder] 33 | \item Makoto Matsumoto (mt19937 library) [copyright holder] 34 | \item Takuji Nishimura (mt19937 library) [copyright holder] 35 | } 36 | 37 | } 38 | \keyword{internal} 39 | -------------------------------------------------------------------------------- /r/man/nondominated.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/nondominated.R 3 | \name{is_nondominated} 4 | \alias{is_nondominated} 5 | \alias{filter_dominated} 6 | \alias{pareto_rank} 7 | \title{Identify, remove and rank dominated points according to Pareto optimality} 8 | \usage{ 9 | is_nondominated(x, maximise = FALSE, keep_weakly = FALSE) 10 | 11 | filter_dominated(x, maximise = FALSE, keep_weakly = FALSE) 12 | 13 | pareto_rank(x, maximise = FALSE) 14 | } 15 | \arguments{ 16 | \item{x}{\code{matrix()}|\code{data.frame()}\cr Matrix or data frame of numerical 17 | values, where each row gives the coordinates of a point.} 18 | 19 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 20 | instead of minimised. Either a single logical value that applies to all 21 | objectives or a vector of logical values, with one value per objective.} 22 | 23 | \item{keep_weakly}{\code{logical(1)}\cr If \code{FALSE}, return \code{FALSE} for any 24 | duplicates of nondominated points. 
Which of the duplicates is identified 25 | as nondominated is unspecified due to the sorting not being stable in this 26 | version.} 27 | } 28 | \value{ 29 | \code{\link[=is_nondominated]{is_nondominated()}} returns a logical vector of the same length 30 | as the number of rows of \code{data}, where \code{TRUE} means that the 31 | point is not dominated by any other point. 32 | 33 | \code{filter_dominated} returns a matrix or data.frame with only mutually nondominated points. 34 | 35 | \code{pareto_rank()} returns an integer vector of the same length as 36 | the number of rows of \code{data}, where each value gives the rank of each 37 | point. 38 | } 39 | \description{ 40 | Identify nondominated points with \code{is_nondominated()} and remove dominated 41 | ones with \code{filter_dominated()}. 42 | 43 | \code{pareto_rank()} ranks points according to Pareto-optimality, 44 | which is also called nondominated sorting \citep{Deb02nsga2}. 45 | } 46 | \details{ 47 | Given \eqn{n} points of dimension \eqn{m}, the current implementation uses 48 | the well-known \eqn{O(n \log n)} dimension-sweep algorithm 49 | \citep{KunLucPre1975jacm} for \eqn{m \leq 3} and the naive \eqn{O(m n^2)} 50 | algorithm for \eqn{m \geq 4}. 51 | 52 | \code{pareto_rank()} is meant to be used like \code{rank()}, but it 53 | assigns ranks according to Pareto dominance. Duplicated points are kept on 54 | the same front. When \code{ncol(data) == 2}, the code uses the \eqn{O(n 55 | \log n)} algorithm by \citet{Jen03}. 
56 | } 57 | \examples{ 58 | S = matrix(c(1,1,0,1,1,0,1,0), ncol = 2, byrow = TRUE) 59 | is_nondominated(S) 60 | 61 | is_nondominated(S, maximise = TRUE) 62 | 63 | filter_dominated(S) 64 | 65 | filter_dominated(S, keep_weakly = TRUE) 66 | 67 | path_A1 <- file.path(system.file(package="moocore"),"extdata","ALG_1_dat.xz") 68 | set <- read_datasets(path_A1)[,1:2] 69 | is_nondom <- is_nondominated(set) 70 | cat("There are ", sum(is_nondom), " nondominated points\n") 71 | 72 | if (requireNamespace("graphics", quietly = TRUE)) { 73 | plot(set, col = "blue", type = "p", pch = 20) 74 | ndset <- filter_dominated(set) 75 | points(ndset[order(ndset[,1]),], col = "red", pch = 21) 76 | } 77 | 78 | ranks <- pareto_rank(set) 79 | str(ranks) 80 | if (requireNamespace("graphics", quietly = TRUE)) { 81 | colors <- colorRampPalette(c("red","yellow","springgreen","royalblue"))(max(ranks)) 82 | plot(set, col = colors[ranks], type = "p", pch = 20) 83 | } 84 | } 85 | \references{ 86 | \insertAllCited{} 87 | } 88 | \author{ 89 | Manuel \enc{López-Ibáñez}{Lopez-Ibanez} 90 | } 91 | \concept{dominance} 92 | -------------------------------------------------------------------------------- /r/man/normalise.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalise.R 3 | \name{normalise} 4 | \alias{normalise} 5 | \title{Normalise points} 6 | \usage{ 7 | normalise(x, to_range = c(1, 2), lower = NA, upper = NA, maximise = FALSE) 8 | } 9 | \arguments{ 10 | \item{x}{\code{matrix()}|\code{data.frame()}\cr Matrix or data frame of numerical 11 | values, where each row gives the coordinates of a point.} 12 | 13 | \item{to_range}{\code{numerical(2)}\cr Normalise values to this range. If the objective is 14 | maximised, it is normalised to \code{c(to_range[1], to_range[0])} 15 | instead.} 16 | 17 | \item{lower, upper}{\code{numerical()}\cr Bounds on the values. 
If \code{NA}, the maximum and minimum 18 | values of each coordinate are used.} 19 | 20 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 21 | instead of minimised. Either a single logical value that applies to all 22 | objectives or a vector of logical values, with one value per objective.} 23 | } 24 | \value{ 25 | \code{matrix()}\cr A numerical matrix 26 | } 27 | \description{ 28 | Normalise points per coordinate to a range, e.g., \code{c(1,2)}, where the 29 | minimum value will correspond to 1 and the maximum to 2. If bounds are 30 | given, they are used for the normalisation. 31 | } 32 | \examples{ 33 | 34 | data(SPEA2minstoptimeRichmond) 35 | # The second objective must be maximized 36 | head(SPEA2minstoptimeRichmond[, 1:2]) 37 | 38 | head(normalise(SPEA2minstoptimeRichmond[, 1:2], maximise = c(FALSE, TRUE))) 39 | 40 | head(normalise(SPEA2minstoptimeRichmond[, 1:2], to_range = c(0,1), maximise = c(FALSE, TRUE))) 41 | 42 | } 43 | \author{ 44 | Manuel \enc{López-Ibáñez}{Lopez-Ibanez} 45 | } 46 | -------------------------------------------------------------------------------- /r/man/rbind_datasets.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rbind_datasets.R 3 | \name{rbind_datasets} 4 | \alias{rbind_datasets} 5 | \title{Combine datasets \code{x} and \code{y} by row taking care of making all sets unique.} 6 | \usage{ 7 | rbind_datasets(x, y) 8 | } 9 | \arguments{ 10 | \item{x, y}{\code{matrix}|\code{data.frame()}\cr Each dataset has at least three 11 | columns, the last one is the set of each point. See also 12 | \code{\link[=read_datasets]{read_datasets()}}.} 13 | } 14 | \value{ 15 | \verb{matrix()|}data.frame()`\cr A dataset. 16 | } 17 | \description{ 18 | Combine datasets \code{x} and \code{y} by row taking care of making all sets unique. 
19 | } 20 | \examples{ 21 | x <- data.frame(f1 = 5:10, f2 = 10:5, set = 1:6) 22 | y <- data.frame(f1 = 15:20, f2 = 20:15, set = 1:6) 23 | rbind_datasets(x,y) 24 | } 25 | -------------------------------------------------------------------------------- /r/man/read_datasets.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/read_datasets.R 3 | \name{read_datasets} 4 | \alias{read_datasets} 5 | \title{Read several data sets} 6 | \usage{ 7 | read_datasets(file, col_names, text) 8 | } 9 | \arguments{ 10 | \item{file}{\code{character()}\cr Filename that contains the data. Each row 11 | of the table appears as one line of the file. If it does not contain an 12 | \emph{absolute} path, the file name is \emph{relative} to the current 13 | working directory, \code{\link[base:getwd]{base::getwd()}}. Tilde-expansion is 14 | performed where supported. Files compressed with \code{xz} are supported.} 15 | 16 | \item{col_names}{\code{character()}\cr Vector of optional names for the variables. The 17 | default is to use \samp{"V"} followed by the column number.} 18 | 19 | \item{text}{\code{character()}\cr If \code{file} is not supplied and this is, 20 | then data are read from the value of \code{text} via a text connection. 21 | Notice that a literal string can be used to include (small) data sets 22 | within R code.} 23 | } 24 | \value{ 25 | \code{matrix()}\cr A numerical matrix of the 26 | data in the file. An extra column \code{set} is added to indicate to 27 | which set each row belongs. 28 | } 29 | \description{ 30 | Reads a text file in table format and creates a matrix from it. The file 31 | may contain several sets, separated by empty lines. Lines starting by 32 | \code{'#'} are considered comments and treated as empty lines. The function 33 | adds an additional column \code{set} to indicate to which set each row 34 | belongs. 
35 | } 36 | \note{ 37 | There are several examples of data sets in 38 | \code{system.file(package="moocore","extdata")}. 39 | } 40 | \section{Warning}{ 41 | 42 | A known limitation is that the input file must use newline characters 43 | native to the host system, otherwise they will be, possibly silently, 44 | misinterpreted. In GNU/Linux the program \code{dos2unix} may be used 45 | to fix newline characters. 46 | } 47 | 48 | \examples{ 49 | extdata_path <- system.file(package="moocore","extdata") 50 | A1 <- read_datasets(file.path(extdata_path,"ALG_1_dat.xz")) 51 | str(A1) 52 | 53 | read_datasets(text="1 2\n3 4\n\n5 6\n7 8\n", col_names=c("obj1", "obj2")) 54 | 55 | } 56 | \seealso{ 57 | \code{\link[utils:read.table]{utils::read.table()}} 58 | } 59 | \author{ 60 | Manuel \enc{López-Ibáñez}{Lopez-Ibanez} 61 | } 62 | \keyword{file} 63 | -------------------------------------------------------------------------------- /r/man/tpls50x20_1_MWT.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/moocore-package.R 3 | \docType{data} 4 | \name{tpls50x20_1_MWT} 5 | \alias{tpls50x20_1_MWT} 6 | \title{Various strategies of Two-Phase Local Search applied to the Permutation 7 | Flowshop Problem with Makespan and Weighted Tardiness objectives.} 8 | \format{ 9 | A data frame with 1511 observations of 4 variables: 10 | \describe{ 11 | \item{\code{algorithm}}{TPLS search strategy} 12 | \item{\code{Makespan}}{first objective values.} 13 | \item{\code{WeightedTardiness}}{second objective values.} 14 | \item{\code{run}}{index of the run.} 15 | } 16 | } 17 | \source{ 18 | \insertRef{DubLopStu2011amai}{moocore} 19 | } 20 | \usage{ 21 | tpls50x20_1_MWT 22 | } 23 | \description{ 24 | Various strategies of Two-Phase Local Search applied to the Permutation 25 | Flowshop Problem with Makespan and Weighted Tardiness objectives. 
26 | } 27 | \examples{ 28 | data(tpls50x20_1_MWT) 29 | str(tpls50x20_1_MWT) 30 | } 31 | \keyword{datasets} 32 | -------------------------------------------------------------------------------- /r/man/transform_maximise.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{transform_maximise} 4 | \alias{transform_maximise} 5 | \title{Transform matrix according to maximise parameter} 6 | \usage{ 7 | transform_maximise(x, maximise) 8 | } 9 | \arguments{ 10 | \item{x}{\code{matrix()}|\code{data.frame()}\cr Matrix or data frame of numerical 11 | values, where each row gives the coordinates of a point.} 12 | 13 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 14 | instead of minimised. Either a single logical value that applies to all 15 | objectives or a vector of logical values, with one value per objective.} 16 | } 17 | \value{ 18 | \code{x} transformed such that every column where \code{maximise} is \code{TRUE} is multiplied by \code{-1}. 
19 | } 20 | \description{ 21 | Transform matrix according to maximise parameter 22 | } 23 | \examples{ 24 | x <- data.frame(f1=1:10, f2=101:110) 25 | rownames(x) <- letters[1:10] 26 | transform_maximise(x, maximise=c(FALSE,TRUE)) 27 | transform_maximise(x, maximise=TRUE) 28 | x <- as.matrix(x) 29 | transform_maximise(x, maximise=c(FALSE,TRUE)) 30 | transform_maximise(x, maximise=TRUE) 31 | 32 | } 33 | -------------------------------------------------------------------------------- /r/man/whv_hype.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/whv.R 3 | \name{whv_hype} 4 | \alias{whv_hype} 5 | \title{Approximation of the (weighted) hypervolume by Monte-Carlo sampling (2D only)} 6 | \usage{ 7 | whv_hype( 8 | x, 9 | reference, 10 | ideal, 11 | maximise = FALSE, 12 | nsamples = 100000L, 13 | seed = NULL, 14 | dist = "uniform", 15 | mu = NULL 16 | ) 17 | } 18 | \arguments{ 19 | \item{x}{\code{matrix()}|\code{data.frame()}\cr Matrix or data frame of numerical 20 | values, where each row gives the coordinates of a point.} 21 | 22 | \item{reference}{\code{numeric()}\cr Reference point as a vector of numerical 23 | values.} 24 | 25 | \item{ideal}{\code{numeric()}\cr Ideal point as a vector of numerical values.} 26 | 27 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 28 | instead of minimised. Either a single logical value that applies to all 29 | objectives or a vector of logical values, with one value per objective.} 30 | 31 | \item{nsamples}{\code{integer(1)}\cr number of samples for Monte-Carlo sampling.} 32 | 33 | \item{seed}{\code{integer(1)}\cr random seed.} 34 | 35 | \item{dist}{\code{character(1)}\cr weight distribution type. See Details.} 36 | 37 | \item{mu}{\code{numeric()}\cr parameter of the weight distribution. See Details.} 38 | } 39 | \value{ 40 | A single numerical value. 
41 | } 42 | \description{ 43 | Return an estimation of the hypervolume of the space dominated by the input 44 | data following the procedure described by \citet{AugBadBroZit2009gecco}. A 45 | weight distribution describing user preferences may be specified. 46 | } 47 | \details{ 48 | The current implementation only supports 2 objectives. 49 | 50 | A weight distribution \citep{AugBadBroZit2009gecco} can be provided via the \code{dist} argument. The ones currently supported are: 51 | \itemize{ 52 | \item \code{"uniform"} corresponds to the default hypervolume (unweighted). 53 | \item \code{"point"} describes a goal in the objective space, where the parameter \code{mu} gives the coordinates of the goal. The resulting weight distribution is a multivariate normal distribution centred at the goal. 54 | \item \code{"exponential"} describes an exponential distribution with rate parameter \code{1/mu}, i.e., \eqn{\lambda = \frac{1}{\mu}}. 55 | } 56 | } 57 | \references{ 58 | \insertAllCited{} 59 | } 60 | \seealso{ 61 | \code{\link[=read_datasets]{read_datasets()}}, \code{\link[=eafdiff]{eafdiff()}}, \code{\link[=whv_rect]{whv_rect()}} 62 | } 63 | \concept{metrics} 64 | \examples{ 65 | whv_hype(matrix(2, ncol=2), reference = 4, ideal = 1, seed = 42) 66 | whv_hype(matrix(c(3,1), ncol=2), reference = 4, ideal = 1, seed = 42) 67 | whv_hype(matrix(2, ncol=2), reference = 4, ideal = 1, seed = 42, 68 | dist = "exponential", mu=0.2) 69 | whv_hype(matrix(c(3,1), ncol=2), reference = 4, ideal = 1, seed = 42, 70 | dist = "exponential", mu=0.2) 71 | whv_hype(matrix(2, ncol=2), reference = 4, ideal = 1, seed = 42, 72 | dist = "point", mu=c(2.9,0.9)) 73 | whv_hype(matrix(c(3,1), ncol=2), reference = 4, ideal = 1, seed = 42, 74 | dist = "point", mu=c(2.9,0.9)) 75 | } 76 | -------------------------------------------------------------------------------- /r/man/whv_rect.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not 
edit by hand 2 | % Please edit documentation in R/whv.R 3 | \name{whv_rect} 4 | \alias{whv_rect} 5 | \alias{total_whv_rect} 6 | \title{Compute (total) weighted hypervolume given a set of rectangles} 7 | \usage{ 8 | whv_rect(x, rectangles, reference, maximise = FALSE) 9 | 10 | total_whv_rect( 11 | x, 12 | rectangles, 13 | reference, 14 | maximise = FALSE, 15 | ideal = NULL, 16 | scalefactor = 0.1 17 | ) 18 | } 19 | \arguments{ 20 | \item{x}{\code{matrix()}|\code{data.frame()}\cr Matrix or data frame of numerical 21 | values, where each row gives the coordinates of a point.} 22 | 23 | \item{rectangles}{\code{matrix()}\cr Weighted rectangles that will bias the 24 | computation of the hypervolume. May be generated by \code{\link[=eafdiff]{eafdiff()}} with 25 | \code{rectangles=TRUE} or by \code{\link[=choose_eafdiff]{choose_eafdiff()}}.} 26 | 27 | \item{reference}{\code{numeric()}\cr Reference point as a vector of numerical 28 | values.} 29 | 30 | \item{maximise}{\code{logical()}\cr Whether the objectives must be maximised 31 | instead of minimised. Either a single logical value that applies to all 32 | objectives or a vector of logical values, with one value per objective.} 33 | 34 | \item{ideal}{\code{numeric()}\cr Ideal point as a vector of numerical values. If 35 | \code{NULL}, it is calculated as minimum (or maximum if maximising that 36 | objective) of each objective in the input data.} 37 | 38 | \item{scalefactor}{\code{numeric(1)}\cr Real value within \eqn{(0,1]} that scales 39 | the overall weight of the differences. This is parameter psi (\eqn{\psi}) 40 | in \citet{DiaLop2020ejor}.} 41 | } 42 | \value{ 43 | \code{numeric(1)} A single numerical value. 44 | } 45 | \description{ 46 | Calculates the hypervolume weighted by a set of rectangles (with zero weight 47 | outside the rectangles).
The function \code{\link[=total_whv_rect]{total_whv_rect()}} calculates the 48 | total weighted hypervolume as \code{\link[=hypervolume]{hypervolume()}}\code{ + scalefactor * abs(prod(reference - ideal)) * whv_rect()}. The details of the computation 49 | are given by \citet{DiaLop2020ejor}. 50 | } 51 | \details{ 52 | TODO 53 | } 54 | \examples{ 55 | rectangles <- as.matrix(read.table(header=FALSE, text=' 56 | 1.0 3.0 2.0 Inf 1 57 | 2.0 3.5 2.5 Inf 2 58 | 2.0 3.0 3.0 3.5 3 59 | ')) 60 | whv_rect (matrix(2, ncol=2), rectangles, reference = 6) 61 | whv_rect (matrix(c(2, 1), ncol=2), rectangles, reference = 6) 62 | whv_rect (matrix(c(1, 2), ncol=2), rectangles, reference = 6) 63 | 64 | total_whv_rect (matrix(2, ncol=2), rectangles, reference = 6, ideal = c(1,1)) 65 | total_whv_rect (matrix(c(2, 1), ncol=2), rectangles, reference = 6, ideal = c(1,1)) 66 | total_whv_rect (matrix(c(1, 2), ncol=2), rectangles, reference = 6, ideal = c(1,1)) 67 | 68 | } 69 | \references{ 70 | \insertAllCited{} 71 | } 72 | \seealso{ 73 | \code{\link[=read_datasets]{read_datasets()}}, \code{\link[=eafdiff]{eafdiff()}}, \code{\link[=choose_eafdiff]{choose_eafdiff()}}, \code{\link[=whv_hype]{whv_hype()}} 74 | } 75 | \concept{metrics} 76 | -------------------------------------------------------------------------------- /r/man/write_datasets.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/read_datasets.R 3 | \name{write_datasets} 4 | \alias{write_datasets} 5 | \title{Write data sets} 6 | \usage{ 7 | write_datasets(x, file = "") 8 | } 9 | \arguments{ 10 | \item{x}{\code{matrix}|\code{data.frame()}\cr Dataset with at least three 11 | columns, the last one is the set of each point. See also 12 | \code{\link[=read_datasets]{read_datasets()}}.} 13 | 14 | \item{file}{Either a character string naming a file or a connection open for 15 | writing. 
\code{""} indicates output to the console.} 16 | } 17 | \value{ 18 | No return value, called for side effects 19 | } 20 | \description{ 21 | Write data sets to a file in the same format as \code{\link[=read_datasets]{read_datasets()}}. 22 | } 23 | \examples{ 24 | x <- read_datasets(text="1 2\n3 4\n\n5 6\n7 8\n", col_names=c("obj1", "obj2")) 25 | write_datasets(x) 26 | 27 | } 28 | \seealso{ 29 | \code{\link[utils:write.table]{utils::write.table()}}, \code{\link[=read_datasets]{read_datasets()}} 30 | } 31 | \keyword{file} 32 | -------------------------------------------------------------------------------- /r/src/Makevars: -------------------------------------------------------------------------------- 1 | # -*- mode: makefile -*- 2 | # Do not put GCC specific flags here. Put them instead in CFLAGS and 3 | # CXXFLAGS in ~/.R/Makevars 4 | LTO = $(LTO_OPT) 5 | DEBUG=0 6 | PKG_CPPFLAGS = -DR_PACKAGE -DDEBUG=$(DEBUG) -I./libmoocore/ $(LTO) 7 | 8 | MOOCORE_SRC_FILES = hv3dplus.c hv4d.c hv_contrib.c hv.c pareto.c whv.c whv_hype.c avl.c eaf3d.c eaf.c io.c rng.c mt19937/mt19937.c 9 | SOURCES = $(MOOCORE_SRC_FILES:%=libmoocore/%) init.c Rmoocore.c 10 | OBJECTS = $(SOURCES:.c=.o) 11 | 12 | .PHONY: all clean exes 13 | 14 | all: exes $(SHLIB) 15 | 16 | # We need this to force building order in parallel builds 17 | $(SHLIB): $(OBJECTS) 18 | 19 | exes: $(SHLIB) 20 | $(MAKE) -C libmoocore all march=none CC="$(CC)" CFLAGS="$(CFLAGS) $(LTO)" OPT_CFLAGS="" WARN_CFLAGS="" DEBUG=$(DEBUG) EXE=$(EXEEXT) BINDIR=$(PWD) 21 | @-$(RM) libmoocore/*.o 22 | 23 | clean: 24 | @-$(RM) $(SHLIB) $(OBJECTS) 25 | $(MAKE) -C libmoocore clean EXE=$(EXEEXT) BINDIR=$(PWD) 26 | -------------------------------------------------------------------------------- /r/src/Makevars.ucrt: -------------------------------------------------------------------------------- 1 | CRT=-ucrt 2 | include Makevars.win 3 | OBJECTS = $(SOURCES:.c=.o) 4 | 
-------------------------------------------------------------------------------- /r/src/Makevars.win: -------------------------------------------------------------------------------- 1 | # -*- mode: makefile -*- 2 | # Do not put GCC specific flags here. Put them instead in CFLAGS and 3 | # CXXFLAGS in ~/.R/Makevars 4 | EXEEXT=.exe 5 | include Makevars 6 | OBJECTS = $(SOURCES:.c=.o) 7 | -------------------------------------------------------------------------------- /r/src/init.c: -------------------------------------------------------------------------------- 1 | #include <stddef.h> // for NULL 2 | #include <R.h> 3 | #include <Rinternals.h> 4 | #include <R_ext/Rdynload.h> 5 | 6 | // Not sure how to fix this warning. Also newer versions of Clang do not warn. 7 | // warning: must specify at least one argument for '...' parameter of variadic macro 8 | #if defined(__clang__) 9 | # pragma clang diagnostic push 10 | # pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments" 11 | #endif 12 | 13 | // Supports 1-10 arguments 14 | #define VA_NARGS_IMPL(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N 15 | #define VA_NARGS(...) VA_NARGS_IMPL(__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1) 16 | 17 | #define DECLARE_CALL(NAME, ...) \ 18 | extern SEXP NAME(__VA_ARGS__); 19 | #include "init.h" 20 | #undef DECLARE_CALL 21 | 22 | 23 | #define DECLARE_CALL(NAME, ...)
\ 24 | {#NAME, (DL_FUNC) &NAME, VA_NARGS(__VA_ARGS__)}, 25 | 26 | static const R_CallMethodDef CallEntries[] = { 27 | #include "init.h" 28 | {NULL, NULL, 0} 29 | }; 30 | #undef DECLARE_CALL 31 | 32 | #if defined(__clang__) 33 | # pragma clang diagnostic pop 34 | #endif 35 | 36 | void R_init_moocore(DllInfo *dll) 37 | { 38 | R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); 39 | R_useDynamicSymbols(dll, FALSE); 40 | } 41 | -------------------------------------------------------------------------------- /r/src/init.h: -------------------------------------------------------------------------------- 1 | /* .Call calls */ 2 | DECLARE_CALL(compute_eaf_C, SEXP DATA, SEXP CUMSIZES, SEXP PERCENTILE) 3 | DECLARE_CALL(compute_eafdiff_C, SEXP DATA, SEXP CUMSIZES, SEXP INTERVALS) 4 | DECLARE_CALL(compute_eafdiff_polygon_C, SEXP DATA, SEXP CUMSIZES, SEXP INTERVALS) 5 | DECLARE_CALL(compute_eafdiff_rectangles_C, SEXP DATA, SEXP CUMSIZES, SEXP INTERVALS) 6 | DECLARE_CALL(R_read_datasets, SEXP FILENAME) 7 | DECLARE_CALL(hypervolume_C, SEXP DATA, SEXP REFERENCE) 8 | DECLARE_CALL(hv_contributions_C, SEXP DATA, SEXP REFERENCE) 9 | DECLARE_CALL(normalise_C, SEXP DATA, SEXP RANGE, SEXP LBOUND, SEXP UBOUND, SEXP MAXIMISE) 10 | DECLARE_CALL(is_nondominated_C, SEXP DATA, SEXP MAXIMISE, SEXP KEEP_WEAKLY) 11 | DECLARE_CALL(pareto_ranking_C, SEXP DATA) 12 | DECLARE_CALL(epsilon_mul_C, SEXP DATA, SEXP REFERENCE, SEXP MAXIMISE) 13 | DECLARE_CALL(epsilon_add_C, SEXP DATA, SEXP REFERENCE, SEXP MAXIMISE) 14 | DECLARE_CALL(igd_C, SEXP DATA, SEXP REFERENCE, SEXP MAXIMISE) 15 | DECLARE_CALL(igd_plus_C, SEXP DATA, SEXP REFERENCE, SEXP MAXIMISE) 16 | DECLARE_CALL(avg_hausdorff_dist_C, SEXP DATA, SEXP REFERENCE, SEXP MAXIMISE, SEXP P) 17 | DECLARE_CALL(rect_weighted_hv2d_C, SEXP DATA, SEXP RECTANGLES, SEXP REFERENCE) 18 | DECLARE_CALL(whv_hype_C, SEXP DATA, SEXP IDEAL, SEXP REFERENCE, SEXP NSAMPLES, SEXP DIST, SEXP SEED, SEXP MU) 19 | 
-------------------------------------------------------------------------------- /r/src/install.libs.R: -------------------------------------------------------------------------------- 1 | files <- c("hv", "epsilon","igd","dominatedsets","nondominated","ndsort", "eaf") 2 | 3 | file_move <- function(from, to) { 4 | file.copy(from = from, to = to, overwrite = TRUE) 5 | file.remove(from) 6 | } 7 | 8 | if (WINDOWS) files <- paste0(files, ".exe") 9 | if (any(file.exists(files))) { 10 | dest <- file.path(R_PACKAGE_DIR, paste0('bin', R_ARCH)) 11 | dir.create(dest, recursive = TRUE, showWarnings = FALSE) 12 | file_move(files, dest) 13 | } 14 | 15 | files <- Sys.glob(paste0("*", SHLIB_EXT)) 16 | if (any(file.exists(files))) { 17 | dest <- file.path(R_PACKAGE_DIR, paste0('libs', R_ARCH)) 18 | dir.create(dest, recursive = TRUE, showWarnings = FALSE) 19 | file.copy(files, dest, overwrite = TRUE) 20 | } 21 | if (file.exists("symbols.rds")) 22 | file.copy("symbols.rds", dest, overwrite = TRUE) 23 | -------------------------------------------------------------------------------- /r/src/libmoocore: -------------------------------------------------------------------------------- 1 | ../../c/ -------------------------------------------------------------------------------- /r/tests/spelling.R: -------------------------------------------------------------------------------- 1 | if (requireNamespace('spelling', quietly = TRUE)) 2 | spelling::spell_check_test(vignettes = TRUE, error = FALSE, 3 | skip_on_cran = TRUE) 4 | -------------------------------------------------------------------------------- /r/tests/testthat.R: -------------------------------------------------------------------------------- 1 | # This file is part of the standard setup for testthat. 2 | # It is recommended that you do not modify it. 3 | # 4 | # Where should you do additional test configuration? 
5 | # Learn more about the roles of various files in: 6 | # * https://r-pkgs.org/tests.html 7 | # * https://testthat.r-lib.org/reference/test_package.html#special-files 8 | 9 | library(testthat) 10 | library(moocore) 11 | 12 | test_check("moocore") 13 | -------------------------------------------------------------------------------- /r/tests/testthat/ALG_1_dat-eaf.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/ALG_1_dat-eaf.rds -------------------------------------------------------------------------------- /r/tests/testthat/DTLZDiscontinuousShape.3d.front.1000pts.10.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/DTLZDiscontinuousShape.3d.front.1000pts.10.rds -------------------------------------------------------------------------------- /r/tests/testthat/SPEA2relativeRichmond-eaf.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/SPEA2relativeRichmond-eaf.rds -------------------------------------------------------------------------------- /r/tests/testthat/_snaps/eafdiff/eafdiff-ALG_1_dat-ALG_2_dat.csv.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/_snaps/eafdiff/eafdiff-ALG_1_dat-ALG_2_dat.csv.xz -------------------------------------------------------------------------------- /r/tests/testthat/_snaps/eafdiff/eafdiff-tpls-rest.csv.xz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/_snaps/eafdiff/eafdiff-tpls-rest.csv.xz -------------------------------------------------------------------------------- /r/tests/testthat/_snaps/eafdiff/eafdiff-wrots_l10w100_dat-wrots_l100w10_dat-max-max.csv.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/_snaps/eafdiff/eafdiff-wrots_l10w100_dat-wrots_l100w10_dat-max-max.csv.xz -------------------------------------------------------------------------------- /r/tests/testthat/_snaps/eafdiff/eafdiff-wrots_l10w100_dat-wrots_l100w10_dat-max-min.csv.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/_snaps/eafdiff/eafdiff-wrots_l10w100_dat-wrots_l100w10_dat-max-min.csv.xz -------------------------------------------------------------------------------- /r/tests/testthat/_snaps/eafdiff/eafdiff-wrots_l10w100_dat-wrots_l100w10_dat-min-max.csv.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/_snaps/eafdiff/eafdiff-wrots_l10w100_dat-wrots_l100w10_dat-min-max.csv.xz -------------------------------------------------------------------------------- /r/tests/testthat/_snaps/eafdiff/eafdiff-wrots_l10w100_dat-wrots_l100w10_dat.csv.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/_snaps/eafdiff/eafdiff-wrots_l10w100_dat-wrots_l100w10_dat.csv.xz 
-------------------------------------------------------------------------------- /r/tests/testthat/helper-common.R: -------------------------------------------------------------------------------- 1 | # This file is loaded automatically by testthat (supposedly) 2 | extdata_path <- function(file) 3 | file.path(system.file(package = "moocore"), "extdata", file) 4 | 5 | read_extdata <- function(file) read_datasets(extdata_path(file)) 6 | 7 | save_csv_xz <- function(code, pattern) 8 | { 9 | path <- tempfile(fileext = ".csv.xz") 10 | res <- code 11 | write.table(file = withr::local_connection(xzfile(path, "wb")), 12 | res, row.names = FALSE, col.names=FALSE, sep=",") 13 | path 14 | } 15 | 16 | compare_file_text_compressed <- function(old, new) 17 | { 18 | if (compare_file_binary(old, new)) 19 | return(TRUE) 20 | old <- base::readLines(withr::local_connection(gzfile(old, open = "rb")), warn = FALSE) 21 | new <- base::readLines(withr::local_connection(gzfile(new, open = "rb")), warn = FALSE) 22 | identical(old, new) 23 | } 24 | 25 | expect_snapshot_csv_xz <- function(name, code) 26 | { 27 | # skip_on_ci() # Skip for now until we implement this: https://github.com/tidyverse/ggplot2/blob/main/tests/testthat/helper-vdiffr.R 28 | name <- paste0(name, ".csv.xz") 29 | # Announce the file before touching `code`. This way, if `code` 30 | # unexpectedly fails or skips, testthat will not auto-delete the 31 | # corresponding snapshot file. 
32 | testthat::announce_snapshot_file(name = name) 33 | path <- save_csv_xz(code) 34 | testthat::expect_snapshot_file(path, name = name, compare = compare_file_text_compressed) 35 | } 36 | 37 | is_wholenumber <- function(x, tol = .Machine$double.eps^0.5) 38 | abs(x - round(x)) < tol 39 | -------------------------------------------------------------------------------- /r/tests/testthat/lin.S-sph.S-diff.txt.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/multi-objective/moocore/7cae3479413f91c5577e9ac12d3b2f6fbc981305/r/tests/testthat/lin.S-sph.S-diff.txt.xz -------------------------------------------------------------------------------- /r/tests/testthat/test-doctest-eafdiff.R: -------------------------------------------------------------------------------- 1 | # Generated by doctest: do not edit by hand 2 | # Please edit file in R/eafdiff.R 3 | 4 | test_that("Doctest: eafdiff", { 5 | # Created from @doctest for `eafdiff` 6 | # Source file: R/eafdiff.R 7 | # Source line: 32 8 | A1 <- read_datasets(text = "\n 3 2\n 2 3\n\n 2.5 1\n 1 2\n\n 1 2\n") 9 | A2 <- read_datasets(text = "\n 4 2.5\n 3 3\n 2.5 3.5\n\n 3 3\n 2.5 3.5\n\n 2 1\n") 10 | d <- eafdiff(A1, A2) 11 | expect_true(is.matrix(d)) 12 | expect_equal(d, matrix(byrow = TRUE, ncol = 3, scan(quiet = TRUE, text = "1.0 2.0 2\n\n 2.0 1.0 -1\n\n 2.5 1.0 0\n\n 2.0 2.0 1\n\n 2.0 3.0 2\n\n 3.0 2.0 2\n\n 2.5 3.5 0\n\n 3.0 3.0 0\n\n 4.0 2.5 1"))) 13 | d <- eafdiff(A1, A2, rectangles = TRUE) 14 | expect_equal(d, as.matrix(read.table(header = TRUE, text = "\n\n xmin ymin xmax ymax diff\n\n 2.0 1.0 2.5 2.0 -1\n\n 1.0 2.0 2.0 Inf 2\n\n 2.5 1.0 Inf 2.0 0\n\n 2.0 2.0 3.0 3.0 1\n\n 2.0 3.5 2.5 Inf 2\n\n 2.0 3.0 3.0 3.5 2\n\n 3.0 2.5 4.0 3.0 2\n\n 3.0 2.0 Inf 2.5 2\n\n 4.0 2.5 Inf 3.0 1"))) 15 | }) 16 | 17 | -------------------------------------------------------------------------------- /r/tests/testthat/test-doctest-epsilon_additive.R: 
-------------------------------------------------------------------------------- 1 | # Generated by doctest: do not edit by hand 2 | # Please edit file in R/epsilon.R 3 | 4 | test_that("Doctest: epsilon_additive", { 5 | # Created from @doctest for `epsilon_additive` 6 | # Source file: R/epsilon.R 7 | # Source line: 58 8 | A1 <- matrix(c(9, 2, 8, 4, 7, 5, 5, 6, 4, 7), ncol = 2, byrow = TRUE) 9 | A2 <- matrix(c(8, 4, 7, 5, 5, 6, 4, 7), ncol = 2, byrow = TRUE) 10 | A3 <- matrix(c(10, 4, 9, 5, 8, 6, 7, 7, 6, 8), ncol = 2, byrow = TRUE) 11 | expect_equal(epsilon_mult(A1, A3), 0.9) 12 | expect_equal(epsilon_mult(A1, A2), 1) 13 | expect_equal(epsilon_mult(A2, A1), 2) 14 | expect_equal(exp(epsilon_additive(log(A2), log(A1))), 2) 15 | extdata_path <- system.file(package = "moocore", "extdata") 16 | path.A1 <- file.path(extdata_path, "ALG_1_dat.xz") 17 | path.A2 <- file.path(extdata_path, "ALG_2_dat.xz") 18 | A1 <- read_datasets(path.A1)[, 1:2] 19 | A2 <- read_datasets(path.A2)[, 1:2] 20 | ref <- filter_dominated(rbind(A1, A2)) 21 | expect_equal(epsilon_additive(A1, ref), 199090640) 22 | expect_equal(epsilon_additive(A2, ref), 132492066) 23 | ref <- filter_dominated(rbind(A1, A2)) 24 | expect_equal(epsilon_mult(A1, ref), 1.05401476) 25 | expect_equal(epsilon_mult(A2, ref), 1.023755) 26 | }) 27 | 28 | -------------------------------------------------------------------------------- /r/tests/testthat/test-doctest-igd.R: -------------------------------------------------------------------------------- 1 | # Generated by doctest: do not edit by hand 2 | # Please edit file in R/igd.R 3 | 4 | test_that("Doctest: igd", { 5 | # Created from @doctest for `igd` 6 | # Source file: R/igd.R 7 | # Source line: 74 8 | extdata_path <- system.file(package = "moocore", "extdata") 9 | path.A1 <- file.path(extdata_path, "ALG_1_dat.xz") 10 | path.A2 <- file.path(extdata_path, "ALG_2_dat.xz") 11 | A1 <- read_datasets(path.A1)[, 1:2] 12 | A2 <- read_datasets(path.A2)[, 1:2] 13 | ref <- 
filter_dominated(rbind(A1, A2)) 14 | expect_equal(igd(A1, ref), 91888189) 15 | expect_equal(igd(A2, ref), 11351992) 16 | expect_equal(igd_plus(A1, ref), 82695357) 17 | expect_equal(igd_plus(A2, ref), 10698269.3) 18 | expect_equal(avg_hausdorff_dist(A1, ref), 268547627) 19 | expect_equal(avg_hausdorff_dist(A2, ref), 352613092) 20 | }) 21 | 22 | -------------------------------------------------------------------------------- /r/tests/testthat/test-doctest-vorob_t.R: -------------------------------------------------------------------------------- 1 | # Generated by doctest: do not edit by hand 2 | # Please edit file in R/vorob.R 3 | 4 | test_that("Doctest: vorob_t", { 5 | # Created from @doctest for `vorob_t` 6 | # Source file: R/vorob.R 7 | # Source line: 16 8 | data(CPFs) 9 | res <- vorob_t(CPFs, reference = c(2, 200)) 10 | expect_equal(res$threshold, 44.140625) 11 | expect_equal(res$avg_hyp, 8943.3332) 12 | vd <- vorob_dev(CPFs, ve = res$ve, reference = c(2, 200)) 13 | expect_equal(vd, 3017.1299) 14 | }) 15 | 16 | -------------------------------------------------------------------------------- /r/tests/testthat/test-doctest-whv_hype.R: -------------------------------------------------------------------------------- 1 | # Generated by doctest: do not edit by hand 2 | # Please edit file in R/whv.R 3 | 4 | test_that("Doctest: whv_hype", { 5 | # Created from @doctest for `whv_hype` 6 | # Source file: R/whv.R 7 | # Source line: 134 8 | expect_equal(whv_hype(matrix(2, ncol = 2), reference = 4, ideal = 1, seed = 42), 9 | 3.99807) 10 | expect_equal(whv_hype(matrix(c(3, 1), ncol = 2), reference = 4, ideal = 1, 11 | seed = 42), 3.00555) 12 | expect_equal(whv_hype(matrix(2, ncol = 2), reference = 4, ideal = 1, seed = 42, 13 | dist = "exponential", mu = 0.2), 1.14624) 14 | expect_equal(whv_hype(matrix(c(3, 1), ncol = 2), reference = 4, ideal = 1, 15 | seed = 42, dist = "exponential", mu = 0.2), 1.66815) 16 | expect_equal(whv_hype(matrix(2, ncol = 2), reference = 4, ideal = 1, 
seed = 42, 17 | dist = "point", mu = c(2.9, 0.9)), 0.64485) 18 | expect_equal(whv_hype(matrix(c(3, 1), ncol = 2), reference = 4, ideal = 1, 19 | seed = 42, dist = "point", mu = c(2.9, 0.9)), 4.03632) 20 | }) 21 | 22 | -------------------------------------------------------------------------------- /r/tests/testthat/test-eaf.R: -------------------------------------------------------------------------------- 1 | source("helper-common.R") 2 | 3 | test_that("eaf", { 4 | 5 | test_eaf_dataset <- function(name, percentiles = NULL) { 6 | dataset <- get(name) 7 | x <- eaf(dataset, percentiles = percentiles) 8 | # FIXME: work-around for change in the computation 9 | x[,3] <- floor(x[,3]) 10 | #saveRDS(x, paste0(name, "-eaf.rds")) 11 | return(x) 12 | } 13 | test_eaf_file <- function(file, percentiles = NULL) { 14 | dataset <- read_datasets(file) 15 | x <- eaf(dataset, percentiles = percentiles) 16 | #saveRDS(x, paste0(basename(file), "-eaf.rds")) 17 | return(x) 18 | } 19 | expect_equal(test_eaf_file(extdata_path("ALG_1_dat.xz")), 20 | readRDS("ALG_1_dat-eaf.rds")) 21 | expect_equal(test_eaf_dataset("SPEA2relativeRichmond"), 22 | readRDS("SPEA2relativeRichmond-eaf.rds")) 23 | 24 | for (i in seq_len(399)) 25 | expect_equal(anyDuplicated(eaf(cbind(0:i, 0:i), sets=0:i)[,1]), 0L) 26 | }) 27 | 28 | test_that("eafs_sets_non_numeric", { 29 | x <- matrix(1:10, ncol=2) 30 | expect_equal(eaf(x, sets=1:5), eaf(x, sets=letters[1:5])) 31 | }) 32 | -------------------------------------------------------------------------------- /r/tests/testthat/test-eafdiff.R: -------------------------------------------------------------------------------- 1 | test_that("eafdiff3d", { 2 | lin <- read_datasets("lin.S.txt") 3 | sph <- read_datasets("sph.S.txt") 4 | setcol <- ncol(lin) 5 | # This may stop working once we filter uninteresting values in the C code directly. 
6 | DIFF <- eafdiff(lin, sph) 7 | x <- as.matrix(read.table("lin.S-sph.S-diff.txt.xz", header = FALSE)) 8 | x[, setcol] <- x[, setcol] - x[, setcol+1] 9 | dimnames(x) <- NULL 10 | expect_equal(DIFF[, 1:setcol], x[, 1:setcol]) 11 | }) 12 | 13 | ## FIXME: We need smaller data! 14 | test_that("eafdiff2d", { 15 | test_eafdiff <- function(a, b, maximise = FALSE, rectangles = FALSE, ...) { 16 | A1 <- read_datasets(file.path(system.file(package="moocore"), "extdata", a)) 17 | A2 <- read_datasets(file.path(system.file(package="moocore"), "extdata", b)) 18 | minmax <- "" 19 | if (any(maximise)) { 20 | minmax <- paste0("-", 21 | paste0(ifelse(maximise, "max", "min"), collapse="-")) 22 | A1[, which(maximise)] <- -A1[, which(maximise)] 23 | A2[, which(maximise)] <- -A2[, which(maximise)] 24 | } 25 | # FIXME: It would be much faster to just test the case maximise=FALSE 26 | # against the snapshot then test that all possible maximise variants are 27 | # equivalent to maximise=FALSE after negating the appropriate columns. 28 | a <- tools::file_path_sans_ext(basename(a)) 29 | b <- tools::file_path_sans_ext(basename(b)) 30 | res <- as.data.frame(eafdiff(A1, A2, maximise = maximise, ...)) 31 | expect_true(all(is_wholenumber(res[, ncol(res)]))) 32 | # If we do not save the last column as integer we get spurious differences. 
33 | res[, ncol(res)] <- as.integer(round(res[,ncol(res)])) 34 | 35 | expect_snapshot_csv_xz(paste0("eafdiff-", a, "-", b, minmax, ifelse(rectangles,"-rect","")), 36 | res) 37 | } 38 | test_eafdiff("wrots_l10w100_dat", "wrots_l100w10_dat") 39 | test_eafdiff("wrots_l10w100_dat", "wrots_l100w10_dat", maximise = c(TRUE, FALSE)) 40 | test_eafdiff("wrots_l10w100_dat", "wrots_l100w10_dat", maximise = c(FALSE, TRUE)) 41 | test_eafdiff("wrots_l10w100_dat", "wrots_l100w10_dat", maximise = c(TRUE, TRUE)) 42 | test_eafdiff("tpls.xz", "rest.xz") 43 | test_eafdiff("ALG_1_dat.xz", "ALG_2_dat.xz") 44 | }) 45 | -------------------------------------------------------------------------------- /r/tests/testthat/test-epsilon.R: -------------------------------------------------------------------------------- 1 | test_that("epsilon", { 2 | ref = matrix(c(10, 1, 6, 1, 2, 2, 1, 6, 1, 10), ncol = 2L, byrow = TRUE) 3 | A = matrix(c(4, 2, 3, 3, 2, 4), ncol = 2L, byrow = TRUE) 4 | expect_equal(epsilon_additive(A, ref), 1.0) 5 | expect_equal(epsilon_mult(A, ref), 2.0) 6 | expect_equal(epsilon_mult(A, ref, maximise=TRUE), 2.5) 7 | expect_equal(epsilon_additive(A, ref, maximise=TRUE), 6.0) 8 | }) 9 | -------------------------------------------------------------------------------- /r/tests/testthat/test-hv.R: -------------------------------------------------------------------------------- 1 | source("helper-common.R") 2 | 3 | library(tools) # file_ext 4 | test_that("hypervolume", { 5 | 6 | test_hv_file <- function(file, reference, maximise = FALSE) { 7 | nobj <- length(reference) 8 | dataset <- if (file_ext(file) == "rds") readRDS(file) else read_datasets(file) 9 | hypervolume(dataset[,1:nobj], reference = reference, maximise) 10 | } 11 | 12 | expect_equal(test_hv_file("DTLZDiscontinuousShape.3d.front.1000pts.10.rds", 13 | reference = c(10,10,10)), 14 | 719.223555475191) 15 | 16 | expect_equal(test_hv_file("duplicated3.inp", 17 | reference = c(-14324, -14906, -14500, -14654, -14232, -14093)), 18 
| 1.52890128312393e+20) 19 | 20 | }) 21 | 22 | test_that("hv_contributions", { 23 | hv_contributions_slow <- function(dataset, reference, maximise) { 24 | return(hypervolume(dataset, reference, maximise) - 25 | sapply(1:nrow(dataset), function(x) hypervolume(dataset[-x,], reference, maximise))) 26 | } 27 | reference = c(250,0) 28 | maximise = c(FALSE,TRUE) 29 | expect_equal(hv_contributions(SPEA2minstoptimeRichmond[,1:2], reference = reference, maximise = maximise), 30 | hv_contributions_slow(SPEA2minstoptimeRichmond[,1:2], reference = reference, maximise = maximise)) 31 | }) 32 | -------------------------------------------------------------------------------- /r/tests/testthat/test-igd.R: -------------------------------------------------------------------------------- 1 | # Example 4 from Ishibuchi et al. (2015) 2 | ref = matrix(c(10, 0, 6, 1, 2, 2, 1, 6, 0, 10), ncol = 2L, byrow = TRUE) 3 | A = matrix(c(4, 2, 3, 3, 2, 4), ncol = 2L, byrow = TRUE) 4 | B = matrix(c(8, 2, 4, 4, 2, 8), ncol = 2L, byrow = TRUE) 5 | 6 | test_that("igd", { 7 | expect_equal(igd(A, ref), 3.707092031609239) 8 | expect_equal(igd(B, ref), 2.59148346584763) 9 | }) 10 | 11 | test_that("igd plus", { 12 | expect_equal(igd_plus(A, ref), 1.482842712474619) 13 | expect_equal(igd_plus(B, ref), 2.260112615949154) 14 | 15 | }) 16 | test_that("avg_hausdorff_dist", { 17 | expect_equal(avg_hausdorff_dist(A, ref), 3.707092031609239) 18 | expect_equal(avg_hausdorff_dist(B, ref), 2.59148346584763) 19 | }) 20 | -------------------------------------------------------------------------------- /r/tests/testthat/test-normalise.R: -------------------------------------------------------------------------------- 1 | source("helper-common.R") 2 | 3 | test_that("normalise", { 4 | my.2d.matrix <- function(...) 
matrix(c(...), ncol = 2, byrow=FALSE) 5 | x = my.2d.matrix(0, 0.5, 1, 0, 0.5, 1) 6 | 7 | expect_equal(normalise(x), my.2d.matrix(1, 1.5, 2, 1, 1.5, 2)) 8 | 9 | expect_equal(normalise(x, maximise = c(FALSE,TRUE)), 10 | my.2d.matrix(1, 1.5, 2, 2, 1.5, 1)) 11 | 12 | expect_equal(normalise(x, to_range = c(0, 1), maximise = c(FALSE,TRUE)), 13 | my.2d.matrix(0, 0.5, 1, 1, 0.5, 0)) 14 | 15 | expect_equal(normalise(my.2d.matrix(1,1,2,2)), my.2d.matrix(1,1,1,1)) 16 | 17 | df <- as.data.frame(x) 18 | expect_equal(normalise(df), 19 | as.matrix(data.frame(V1=c(1,1.5,2), V2=c(1,1.5,2)))) 20 | }) 21 | -------------------------------------------------------------------------------- /r/tests/testthat/test-pareto-rank.R: -------------------------------------------------------------------------------- 1 | source("helper-common.R") 2 | 3 | test_that("pareto", { 4 | test_pareto_rank <- function(extdatafile, maximise = FALSE) { 5 | data <- read_extdata(extdatafile) 6 | # Drop set column 7 | data <- data[,-ncol(data)] 8 | ranks <- pareto_rank(data, maximise = maximise) 9 | data2 <- data 10 | for (r in min(ranks):max(ranks)) { 11 | # We have to keep_weakly because pareto_rank does the same. 12 | nondom <- is_nondominated(data2, maximise = maximise, keep_weakly = TRUE) 13 | expect_equal(data[ranks == r, , drop = FALSE], data2[nondom, , drop = FALSE]) 14 | data2 <- data2[!nondom, , drop = FALSE] 15 | } 16 | } 17 | test_pareto_rank("ALG_2_dat.xz") 18 | test_pareto_rank("spherical-250-10-3d.txt") 19 | }) 20 | -------------------------------------------------------------------------------- /r/tests/testthat/test-vorob.R: -------------------------------------------------------------------------------- 1 | test_that("vorob", { 2 | # FIXME: What is this test exactly testing? 
3 | test_data <- expand.grid(seq(0, 1, length.out = 51), 4 | seq(0, 1, length.out = 51)) 5 | test_data <- as.matrix(cbind(test_data, nrun = 1:nrow(test_data))) 6 | 7 | # Average hypervolume is known to be 0.25 8 | # avg_hyp <- mean(sapply(split.data.frame(test_data[,1:2], test_data[,3]), 9 | # hypervolume, reference = c(1, 1))) 10 | expect_equal(hypervolume(vorob_t(x = test_data, reference = c(1, 1))$ve, reference = c(1, 1)), 0.25, tolerance = 1e-1) 11 | expect_equal(vorob_dev(x = test_data, ve = vorob_t(test_data, reference = c(1,1))$ve, reference = c(1, 1)), 0.218, tolerance = 1e-1) 12 | 13 | }) 14 | -------------------------------------------------------------------------------- /r/tests/testthat/test-whv.R: -------------------------------------------------------------------------------- 1 | source("helper-common.R") 2 | 3 | withr::with_output_sink("test-whv.Rout", { 4 | 5 | test_that("example:whv_rect", { 6 | rectangles <- as.matrix(read.table(header=FALSE, text=' 7 | 1.0 3.0 2.0 Inf 1 8 | 2.0 3.5 2.5 Inf 2 9 | 2.0 3.0 3.0 3.5 3 10 | ')) 11 | expect_equal(4, whv_rect (matrix(2, ncol=2), rectangles, reference = 6)) 12 | expect_equal(4, whv_rect (matrix(c(2, 1), ncol=2), rectangles, reference = 6)) 13 | expect_equal(7, whv_rect (matrix(c(1, 2), ncol=2), rectangles, reference = 6)) 14 | expect_equal(26, total_whv_rect (matrix(2, ncol=2), rectangles, reference = 6, ideal = c(1,1))) 15 | expect_equal(30, total_whv_rect (matrix(c(2, 1), ncol=2), rectangles, reference = 6, ideal = c(1,1))) 16 | expect_equal(37.5, total_whv_rect (matrix(c(1, 2), ncol=2), rectangles, reference = 6, ideal = c(1,1))) 17 | }) 18 | 19 | test_that("whv_rect", { 20 | 21 | rectangles <- as.matrix(read.table(header=FALSE, text= 22 | '1.0 3.0 2.0 Inf 1 23 | 1.0 2.0 3.0 3.0 1 24 | 2.5 1.0 Inf 2.0 1 25 | 2.0 3.5 2.5 Inf 2 26 | 2.0 3.0 3.0 3.5 2 27 | 3.0 2.5 4.0 3.0 2 28 | 3.0 2.0 Inf 2.5 2 29 | 4.0 2.5 Inf 3.0 1')) 30 | x <- matrix(c(2,2), ncol=2) 31 | expect_equal(whv_rect (x, rectangles, 
reference = 6), 9.5) 32 | 33 | }) 34 | 35 | test_that("whv2", { 36 | 37 | A <- read_datasets(text=' 38 | 3 2 39 | 2 3 40 | 41 | 2.5 1 42 | 1 2 43 | ') 44 | B <- read_datasets(text=' 45 | 4 2.5 46 | 3 3 47 | 2.5 3.5 48 | 49 | 3 3 50 | 2.5 3.5 51 | ') 52 | rectangles <- eafdiff(A,B, rectangles=TRUE) 53 | true_rectangles <- as.matrix(read.table(text=' 54 | xmin ymin xmax ymax diff 55 | 1.0 3.0 2.0 Inf 1 56 | 1.0 2.0 3.0 3.0 1 57 | 2.5 1.0 Inf 2.0 1 58 | 2.0 3.5 2.5 Inf 2 59 | 2.0 3.0 3.0 3.5 2 60 | 3.0 2.5 4.0 3.0 2 61 | 3.0 2.0 Inf 2.5 2 62 | 4.0 2.5 Inf 3.0 1', header=TRUE)) 63 | expect_equal(rectangles, true_rectangles) 64 | ref <- c(5,5) 65 | ideal <- c(1,1) 66 | whv <- whv_rect(matrix(1,nrow=1,ncol=2), rectangles=rectangles, ref=ref) 67 | expect_equal(whv, 12.5) 68 | whv <- total_whv_rect(matrix(1,nrow=1,ncol=2), rectangles=rectangles, ref=ref, ideal = ideal) 69 | expect_equal(whv, 36) 70 | whv <- whv_rect(matrix(3,nrow=1,ncol=2), rectangles=rectangles, ref=ref) 71 | expect_equal(whv, 0) 72 | whv <- total_whv_rect(matrix(3,nrow=1,ncol=2), rectangles=rectangles, ref=ref, ideal = ideal) 73 | expect_equal(whv, 4) 74 | whv <- total_whv_rect(A[,1:2], rectangles=rectangles, ref=ref, ideal = ideal) 75 | expect_equal(whv, 27.3) 76 | whv <- total_whv_rect(B[,1:2], rectangles=rectangles, ref=ref, ideal = ideal) 77 | expect_equal(whv, 6.05) 78 | }) 79 | 80 | test_that("whv3", { 81 | 82 | A <- read_datasets(text=' 83 | 1 2 84 | ') 85 | B <- read_datasets(text=' 86 | 2 1 87 | ') 88 | 89 | rectangles <- eafdiff(A,B, rectangles=TRUE) 90 | ref <- c(3,3) 91 | ideal <- c(1,1) 92 | 93 | rects_A <- choose_eafdiff(rectangles, left=TRUE) 94 | rects_B <- choose_eafdiff(rectangles, left=FALSE) 95 | 96 | whv <- whv_rect(matrix(1.5,nrow=1,ncol=2), rectangles=rects_A, ref=ref) 97 | expect_equal(whv, 0.5) 98 | whv <- whv_rect(matrix(c(1,2),nrow=1,ncol=2), rectangles=rects_A, ref=ref) 99 | expect_equal(whv, 1) 100 | whv <- whv_rect(matrix(c(1,2),nrow=1,ncol=2), rectangles=rects_B, ref=ref) 
101 | expect_equal(whv, 0) 102 | whv <- whv_rect(matrix(c(2,1),nrow=1,ncol=2), rectangles=rects_A, ref=ref) 103 | expect_equal(whv, 0) 104 | whv <- whv_rect(matrix(c(2,1),nrow=1,ncol=2), rectangles=rects_B, ref=ref) 105 | expect_equal(whv, 1) 106 | }) 107 | 108 | }) # withr::with_output_sink() 109 | -------------------------------------------------------------------------------- /r/tests/testthat/test-whv_hype.R: -------------------------------------------------------------------------------- 1 | source("helper-common.R") 2 | 3 | withr::with_output_sink("test-whv_hype.Rout", { 4 | 5 | find_minmax <- function(...) 6 | { 7 | args <- list(...) 8 | lower <- list() 9 | upper <- list() 10 | for (x in args) { 11 | stopifnot(is.numeric(x)) 12 | minmax <- apply(x[,1:2], 2L, range) 13 | lower <- c(lower, list(minmax[1L, , drop = FALSE])) 14 | upper <- c(upper, list(minmax[2L, , drop = FALSE])) 15 | } 16 | lower <- apply(do.call("rbind", lower), 2L, min) # per-column minimum over all datasets 17 | upper <- apply(do.call("rbind", upper), 2L, min) # NOTE(review): 'min' here looks like a copy-paste slip -- a joint upper bound over all datasets should use 'max'; the expected values below were generated with 'min', so confirm and regenerate them before changing 18 | list(lower = lower, upper = upper) 19 | } 20 | 21 | test_that("whv_hype", { 22 | 23 | x <- read_extdata("wrots_l10w100_dat") 24 | y <- read_extdata("wrots_l100w10_dat") 25 | 26 | r <- find_minmax(x, y) 27 | ideal <- r$lower 28 | ref <- 1.1 * r$upper 29 | goal <- colMeans(x[,1:2]) 30 | 31 | x_list <- split.data.frame(x[,1:2], x[,3]) 32 | y_list <- split.data.frame(y[,1:2], y[,3]) 33 | set.seed(12345) 34 | whv_x <- whv_hype(x_list[[1]], reference = ref, ideal = ideal) 35 | expect_equal(whv_x, 2480979524388, tolerance=10) 36 | 37 | whv_x <- whv_hype(x_list[[1]], reference = ref, ideal = ideal, 38 | dist = "point", mu = goal) 39 | expect_equal(whv_x, 1496335657875, tolerance=10) 40 | 41 | whv_x <- whv_hype(x_list[[1]], reference = ref, ideal = ideal, 42 | dist = "exponential", mu=0.2) 43 | expect_equal(whv_x, 1903385037871, tolerance=10) 44 | 45 | }) 46 | 47 | }) 48 | --------------------------------------------------------------------------------
/r/tests/testthat/test-write_datasets.R: -------------------------------------------------------------------------------- 1 | source("helper-common.R") 2 | 3 | test_that("write_datasets", { 4 | x <- read_datasets(text="1 2\n3 4\n\n5 6\n7 8\n", col_names=c("obj1", "obj2")) 5 | y <- read_datasets(text = capture.output(write_datasets(x)), 6 | col_names=c("obj1", "obj2")) 7 | expect_equal(x,y) 8 | }) 9 | -------------------------------------------------------------------------------- /r/vignettes/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | *.R 3 | -------------------------------------------------------------------------------- /r/vignettes/articles/eaf.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Empirical Attainment Function (EAF)" 3 | --- 4 | 5 | ```{r, include = FALSE} 6 | knitr::opts_chunk$set( 7 | collapse = TRUE, 8 | comment = "#>" 9 | ) 10 | ``` 11 | 12 | ```{r setup} 13 | library(moocore) 14 | ``` 15 | 16 | The AUC of the EAF and the AOC (Hypervolume) 17 | -------------------------------------------- 18 | 19 | The Area-Over-the-Curve (i.e., the hypervolume) of a set of nondominated sets 20 | is exactly the Area-Under-the-Curve (AUC) of their corresponding EAF, as 21 | this example shows.
22 | 23 | 24 | ```{r auc} 25 | library(tidyr) 26 | library(ggplot2) 27 | 28 | extdata_dir <- system.file(package="moocore", "extdata") 29 | A <- read_datasets(file.path(extdata_dir, "ALG_1_dat.xz")) 30 | A[,1:2] <- normalise(A[,1:2], to_range = c(0,1)) 31 | 32 | aoc <- mean(sapply(split.data.frame(A[,1:2], A[,3]), hypervolume, reference = 1)) 33 | eafA <- eaf(A[,1:2], A[,3]) 34 | eafA[,3] <- eafA[,3]/100 35 | auc <- hypervolume(eafA, reference = c(1,1,0), maximise = c(FALSE,FALSE,TRUE)) 36 | cat("Runs = ", length(unique(A[,3])), 37 | "\nAUC of EAF = ", auc, 38 | "\nMean AOC = ", aoc, "\n") 39 | 40 | runs <- 5:length(unique(A[,3])) 41 | aocs <- c() 42 | aucs <- c() 43 | for (r in runs) { 44 | a <- A[A[,3] <= r, ] 45 | aoc <- mean(sapply(split.data.frame(a[,1:2], a[,3]), hypervolume, reference = 1)) 46 | eafa <- eaf(a[,1:2], a[,3]) 47 | eafa[,3] <- eafa[,3]/100 48 | auc <- hypervolume(eafa, reference = c(1,1,0), maximise = c(FALSE,FALSE,TRUE)) 49 | aocs <- c(aocs, aoc) 50 | aucs <- c(aucs, auc) 51 | } 52 | 53 | x <- tibble(r = runs, AOC = aocs, AUC=aucs) %>% pivot_longer(-r, names_to = "variable", values_to = "value") 54 | ``` 55 | 56 | ```{r} 57 | #| fig.alt = "Plot of EAF AUC versus mean AOC with increasing number of sets." 58 | ggplot(x, aes(r, value, color=variable, linetype=variable)) + 59 | geom_line(linewidth=1.5) + 60 | labs(x = "Number of sets", y = "Value", color = "", linetype="") 61 | ``` 62 | -------------------------------------------------------------------------------- /update_bib.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | BIBFILES="authors.bib abbrev.bib journals.bib articles.bib biblio.bib crossref.bib" 4 | 5 | for file in $BIBFILES; do 6 | # echo $file 7 | curl --silent --show-error https://raw.githubusercontent.com/iridia-ulb/references/master/${file} -o ${file} 8 | if [ ! -s "${file}" ]; then 9 | echo "error: ${file} is empty!" 
10 | exit 1 11 | fi 12 | done 13 | tmpbib=$(mktemp --tmpdir tmpXXXXXXXXXX.bib) 14 | keys=$(paste -d '#' -s bibkeys.txt | sed 's/#/\\\|/g') 15 | bib2bib --warn-error --expand --expand-xrefs --no-comment --expand-xrefs $BIBFILES --remove pdf --remove alias -c "(\$key : \"$keys\")" -ob $tmpbib -oc /dev/null 16 | if [ ! -s "${tmpbib}" ]; then 17 | echo "error: ${tmpbib} is empty! keys:= $keys" 18 | exit 1 19 | fi 20 | # Workaround https://github.com/GeoBosh/rbibutils/issues/9 21 | sed -i 's#\\slash #~/ #g' $tmpbib 22 | sed -i 's#\\hspace{0pt}#{}{}{}#g' $tmpbib 23 | # Work around broken URL: 24 | sed -i 's%researchrepository.napier.ac.uk/id/eprint/3044%lopez-ibanez.eu/publications#LopezIbanezPhD%g' $tmpbib 25 | comment='% DO NOT EDIT THIS FILE. It is auto-generated by "update_bib.sh".' 26 | PYTHON_DOC_DIR="./python/doc/source" 27 | echo $comment | cat --squeeze-blank - $tmpbib > r/inst/REFERENCES.bib 28 | echo $comment | cat --squeeze-blank - $tmpbib > $PYTHON_DOC_DIR/REFERENCES.bib 29 | rm -f $BIBFILES $tmpbib 30 | --------------------------------------------------------------------------------