├── adaptive ├── tests │ ├── __init__.py │ ├── test_notebook_integration.py │ ├── test_balancing_learner.py │ ├── test_skopt_learner.py │ ├── test_learnernd.py │ ├── test_average_learner.py │ ├── test_runner.py │ ├── test_cquad.py │ ├── test_triangulation.py │ ├── test_learner1d.py │ └── test_learners.py ├── _static_version.py ├── learner │ ├── __init__.py │ ├── data_saver.py │ ├── skopt_learner.py │ ├── base_learner.py │ ├── average_learner.py │ ├── integrator_coeffs.py │ └── balancing_learner.py ├── __init__.py ├── utils.py ├── _version.py └── notebook_integration.py ├── MANIFEST.in ├── benchmarks ├── benchmarks │ ├── __init__.py │ └── benchmarks.py ├── asv.conf.json └── README.md ├── pytest.ini ├── docs ├── .gitignore ├── source │ ├── _static │ │ ├── custom.css │ │ └── logo.png │ ├── reference │ │ ├── adaptive.runner.Runner.rst │ │ ├── adaptive.utils.rst │ │ ├── adaptive.learner.skopt_learner.rst │ │ ├── adaptive.runner.AsyncRunner.rst │ │ ├── adaptive.learner.average_learner.rst │ │ ├── adaptive.runner.BlockingRunner.rst │ │ ├── adaptive.learner.base_learner.rst │ │ ├── adaptive.runner.BaseRunner.rst │ │ ├── adaptive.learner.balancing_learner.rst │ │ ├── adaptive.learner.integrator_learner.rst │ │ ├── adaptive.notebook_integration.rst │ │ ├── adaptive.learner.triangulation.rst │ │ ├── adaptive.runner.extras.rst │ │ ├── adaptive.learner.data_saver.rst │ │ ├── adaptive.learner.learnerND.rst │ │ ├── adaptive.learner.learner1D.rst │ │ ├── adaptive.learner.learner2D.rst │ │ └── adaptive.rst │ ├── _templates │ │ └── layout.html │ ├── index.rst │ ├── tutorial │ │ ├── tutorial.rst │ │ ├── tutorial.AverageLearner.rst │ │ ├── tutorial.parallelism.rst │ │ ├── tutorial.Learner2D.rst │ │ ├── tutorial.SKOptLearner.rst │ │ ├── tutorial.DataSaver.rst │ │ ├── tutorial.BalancingLearner.rst │ │ ├── tutorial.IntegratorLearner.rst │ │ ├── tutorial.LearnerND.rst │ │ ├── tutorial.custom_loss.rst │ │ ├── tutorial.Learner1D.rst │ │ └── tutorial.advanced-topics.rst │ ├── conf.py │ └── docs.rst ├── Makefile └── environment.yml ├── readthedocs.yml ├── test-requirements.txt ├── .gitattributes ├── .mailmap ├── AUTHORS.md ├── check_whitespace ├── environment.yml ├── azure-docker.yml ├── Dockerfile ├── azure-test.yml ├── azure-pipelines.yml ├── ipynb_filter.py ├── .gitlab-ci.yml ├── .gitignore ├── LICENSE ├── setup.py ├── RELEASE.md └── README.rst /adaptive/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | -------------------------------------------------------------------------------- /benchmarks/benchmarks/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = adaptive 3 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | build/* 2 | source/_static/holoviews.* 3 | -------------------------------------------------------------------------------- /docs/source/_static/custom.css: -------------------------------------------------------------------------------- 1 | .ignore-css{all:unset 
!important;} 2 | -------------------------------------------------------------------------------- /readthedocs.yml: -------------------------------------------------------------------------------- 1 | conda: 2 | file: docs/environment.yml 3 | formats: [] 4 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | pytest-randomly 3 | pytest-cov 4 | pexpect 5 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.ipynb filter=ipynb_filter 2 | adaptive/_static_version.py export-subst 3 | -------------------------------------------------------------------------------- /docs/source/_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/basnijholt/adaptive/master/docs/source/_static/logo.png -------------------------------------------------------------------------------- /docs/source/reference/adaptive.runner.Runner.rst: -------------------------------------------------------------------------------- 1 | adaptive.Runner 2 | =============== 3 | 4 | .. autoclass:: adaptive.Runner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.utils.rst: -------------------------------------------------------------------------------- 1 | adaptive.utils module 2 | ===================== 3 | 4 | .. automodule:: adaptive.utils 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/_templates/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "!layout.html" %} 2 | {% block extrahead %} 3 | {%- for scriptfile in holoviews_js_files %} 4 | {{ js_tag(scriptfile) }} 5 | {%- endfor %} 6 | {% endblock %} 7 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.learner.skopt_learner.rst: -------------------------------------------------------------------------------- 1 | adaptive.SKOptLearner 2 | ===================== 3 | 4 | .. autoclass:: adaptive.SKOptLearner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.runner.AsyncRunner.rst: -------------------------------------------------------------------------------- 1 | adaptive.AsyncRunner 2 | ==================== 3 | 4 | .. autoclass:: adaptive.runner.AsyncRunner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.learner.average_learner.rst: -------------------------------------------------------------------------------- 1 | adaptive.AverageLearner 2 | ======================= 3 | 4 | .. 
autoclass:: adaptive.AverageLearner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.runner.BlockingRunner.rst: -------------------------------------------------------------------------------- 1 | adaptive.BlockingRunner 2 | ======================= 3 | 4 | .. autoclass:: adaptive.BlockingRunner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.learner.base_learner.rst: -------------------------------------------------------------------------------- 1 | adaptive.BaseLearner 2 | ============================ 3 | 4 | .. autoclass:: adaptive.learner.BaseLearner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.runner.BaseRunner.rst: -------------------------------------------------------------------------------- 1 | adaptive.runner.BaseRunner 2 | ========================== 3 | 4 | .. autoclass:: adaptive.runner.BaseRunner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.learner.balancing_learner.rst: -------------------------------------------------------------------------------- 1 | adaptive.BalancingLearner 2 | ========================= 3 | 4 | .. autoclass:: adaptive.BalancingLearner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.learner.integrator_learner.rst: -------------------------------------------------------------------------------- 1 | adaptive.IntegratorLearner 2 | ========================== 3 | 4 | .. autoclass:: adaptive.IntegratorLearner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /.mailmap: -------------------------------------------------------------------------------- 1 | Joseph Weston 2 | Joseph Weston 3 | Bas Nijholt 4 | Jorn Hoofwijk Jorn 5 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.notebook_integration.rst: -------------------------------------------------------------------------------- 1 | adaptive.notebook\_integration module 2 | ===================================== 3 | 4 | .. automodule:: adaptive.notebook_integration 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.learner.triangulation.rst: -------------------------------------------------------------------------------- 1 | adaptive.learner.triangulation module 2 | ===================================== 3 | 4 | .. 
automodule:: adaptive.learner.triangulation
5 |     :members:
6 |     :undoc-members:
7 |     :show-inheritance:
8 |
--------------------------------------------------------------------------------
/AUTHORS.md:
--------------------------------------------------------------------------------
1 | ## Authors
2 | Below is a list of the contributors to Adaptive:
3 |
4 | + [Anton Akhmerov]()
5 | + [Bas Nijholt]()
6 | + [Christoph Groth]()
7 | + Jorn Hoofwijk
8 | + [Joseph Weston]()
9 |
--------------------------------------------------------------------------------
/check_whitespace:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | ! for f in $(git ls-files | grep -v \\.diff$); do
4 |     file $f | grep -q ' text' || continue
5 |     grep -q '[[:blank:]]$' $f && echo $f: trailing whitespace
6 |     tail -n1 $f | read -r _ || echo $f: no newline at end of file
7 |     tail -n1 $f | grep -q '^$' && echo $f: empty line at end of file
8 | done | grep . >&2
9 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: adaptive
2 |
3 | channels:
4 |   - conda-forge
5 |
6 | dependencies:
7 |   - python=3.6
8 |   - sortedcontainers
9 |   - sortedcollections
10 |   - scipy
11 |   - holoviews
12 |   - ipyparallel
13 |   - distributed
14 |   - ipykernel>=4.8*
15 |   - jupyter_client>=5.2.2
16 |   - ipywidgets
17 |   - scikit-optimize
18 |   - plotly
19 |
--------------------------------------------------------------------------------
/docs/source/reference/adaptive.runner.extras.rst:
--------------------------------------------------------------------------------
1 | Runner extras
2 | =============
3 |
4 | Simple executor
5 | ---------------
6 |
7 | .. autofunction:: adaptive.runner.simple
8 |
9 | Sequential executor
10 | --------------------
11 |
12 | .. autoclass:: adaptive.runner.SequentialExecutor
13 |
14 |
15 | Replay log
16 | ----------
17 |
18 | .. autofunction:: adaptive.runner.replay_log
19 |
--------------------------------------------------------------------------------
/docs/source/reference/adaptive.learner.data_saver.rst:
--------------------------------------------------------------------------------
1 | adaptive.DataSaver
2 | ==================
3 |
4 | The ``DataSaver`` class
5 | -----------------------
6 |
7 | .. autoclass:: adaptive.DataSaver
8 |     :members:
9 |     :undoc-members:
10 |     :show-inheritance:
11 |
12 |
13 | The ``make_datasaver`` function
14 | -------------------------------
15 |
16 | .. autofunction:: adaptive.make_datasaver
17 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../../README.rst
2 |     :start-after: summary-start
3 |     :end-before: summary-end
4 |
5 | .. include:: ../../README.rst
6 |     :start-after: references-start
7 |     :end-before: references-end
8 |
9 |
10 | .. toctree::
11 |     :hidden:
12 |
13 |     self
14 |
15 | .. toctree::
16 |     :maxdepth: 2
17 |     :hidden:
18 |
19 |     docs
20 |     tutorial/tutorial
21 |     reference/adaptive
22 |
--------------------------------------------------------------------------------
/adaptive/_static_version.py:
--------------------------------------------------------------------------------
1 | # This file is part of 'miniver': https://github.com/jbweston/miniver
2 | #
3 | # This file will be overwritten by setup.py when a source or binary
4 | # distribution is made.
The magic value "__use_git__" is interpreted by 5 | # _version.py. 6 | 7 | version = "__use_git__" 8 | 9 | # These values are only set if the distribution was created with 'git archive' 10 | refnames = "HEAD -> master" 11 | git_hash = "58e343399" 12 | -------------------------------------------------------------------------------- /azure-docker.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | paths: 3 | include: 4 | - Dockerfile 5 | - environment.yml 6 | - test-requirements.txt 7 | 8 | steps: 9 | - script: | 10 | docker build -f Dockerfile -t pythonadaptive/adaptive . 11 | docker login -u $user -p $password 12 | docker push pythonadaptive/adaptive 13 | env: 14 | password: $(docker_password) 15 | user: $(docker_user) 16 | displayName: 'Build and push Docker image' 17 | 18 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.learner.learnerND.rst: -------------------------------------------------------------------------------- 1 | adaptive.LearnerND 2 | ================== 3 | 4 | .. autoclass:: adaptive.LearnerND 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Custom loss functions 10 | --------------------- 11 | .. autofunction:: adaptive.learner.learnerND.default_loss 12 | 13 | .. autofunction:: adaptive.learner.learnerND.uniform_loss 14 | 15 | .. autofunction:: adaptive.learner.learnerND.std_loss 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Docker image for testing adaptive 2 | FROM conda/miniconda3:latest 3 | 4 | # make our environment sane 5 | ENV LANG C.UTF-8 6 | ENV LC_ALL C.UTF-8 7 | 8 | # install git 9 | RUN apt-get update && \ 10 | apt-get install -y --no-install-recommends git && \ 11 | apt-get clean && \ 12 | rm -rf /var/lib/apt/lists/* 13 | 14 | COPY environment.yml test-requirements.txt / 15 | 16 | RUN conda env update --quiet -n root -f environment.yml 17 | RUN conda clean --yes --all 18 | RUN pip install -r test-requirements.txt 19 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.learner.learner1D.rst: -------------------------------------------------------------------------------- 1 | adaptive.Learner1D 2 | ================== 3 | 4 | .. autoclass:: adaptive.Learner1D 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | 10 | Custom loss functions 11 | --------------------- 12 | .. autofunction:: adaptive.learner.learner1D.default_loss 13 | 14 | .. autofunction:: adaptive.learner.learner1D.uniform_loss 15 | 16 | .. autofunction:: adaptive.learner.learner1D.uses_nth_neighbors 17 | 18 | .. autofunction:: adaptive.learner.learner1D.triangle_loss 19 | 20 | .. 
autofunction:: adaptive.learner.learner1D.curvature_loss_function 21 | -------------------------------------------------------------------------------- /adaptive/tests/test_notebook_integration.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import ipykernel.iostream 4 | import zmq 5 | 6 | 7 | def test_private_api_used_in_live_info(): 8 | """We are catching all errors in 9 | adaptive.notebook_integration.should_update 10 | so if ipykernel changed its API it would happen unnoticed.""" 11 | # XXX: find a potential better solution in 12 | # https://github.com/ipython/ipykernel/issues/365 13 | ctx = zmq.Context() 14 | iopub_socket = ctx.socket(zmq.PUB) 15 | iopub_thread = ipykernel.iostream.IOPubThread(iopub_socket) 16 | assert hasattr(iopub_thread, '_events') 17 | -------------------------------------------------------------------------------- /azure-test.yml: -------------------------------------------------------------------------------- 1 | resources: 2 | containers: 3 | - container: adaptive 4 | image: pythonadaptive/adaptive:latest 5 | 6 | container: adaptive 7 | 8 | steps: 9 | - script: py.test --verbose --cov=adaptive --cov-report term --cov-report html adaptive 10 | displayName: 'Run the tests' 11 | - script: | 12 | MISSING_AUTHORS=$(git shortlog -s HEAD | sed -e "s/^[0-9\t ]*//"| xargs -i sh -c 'grep -q "{}" AUTHORS.md || echo "{} missing from authors"') 13 | if [ ! -z "$MISSING_AUTHORS" ]; then { echo $MISSING_AUTHORS; exit 1; }; fi 14 | continueOnError: true 15 | displayName: 'Authors check' 16 | - script: ./check_whitespace 17 | continueOnError: true 18 | displayName: 'Whitespace check' 19 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = adaptive 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.learner.learner2D.rst: -------------------------------------------------------------------------------- 1 | adaptive.Learner2D 2 | ================== 3 | 4 | .. autoclass:: adaptive.Learner2D 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | 10 | Custom loss functions 11 | --------------------- 12 | .. autofunction:: adaptive.learner.learner2D.default_loss 13 | 14 | .. autofunction:: adaptive.learner.learner2D.minimize_triangle_surface_loss 15 | 16 | .. autofunction:: adaptive.learner.learner2D.uniform_loss 17 | 18 | .. autofunction:: adaptive.learner.learner2D.resolution_loss_function 19 | 20 | 21 | Helper functions 22 | ---------------- 23 | .. autofunction:: adaptive.learner.learner2D.areas 24 | 25 | .. 
autofunction:: adaptive.learner.learner2D.deviations 26 | -------------------------------------------------------------------------------- /adaptive/learner/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from contextlib import suppress 4 | 5 | from adaptive.learner.average_learner import AverageLearner 6 | from adaptive.learner.base_learner import BaseLearner 7 | from adaptive.learner.balancing_learner import BalancingLearner 8 | from adaptive.learner.learner1D import Learner1D 9 | from adaptive.learner.learner2D import Learner2D 10 | from adaptive.learner.learnerND import LearnerND 11 | from adaptive.learner.integrator_learner import IntegratorLearner 12 | from adaptive.learner.data_saver import DataSaver, make_datasaver 13 | 14 | with suppress(ImportError): 15 | # Only available if 'scikit-optimize' is installed 16 | from adaptive.learner.skopt_learner import SKOptLearner 17 | -------------------------------------------------------------------------------- /docs/environment.yml: -------------------------------------------------------------------------------- 1 | name: adaptive 2 | 3 | channels: 4 | - conda-forge 5 | 6 | dependencies: 7 | - python=3.6 8 | - pip: 9 | - sortedcontainers 10 | - scipy 11 | - holoviews 12 | - bokeh==0.13 13 | - plotly 14 | - ipyparallel 15 | - distributed 16 | - ipykernel>=4.8* 17 | - jupyter_client>=5.2.2 18 | - ipywidgets 19 | - scikit-optimize 20 | - nbconvert>=5.4* # because my jupyter-sphinx branch depends on it 21 | - sphinx_rtd_theme 22 | - git+https://github.com/basnijholt/jupyter-sphinx.git@widgets_execute 23 | - git+https://github.com/python-adaptive/adaptive.git@master # temporary solution because jupyter-sphinx doesn't use the adaptive from the repo 24 | - sphinx_fontawesome 25 | - m2r 26 | -------------------------------------------------------------------------------- /docs/source/reference/adaptive.rst: -------------------------------------------------------------------------------- 1 | API documentation 2 | ================= 3 | 4 | Learners 5 | -------- 6 | 7 | .. toctree:: 8 | 9 | adaptive.learner.average_learner 10 | adaptive.learner.base_learner 11 | adaptive.learner.balancing_learner 12 | adaptive.learner.data_saver 13 | adaptive.learner.integrator_learner 14 | adaptive.learner.learner1D 15 | adaptive.learner.learner2D 16 | adaptive.learner.learnerND 17 | adaptive.learner.skopt_learner 18 | 19 | Runners 20 | ------- 21 | 22 | .. toctree:: 23 | adaptive.runner.Runner 24 | adaptive.runner.AsyncRunner 25 | adaptive.runner.BlockingRunner 26 | adaptive.runner.BaseRunner 27 | adaptive.runner.extras 28 | 29 | Other 30 | ----- 31 | .. 
toctree:: 32 | adaptive.utils 33 | adaptive.notebook_integration 34 | -------------------------------------------------------------------------------- /benchmarks/asv.conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "project": "adaptive", 4 | "project_url": "https://gitlab.kwant-project.org/qt/adaptive", 5 | "repo": "..", 6 | "dvcs": "git", 7 | "environment_type": "conda", 8 | "install_timeout": 600, 9 | "show_commit_url": "https://gitlab.kwant-project.org/qt/adaptive/commit/", 10 | "pythons": ["3.6"], 11 | "conda_channels": ["conda-forge"], 12 | "matrix": { 13 | "numpy": ["1.13"], 14 | "holoviews": ["1.9.1"], 15 | "scipy": ["0.19.1"], 16 | "ipyparallel": ["6.0.2"], 17 | "sortedcontainers": ["1.5.7"], 18 | }, 19 | "benchmark_dir": "benchmarks", 20 | "env_dir": "env", 21 | "results_dir": "results", 22 | "html_dir": "html", 23 | "hash_length": 8, 24 | "wheel_cache_size": 2 25 | } 26 | -------------------------------------------------------------------------------- /adaptive/tests/test_balancing_learner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from adaptive.learner import Learner1D, BalancingLearner 4 | 5 | 6 | def test_balancing_learner_loss_cache(): 7 | learner = Learner1D(lambda x: x, bounds=(-1, 1)) 8 | learner.tell(-1, -1) 9 | learner.tell(1, 1) 10 | learner.tell_pending(0) 11 | 12 | real_loss = learner.loss(real=True) 13 | pending_loss = learner.loss(real=False) 14 | 15 | # Test if the real and pending loss are cached correctly 16 | bl = BalancingLearner([learner]) 17 | assert bl.loss(real=True) == real_loss 18 | assert bl.loss(real=False) == pending_loss 19 | 20 | # Test if everything is still fine when executed in the reverse order 21 | bl = BalancingLearner([learner]) 22 | assert bl.loss(real=False) == pending_loss 23 | assert bl.loss(real=True) == real_loss 24 | -------------------------------------------------------------------------------- /adaptive/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from contextlib import suppress 4 | 5 | from adaptive.notebook_integration import (notebook_extension, live_plot, 6 | active_plotting_tasks) 7 | 8 | from adaptive import learner 9 | from adaptive import runner 10 | from adaptive import utils 11 | 12 | from adaptive.learner import ( 13 | BaseLearner, Learner1D, Learner2D, LearnerND, 14 | AverageLearner, BalancingLearner, make_datasaver, 15 | DataSaver, IntegratorLearner 16 | ) 17 | 18 | with suppress(ImportError): 19 | # Only available if 'scikit-optimize' is installed 20 | from adaptive.learner import SKOptLearner 21 | 22 | from adaptive.runner import Runner, AsyncRunner, BlockingRunner 23 | 24 | from adaptive._version import __version__ 25 | del _version 26 | 27 | del notebook_integration # to avoid confusion with `notebook_extension` 28 | -------------------------------------------------------------------------------- /adaptive/tests/test_skopt_learner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import numpy as np 4 | 5 | import pytest 6 | 7 | try: 8 | import skopt 9 | with_scikit_optimize = True 10 | from adaptive.learner import SKOptLearner 11 | except ModuleNotFoundError: 12 | with_scikit_optimize = False 13 | 14 | 15 | @pytest.mark.skipif(not with_scikit_optimize, 16 | reason='scikit-optimize is not installed') 17 | def 
test_skopt_learner_runs():
18 |     """The SKOptLearner provides very few guarantees about its
19 |     behaviour, so we only test the most basic usage.
20 |     """
21 |
22 |     def g(x, noise_level=0.1):
23 |         return (np.sin(5 * x) * (1 - np.tanh(x ** 2))
24 |                 + np.random.randn() * noise_level)
25 |
26 |     learner = SKOptLearner(g, dimensions=[(-2., 2.)])
27 |
28 |     for _ in range(11):
29 |         (x,), _ = learner.ask(1)
30 |         learner.tell(x, learner.function(x))
31 |
--------------------------------------------------------------------------------
/azure-pipelines.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - task: CondaEnvironment@1
3 |   inputs:
4 |     packageSpecs: >
5 |       python=3.6
6 |       sortedcontainers
7 |       sortedcollections
8 |       scipy
9 |       holoviews
10 |       ipyparallel
11 |       distributed
12 |       ipykernel>=4.8*
13 |       jupyter_client>=5.2.2
14 |       ipywidgets
15 |       scikit-optimize
16 |       plotly
17 |     createCustomEnvironment: true
18 |     createOptions: "-c conda-forge"
19 |     environmentName: 'adaptive'
20 |
21 | - script: pip install -r test-requirements.txt
22 |   displayName: 'Install test-requirements.txt'
23 |
24 | - script: py.test --verbose --cov=adaptive --cov-report term --cov-report html adaptive
25 |   displayName: 'Run the tests'
26 |
27 | - script: |
28 |     MISSING_AUTHORS=$(git shortlog -s HEAD | sed -e "s/^[0-9\t ]*//"| xargs -i sh -c 'grep -q "{}" AUTHORS.md || echo "{} missing from authors"')
29 |     if [ ! -z "$MISSING_AUTHORS" ]; then { echo $MISSING_AUTHORS; exit 1; }; fi
30 |   continueOnError: true
31 |   displayName: 'Authors check'
32 |
33 | - script: ./check_whitespace
34 |   continueOnError: true
35 |   displayName: 'Whitespace check'
36 |
--------------------------------------------------------------------------------
/ipynb_filter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # `ipynb_filter.py`:
4 | # This is a git filter that strips out the outputs and
5 | # metadata of a Jupyter notebook using `nbconvert`.
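# To check that the filter is active, one can run
# `git config --get filter.ipynb_filter.clean`, which should print the
# `jupyter nbconvert` command that is registered at the bottom of this file.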
6 | # Execute the following line in order to activate this filter: 7 | # python ipynb_filter.py 8 | # 9 | # The following line should be in `.gitattributes`: 10 | # *.ipynb filter=ipynb_filter 11 | 12 | from nbconvert.preprocessors import Preprocessor 13 | 14 | 15 | class RemoveMetadata(Preprocessor): 16 | def preprocess(self, nb, resources): 17 | nb.metadata = {"language_info": {"name":"python", 18 | "pygments_lexer": "ipython3"}} 19 | return nb, resources 20 | 21 | 22 | if __name__ == '__main__': 23 | # The filter is getting activated 24 | import os 25 | git_cmd = 'git config filter.ipynb_filter.clean "jupyter nbconvert --to notebook --config ipynb_filter.py --stdin --stdout"' 26 | os.system(git_cmd) 27 | else: 28 | # This script is used as config 29 | c.Exporter.preprocessors = [RemoveMetadata] 30 | c.ClearOutputPreprocessor.enabled = True 31 | c.ClearOutputPreprocessor.remove_metadata_fields = [ 32 | "deletable", "editable", "collapsed", "scrolled"] 33 | -------------------------------------------------------------------------------- /benchmarks/benchmarks/benchmarks.py: -------------------------------------------------------------------------------- 1 | import adaptive 2 | 3 | import numpy as np 4 | import random 5 | 6 | 7 | offset = random.uniform(-0.5, 0.5) 8 | 9 | def f_1d(x, offset=offset): 10 | a = 0.01 11 | return x + a**2 / (a**2 + (x - offset)**2) 12 | 13 | 14 | def f_2d(xy): 15 | x, y = xy 16 | a = 0.2 17 | return x + np.exp(-(x**2 + y**2 - 0.75**2)**2/a**4) 18 | 19 | 20 | class TimeLearner1D: 21 | def setup(self): 22 | self.learner = adaptive.Learner1D(f_1d, bounds=(-1, 1)) 23 | 24 | def time_run(self): 25 | for _ in range(1000): 26 | points, _ = self.learner.ask(1) 27 | self.learner.tell_many(points, map(f_1d, points)) 28 | 29 | 30 | class TimeLearner2D: 31 | def setup(self): 32 | self.learner = adaptive.Learner2D(f_2d, bounds=[(-1, 1), (-1, 1)]) 33 | self.xs = np.random.rand(50**2, 2) 34 | self.ys = np.random.rand(50**2) 35 | 36 | def time_run(self): 37 | for _ in range(50**2): 38 | points, _ = self.learner.ask(1) 39 | self.learner.tell_many(points, map(f_2d, points)) 40 | 41 | def time_ask(self): 42 | for _ in range(50**2): 43 | self.learner.ask(1) 44 | 45 | def time_tell(self): 46 | for x, y in zip(self.xs, self.ys): 47 | self.learner.tell(x, y) 48 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | image: gitlab.kwant-project.org:5005/qt/adaptive:latest 2 | 3 | stages: 4 | - prebuild 5 | - test 6 | 7 | build docker: 8 | stage: prebuild 9 | image: 10 | name: gcr.io/kaniko-project/executor:debug 11 | entrypoint: [""] 12 | before_script: 13 | - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json 14 | - export CI_REF=${CI_COMMIT_TAG:-latest} 15 | script: 16 | - /kaniko/executor 17 | --context $CI_PROJECT_DIR 18 | --dockerfile $CI_PROJECT_DIR/Dockerfile 19 | --destination $CI_REGISTRY_IMAGE:$CI_REF 20 | only: 21 | changes: 22 | - Dockerfile 23 | - environment.yml 24 | - test-requirements.txt 25 | 26 | test: 27 | stage: test 28 | script: 29 | - py.test --verbose --cov=adaptive --cov-report term --cov-report html adaptive 30 | artifacts: 31 | paths: 32 | - htmlcov 33 | 34 | authors check: 35 | stage: test 36 | script: 37 | - MISSING_AUTHORS=$(git shortlog -s HEAD | sed -e "s/^[0-9\t ]*//"| xargs -i sh -c 'grep -q "{}" AUTHORS.md || echo "{} missing from 
authors"') 38 | - if [ ! -z "$MISSING_AUTHORS" ]; then { echo $MISSING_AUTHORS; exit 1; }; fi 39 | allow_failure: true 40 | 41 | check whitespace style: 42 | stage: test 43 | script: ./check_whitespace 44 | allow_failure: true 45 | -------------------------------------------------------------------------------- /adaptive/tests/test_learnernd.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import scipy.spatial 4 | 5 | from adaptive.learner import LearnerND 6 | from adaptive.runner import replay_log, simple 7 | 8 | from .test_learners import ring_of_fire, generate_random_parametrization 9 | 10 | 11 | def test_faiure_case_LearnerND(): 12 | log = [ 13 | ('ask', 4), 14 | ('tell', (-1, -1, -1), 1.607873907219222e-101), 15 | ('tell', (-1, -1, 1), 1.607873907219222e-101), 16 | ('ask', 2), 17 | ('tell', (-1, 1, -1), 1.607873907219222e-101), 18 | ('tell', (-1, 1, 1), 1.607873907219222e-101), 19 | ('ask', 2), 20 | ('tell', (1, -1, 1), 2.0), 21 | ('tell', (1, -1, -1), 2.0), 22 | ('ask', 2), 23 | ('tell', (0.0, 0.0, 0.0), 4.288304431237686e-06), 24 | ('tell', (1, 1, -1), 2.0) 25 | ] 26 | learner = LearnerND(lambda *x: x, bounds=[(-1, 1), (-1, 1), (-1, 1)]) 27 | replay_log(learner, log) 28 | 29 | 30 | def test_interior_vs_bbox_gives_same_result(): 31 | f = generate_random_parametrization(ring_of_fire) 32 | 33 | control = LearnerND(f, bounds=[(-1, 1), (-1, 1)]) 34 | hull = scipy.spatial.ConvexHull(control._bounds_points) 35 | learner = LearnerND(f, bounds=hull) 36 | 37 | simple(control, goal=lambda l: l.loss() < 0.1) 38 | simple(learner, goal=lambda l: l.loss() < 0.1) 39 | 40 | assert learner.data == control.data 41 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.rst: -------------------------------------------------------------------------------- 1 | Tutorial Adaptive 2 | ================= 3 | 4 | `Adaptive `__ 5 | is a package for adaptively sampling functions with support for parallel 6 | evaluation. 7 | 8 | This is an introductory notebook that shows some basic use cases. 9 | 10 | ``adaptive`` needs at least Python 3.6, and the following packages: 11 | 12 | - ``scipy`` 13 | - ``sortedcontainers`` 14 | 15 | Additionally ``adaptive`` has lots of extra functionality that makes it 16 | simple to use from Jupyter notebooks. This extra functionality depends 17 | on the following packages 18 | 19 | - ``ipykernel>=4.8.0`` 20 | - ``jupyter_client>=5.2.2`` 21 | - ``holoviews`` 22 | - ``bokeh`` 23 | - ``ipywidgets`` 24 | 25 | We recommend to start with the :ref:`Tutorial `~adaptive.Learner1D``. 26 | 27 | .. note:: 28 | Because this documentation consists of static html, the ``live_plot`` 29 | and ``live_info`` widget is not live. Download the notebooks 30 | in order to see the real behaviour. 31 | 32 | .. toctree:: 33 | :hidden: 34 | 35 | tutorial.Learner1D 36 | tutorial.Learner2D 37 | tutorial.custom_loss 38 | tutorial.AverageLearner 39 | tutorial.BalancingLearner 40 | tutorial.DataSaver 41 | tutorial.IntegratorLearner 42 | tutorial.LearnerND 43 | tutorial.SKOptLearner 44 | tutorial.parallelism 45 | tutorial.advanced-topics 46 | -------------------------------------------------------------------------------- /benchmarks/README.md: -------------------------------------------------------------------------------- 1 | # adaptive benchmarks 2 | 3 | Benchmarking adaptive with Airspeed Velocity. 
--------------------------------------------------------------------------------
/benchmarks/README.md:
--------------------------------------------------------------------------------
1 | # adaptive benchmarks
2 |
3 | Benchmarking adaptive with Airspeed Velocity.
4 |
5 | ## Usage
6 |
7 | Airspeed Velocity manages building and Python conda environments by itself,
8 | unless told otherwise. To run the benchmarks, you do not need to install a
9 | development version of adaptive to your current Python environment.
10 |
11 | Run ASV commands (record results and generate HTML):
12 |
13 | ```bash
14 | cd benchmarks
15 | asv run --skip-existing-commits --steps 10 ALL
16 | asv publish
17 | asv preview
18 | ```
19 |
20 | More on how to use `asv` can be found in the
21 | [ASV documentation](https://asv.readthedocs.io/). Command-line help is
22 | available as usual via `asv --help` and `asv run --help`.
23 |
24 |
25 | ## Writing benchmarks
26 |
27 | See the [ASV documentation](https://asv.readthedocs.io/) for basics on how to write benchmarks; a minimal skeleton is sketched at the end of this file.
28 |
29 | Some things to consider:
30 |
31 | - The benchmark suite should be importable with any adaptive version.
32 |
33 | - The benchmark parameters etc. should not depend on which adaptive version
34 |   is installed.
35 |
36 | - Try to keep the runtime of the benchmark reasonable.
37 |
38 | - Prefer ASV's `time_` methods for benchmarking times rather than cooking up
39 |   time measurements via `time.clock`, even if it requires some juggling when
40 |   writing the benchmark.
41 |
42 | - Preparing arrays etc. should generally be put in the `setup` method rather
43 |   than the `time_` methods, to avoid counting preparation time together with
44 |   the time of the benchmarked operation.
45 |
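The following is an illustrative sketch (not an existing benchmark in this
suite) of the shape such a benchmark could take, with preparation kept in
`setup` and the measured work in a `time_` method:

```python
import adaptive


class TimeLearner1DSuite:
    def setup(self):
        # Preparation is done here, so it is not included in the timing.
        self.learner = adaptive.Learner1D(lambda x: x ** 2, bounds=(-1, 1))

    def time_ask_tell(self):
        # Only this ask/tell loop is timed by ASV.
        for _ in range(100):
            points, _ = self.learner.ask(1)
            self.learner.tell_many(points, (x ** 2 for x in points))
```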
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # IPython Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule
77 |
78 | # dotenv
79 | .env
80 |
81 | # virtualenv
82 | venv/
83 | ENV/
84 |
85 | # Spyder project settings
86 | .spyderproject
87 |
88 | # Rope project settings
89 | .ropeproject
90 |
91 | # MacOS files
92 | *.DS_Store
93 |
94 | # asv files
95 | benchmarks/html/
96 | benchmarks/results/
97 | .asv
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2017-2018, Adaptive authors
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 |   list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 |   this list of conditions and the following disclaimer in the documentation
14 |   and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 |   contributors may be used to endorse or promote products derived from
18 |   this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/adaptive/tests/test_average_learner.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import random
4 |
5 | import numpy as np
6 |
7 | from adaptive.learner import AverageLearner
8 |
9 |
10 | def test_only_returns_new_points():
11 |     learner = AverageLearner(lambda x: x, atol=None, rtol=0.01)
12 |
13 |     # Only tell it n = 5...10
14 |     for i in range(5, 10):
15 |         learner.tell(i, 1)
16 |
17 |     learner.tell_pending(0)  # This means it shouldn't return 0 anymore
18 |
19 |     assert learner.ask(1)[0][0] == 1
20 |     assert learner.ask(1)[0][0] == 2
21 |     assert learner.ask(1)[0][0] == 3
22 |     assert learner.ask(1)[0][0] == 4
23 |     assert learner.ask(1)[0][0] == 10
24 |
25 |
26 | def test_avg_std_and_npoints():
27 |     learner = AverageLearner(lambda x: x, atol=None, rtol=0.01)
28 |
29 |     for i in range(300):
30 |         # This will add 300 points at random values of n.
31 |         # It could try to re-add already evaluated points.
32 |
33 |         n = random.randint(0, 2 * 300)
34 |         value = random.random()
35 |
36 |         # With 10% chance the point is marked as pending, which simulates
37 |         # asking for that point.
38 |         if value < 0.9:
39 |             learner.tell(n, value)
40 |         else:
41 |             learner.tell_pending(n)
42 |
43 |         if i > 2 and i % 10 == 0:
44 |             # We need more than two points for 'learner.std' to be defined.
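            # The lines below recompute the unbiased sample standard deviation,
            # std = sqrt(sum((v - mean)**2) / (n - 1)), and check it against
            # the learner's running 'npoints', 'sum_f', and 'std' bookkeeping.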
44 | values = np.array(list(learner.data.values())) 45 | std = np.sqrt(sum((values - values.mean())**2) / (len(values) - 1)) 46 | assert learner.npoints == len(learner.data) 47 | assert abs(learner.sum_f - values.sum()) < 1e-13 48 | assert abs(learner.std - std) < 1e-13 49 | -------------------------------------------------------------------------------- /adaptive/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from contextlib import contextmanager 4 | import functools 5 | import gzip 6 | from itertools import product 7 | import os 8 | import pickle 9 | import time 10 | 11 | 12 | def named_product(**items): 13 | names = items.keys() 14 | vals = items.values() 15 | return [dict(zip(names, res)) for res in product(*vals)] 16 | 17 | 18 | @contextmanager 19 | def restore(*learners): 20 | states = [learner.__getstate__() for learner in learners] 21 | try: 22 | yield 23 | finally: 24 | for state, learner in zip(states, learners): 25 | learner.__setstate__(state) 26 | 27 | 28 | def cache_latest(f): 29 | """Cache the latest return value of the function and add it 30 | as 'self._cache[f.__name__]'.""" 31 | @functools.wraps(f) 32 | def wrapper(*args, **kwargs): 33 | self = args[0] 34 | if not hasattr(self, '_cache'): 35 | self._cache = {} 36 | self._cache[f.__name__] = f(*args, **kwargs) 37 | return self._cache[f.__name__] 38 | return wrapper 39 | 40 | 41 | def save(fname, data, compress=True): 42 | fname = os.path.expanduser(fname) 43 | dirname = os.path.dirname(fname) 44 | if dirname: 45 | os.makedirs(dirname, exist_ok=True) 46 | _open = gzip.open if compress else open 47 | with _open(fname, 'wb') as f: 48 | pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL) 49 | 50 | 51 | def load(fname, compress=True): 52 | fname = os.path.expanduser(fname) 53 | _open = gzip.open if compress else open 54 | with _open(fname, 'rb') as f: 55 | return pickle.load(f) 56 | 57 | 58 | def copy_docstring_from(other): 59 | def decorator(method): 60 | return functools.wraps(other)(method) 61 | return decorator 62 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.AverageLearner.rst: -------------------------------------------------------------------------------- 1 | Tutorial `~adaptive.AverageLearner` 2 | ----------------------------------- 3 | 4 | .. note:: 5 | Because this documentation consists of static html, the ``live_plot`` 6 | and ``live_info`` widget is not live. Download the notebook 7 | in order to see the real behaviour. 8 | 9 | .. seealso:: 10 | The complete source code of this tutorial can be found in 11 | :jupyter-download:notebook:`tutorial.AverageLearner` 12 | 13 | .. jupyter-execute:: 14 | :hide-code: 15 | 16 | import adaptive 17 | adaptive.notebook_extension() 18 | 19 | The next type of learner averages a function until the uncertainty in 20 | the average meets some condition. 21 | 22 | This is useful for sampling a random variable. The function passed to 23 | the learner must formally take a single parameter, which should be used 24 | like a “seed” for the (pseudo-) random variable (although in the current 25 | implementation the seed parameter can be ignored by the function). 26 | 27 | .. 
jupyter-execute:: 28 | 29 | def g(n): 30 | import random 31 | from time import sleep 32 | sleep(random.random() / 1000) 33 | # Properly save and restore the RNG state 34 | state = random.getstate() 35 | random.seed(n) 36 | val = random.gauss(0.5, 1) 37 | random.setstate(state) 38 | return val 39 | 40 | .. jupyter-execute:: 41 | 42 | learner = adaptive.AverageLearner(g, atol=None, rtol=0.01) 43 | # `loss < 1` means that we reached the `rtol` or `atol` 44 | runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 1) 45 | 46 | .. jupyter-execute:: 47 | :hide-code: 48 | 49 | await runner.task # This is not needed in a notebook environment! 50 | 51 | .. jupyter-execute:: 52 | 53 | runner.live_info() 54 | 55 | .. jupyter-execute:: 56 | 57 | runner.live_plot(update_interval=0.1) 58 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.parallelism.rst: -------------------------------------------------------------------------------- 1 | Parallelism - using multiple cores 2 | ---------------------------------- 3 | 4 | Often you will want to evaluate the function on some remote computing 5 | resources. ``adaptive`` works out of the box with any framework that 6 | implements a `PEP 3148 `__ 7 | compliant executor that returns `concurrent.futures.Future` objects. 8 | 9 | `concurrent.futures` 10 | ~~~~~~~~~~~~~~~~~~~~ 11 | 12 | On Unix-like systems by default `adaptive.Runner` creates a 13 | `~concurrent.futures.ProcessPoolExecutor`, but you can also pass 14 | one explicitly e.g. to limit the number of workers: 15 | 16 | .. code:: python 17 | 18 | from concurrent.futures import ProcessPoolExecutor 19 | 20 | executor = ProcessPoolExecutor(max_workers=4) 21 | 22 | learner = adaptive.Learner1D(f, bounds=(-1, 1)) 23 | runner = adaptive.Runner(learner, executor=executor, goal=lambda l: l.loss() < 0.05) 24 | runner.live_info() 25 | runner.live_plot(update_interval=0.1) 26 | 27 | `ipyparallel.Client` 28 | ~~~~~~~~~~~~~~~~~~~~ 29 | 30 | .. code:: python 31 | 32 | import ipyparallel 33 | 34 | client = ipyparallel.Client() # You will need to start an `ipcluster` to make this work 35 | 36 | learner = adaptive.Learner1D(f, bounds=(-1, 1)) 37 | runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01) 38 | runner.live_info() 39 | runner.live_plot() 40 | 41 | `distributed.Client` 42 | ~~~~~~~~~~~~~~~~~~~~ 43 | 44 | On Windows by default `adaptive.Runner` uses a `distributed.Client`. 45 | 46 | .. code:: python 47 | 48 | import distributed 49 | 50 | client = distributed.Client() 51 | 52 | learner = adaptive.Learner1D(f, bounds=(-1, 1)) 53 | runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01) 54 | runner.live_info() 55 | runner.live_plot(update_interval=0.1) 56 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | from setuptools import setup, find_packages 5 | import sys 6 | 7 | 8 | if sys.version_info < (3, 6): 9 | print('adaptive requires Python 3.6 or above.') 10 | sys.exit(1) 11 | 12 | 13 | # Loads _version.py module without importing the whole package. 
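# Importing 'adaptive' itself here could fail, because setup.py runs before
# the package's dependencies are installed, so _version.py is loaded directly.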
14 | def get_version_and_cmdclass(package_name): 15 | import os 16 | from importlib.util import module_from_spec, spec_from_file_location 17 | spec = spec_from_file_location('version', 18 | os.path.join(package_name, '_version.py')) 19 | module = module_from_spec(spec) 20 | spec.loader.exec_module(module) 21 | return module.__version__, module.cmdclass 22 | 23 | 24 | version, cmdclass = get_version_and_cmdclass('adaptive') 25 | 26 | 27 | install_requires = [ 28 | 'scipy', 29 | 'sortedcollections', 30 | 'sortedcontainers >= 2.0', 31 | ] 32 | 33 | extras_require = { 34 | 'notebook': [ 35 | 'ipython', 36 | 'ipykernel>=4.8.0', # because https://github.com/ipython/ipykernel/issues/274 and https://github.com/ipython/ipykernel/issues/263 37 | 'jupyter_client>=5.2.2', # because https://github.com/jupyter/jupyter_client/pull/314 38 | 'holoviews>=1.9.1', 39 | 'ipywidgets', 40 | 'bokeh', 41 | 'matplotlib', 42 | 'plotly', 43 | ], 44 | } 45 | 46 | 47 | setup( 48 | name='adaptive', 49 | description='Adaptive parallel sampling of mathematical functions', 50 | version=version, 51 | url='https://adaptive.readthedocs.io/', 52 | author='Adaptive authors', 53 | license='BSD', 54 | classifiers=[ 55 | 'Development Status :: 4 - Beta', 56 | 'License :: OSI Approved :: BSD License', 57 | 'Intended Audience :: Science/Research', 58 | 'Programming Language :: Python :: 3.6', 59 | ], 60 | packages=find_packages('.'), 61 | install_requires=install_requires, 62 | extras_require=extras_require, 63 | cmdclass=cmdclass, 64 | ) 65 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.Learner2D.rst: -------------------------------------------------------------------------------- 1 | Tutorial `~adaptive.Learner2D` 2 | ------------------------------ 3 | 4 | .. note:: 5 | Because this documentation consists of static html, the ``live_plot`` 6 | and ``live_info`` widget is not live. Download the notebook 7 | in order to see the real behaviour. 8 | 9 | .. seealso:: 10 | The complete source code of this tutorial can be found in 11 | :jupyter-download:notebook:`tutorial.Learner2D` 12 | 13 | .. jupyter-execute:: 14 | :hide-code: 15 | 16 | import adaptive 17 | adaptive.notebook_extension() 18 | 19 | import numpy as np 20 | from functools import partial 21 | 22 | Besides 1D functions, we can also learn 2D functions: 23 | :math:`\ f: ℝ^2 → ℝ`. 24 | 25 | .. jupyter-execute:: 26 | 27 | def ring(xy, wait=True): 28 | import numpy as np 29 | from time import sleep 30 | from random import random 31 | if wait: 32 | sleep(random()/10) 33 | x, y = xy 34 | a = 0.2 35 | return x + np.exp(-(x**2 + y**2 - 0.75**2)**2/a**4) 36 | 37 | learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) 38 | 39 | .. jupyter-execute:: 40 | 41 | runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) 42 | 43 | .. jupyter-execute:: 44 | :hide-code: 45 | 46 | await runner.task # This is not needed in a notebook environment! 47 | 48 | .. jupyter-execute:: 49 | 50 | runner.live_info() 51 | 52 | .. jupyter-execute:: 53 | 54 | def plot(learner): 55 | plot = learner.plot(tri_alpha=0.2) 56 | return (plot.Image + plot.EdgePaths.I + plot).cols(2) 57 | 58 | runner.live_plot(plotter=plot, update_interval=0.1) 59 | 60 | .. 
jupyter-execute:: 61 | 62 | %%opts EdgePaths (color='w') 63 | 64 | import itertools 65 | 66 | # Create a learner and add data on homogeneous grid, so that we can plot it 67 | learner2 = adaptive.Learner2D(ring, bounds=learner.bounds) 68 | n = int(learner.npoints**0.5) 69 | xs, ys = [np.linspace(*bounds, n) for bounds in learner.bounds] 70 | xys = list(itertools.product(xs, ys)) 71 | learner2.tell_many(xys, map(partial(ring, wait=False), xys)) 72 | 73 | (learner2.plot(n).relabel('Homogeneous grid') + learner.plot().relabel('With adaptive') + 74 | learner2.plot(n, tri_alpha=0.4) + learner.plot(tri_alpha=0.4)).cols(2) 75 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.SKOptLearner.rst: -------------------------------------------------------------------------------- 1 | Tutorial `~adaptive.SKOptLearner` 2 | --------------------------------- 3 | 4 | .. note:: 5 | Because this documentation consists of static html, the ``live_plot`` 6 | and ``live_info`` widget is not live. Download the notebook 7 | in order to see the real behaviour. 8 | 9 | .. seealso:: 10 | The complete source code of this tutorial can be found in 11 | :jupyter-download:notebook:`tutorial.SKOptLearner` 12 | 13 | .. jupyter-execute:: 14 | :hide-code: 15 | 16 | import adaptive 17 | adaptive.notebook_extension() 18 | 19 | import holoviews as hv 20 | import numpy as np 21 | 22 | We have wrapped the ``Optimizer`` class from 23 | `scikit-optimize `__, 24 | to show how existing libraries can be integrated with ``adaptive``. 25 | 26 | The ``SKOptLearner`` attempts to “optimize” the given function ``g`` 27 | (i.e. find the global minimum of ``g`` in the window of interest). 28 | 29 | Here we use the same example as in the ``scikit-optimize`` 30 | `tutorial `__. 31 | Although ``SKOptLearner`` can optimize functions of arbitrary 32 | dimensionality, we can only plot the learner if a 1D function is being 33 | learned. 34 | 35 | .. jupyter-execute:: 36 | 37 | def F(x, noise_level=0.1): 38 | return (np.sin(5 * x) * (1 - np.tanh(x ** 2)) 39 | + np.random.randn() * noise_level) 40 | 41 | .. jupyter-execute:: 42 | 43 | learner = adaptive.SKOptLearner(F, dimensions=[(-2., 2.)], 44 | base_estimator="GP", 45 | acq_func="gp_hedge", 46 | acq_optimizer="lbfgs", 47 | ) 48 | runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40) 49 | 50 | .. jupyter-execute:: 51 | :hide-code: 52 | 53 | await runner.task # This is not needed in a notebook environment! 54 | 55 | .. jupyter-execute:: 56 | 57 | runner.live_info() 58 | 59 | .. jupyter-execute:: 60 | 61 | %%opts Overlay [legend_position='top'] 62 | xs = np.linspace(*learner.space.bounds[0]) 63 | to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label='to learn') 64 | 65 | runner.live_plot().relabel('prediction', depth=2) * to_learn 66 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.DataSaver.rst: -------------------------------------------------------------------------------- 1 | Tutorial `~adaptive.DataSaver` 2 | ------------------------------ 3 | 4 | .. note:: 5 | Because this documentation consists of static html, the ``live_plot`` 6 | and ``live_info`` widget is not live. Download the notebook 7 | in order to see the real behaviour. 8 | 9 | .. seealso:: 10 | The complete source code of this tutorial can be found in 11 | :jupyter-download:notebook:`tutorial.DataSaver` 12 | 13 | .. 
jupyter-execute::
14 |     :hide-code:
15 |
16 |     import adaptive
17 |     adaptive.notebook_extension()
18 |
19 | If the function that you want to learn returns a value along with some
20 | metadata, you can wrap your learner in an `adaptive.DataSaver`.
21 |
22 | In the following example the function to be learned returns its result
23 | and the execution time in a dictionary:
24 |
25 | .. jupyter-execute::
26 |
27 |     from operator import itemgetter
28 |
29 |     def f_dict(x):
30 |         """The function evaluation takes roughly the time we `sleep`."""
31 |         import random
32 |         from time import sleep
33 |
34 |         waiting_time = random.random()
35 |         sleep(waiting_time)
36 |         a = 0.01
37 |         y = x + a**2 / (a**2 + x**2)
38 |         return {'y': y, 'waiting_time': waiting_time}
39 |
40 |     # Create the learner with the function that returns a 'dict'.
41 |     # This learner cannot be run directly, as Learner1D does not know what to do with the 'dict'.
42 |     _learner = adaptive.Learner1D(f_dict, bounds=(-1, 1))
43 |
44 |     # Wrap the learner in 'adaptive.DataSaver' and tell it which key it needs to learn.
45 |     learner = adaptive.DataSaver(_learner, arg_picker=itemgetter('y'))
46 |
47 | ``learner.learner`` is the original learner, so
48 | ``learner.learner.loss()`` will call the correct loss method.
49 |
50 | .. jupyter-execute::
51 |
52 |     runner = adaptive.Runner(learner, goal=lambda l: l.learner.loss() < 0.1)
53 |
54 | .. jupyter-execute::
55 |     :hide-code:
56 |
57 |     await runner.task  # This is not needed in a notebook environment!
58 |
59 | .. jupyter-execute::
60 |
61 |     runner.live_info()
62 |
63 | .. jupyter-execute::
64 |
65 |     runner.live_plot(plotter=lambda l: l.learner.plot(), update_interval=0.1)
66 |
67 | Now the ``DataSavingLearner`` will have a dictionary attribute
68 | ``extra_data`` that has ``x`` as key and the data that was returned by
69 | ``learner.function`` as values.
70 |
71 | .. jupyter-execute::
72 |
73 |     learner.extra_data
74 |
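Because ``extra_data`` is a plain dictionary, it is easy to persist. As an
illustrative sketch (using the ``save`` helper from ``adaptive.utils`` and a
hypothetical filename):

.. code:: python

    from adaptive.utils import save

    # Writes a gzip-compressed pickle; 'compress=True' is the default.
    save('extra_data.pickle', learner.extra_data)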
The tag must have the name `v<version>`: 41 | ``` 42 | git tag -s v<version> -m "version <version>" 43 | ``` 44 | 45 | 46 | ### Build a source tarball and wheels and test it 47 | 48 | ``` 49 | rm -fr build dist 50 | python setup.py sdist bdist_wheel 51 | ``` 52 | 53 | This creates the file `dist/adaptive-<version>.tar.gz`. It is a good idea to unpack it 54 | and check that the tests run: 55 | ``` 56 | tar xzf dist/adaptive*.tar.gz 57 | cd adaptive-* 58 | py.test . 59 | ``` 60 | 61 | ### Create an empty commit for new development and tag it 62 | ``` 63 | git commit --allow-empty -m 'start development towards v<next version>' 64 | git tag -am 'Start development towards v<next version>' v<next version>-dev 65 | ``` 66 | 67 | Where `<next version>` is `<version>` with the minor version incremented 68 | (or major version incremented and minor and patch versions then reset to 0). 69 | This is necessary so that the reported version for any further commits is 70 | `<next version>-devX` and not `<version>-devX`. 71 | 72 | 73 | ## Publish the release 74 | 75 | ### Push the tags 76 | ``` 77 | git push origin v<version> v<next version>-dev 78 | ``` 79 | 80 | ### Upload to PyPI 81 | 82 | ``` 83 | twine upload dist/* 84 | ``` 85 | 86 | 87 | 88 | ## Update the [conda-forge recipe](https://github.com/conda-forge/adaptive-feedstock) 89 | 90 | * Fork the [feedstock repo](https://github.com/conda-forge/adaptive-feedstock) 91 | * Change the version number and sha256 in `recipe/meta.yaml` and commit to your fork 92 | * Open a [Pull Request](https://github.com/conda-forge/adaptive-feedstock/compare) 93 | * Type `@conda-forge-admin, please rerender` as a comment 94 | * When the tests succeed, merge 95 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.BalancingLearner.rst: -------------------------------------------------------------------------------- 1 | Tutorial `~adaptive.BalancingLearner` 2 | ------------------------------------- 3 | 4 | .. note:: 5 | Because this documentation consists of static html, the ``live_plot`` 6 | and ``live_info`` widget is not live. Download the notebook 7 | in order to see the real behaviour. 8 | 9 | .. seealso:: 10 | The complete source code of this tutorial can be found in 11 | :jupyter-download:notebook:`tutorial.BalancingLearner` 12 | 13 | .. jupyter-execute:: 14 | :hide-code: 15 | 16 | import adaptive 17 | adaptive.notebook_extension() 18 | 19 | import holoviews as hv 20 | import numpy as np 21 | from functools import partial 22 | import random 23 | 24 | The balancing learner is a “meta-learner” that takes a list of learners. 25 | When you request a point from the balancing learner, it will query all 26 | of its “children” to figure out which one will give the most 27 | improvement. 28 | 29 | The balancing learner can for example be used to implement a poor-man’s 30 | 2D learner by using the `~adaptive.Learner1D`. 31 | 32 | .. jupyter-execute:: 33 | 34 | def h(x, offset=0): 35 | a = 0.01 36 | return x + a**2 / (a**2 + (x - offset)**2) 37 | 38 | learners = [adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)), 39 | bounds=(-1, 1)) for i in range(10)] 40 | 41 | bal_learner = adaptive.BalancingLearner(learners) 42 | runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01) 43 | 44 | .. jupyter-execute:: 45 | :hide-code: 46 | 47 | await runner.task # This is not needed in a notebook environment! 48 | 49 | .. jupyter-execute:: 50 | 51 | runner.live_info() 52 | 53 | .. 
jupyter-execute:: 54 | 55 | plotter = lambda learner: hv.Overlay([L.plot() for L in learner.learners]) 56 | runner.live_plot(plotter=plotter, update_interval=0.1) 57 | 58 | Often one wants to create a set of ``learner``\ s for a cartesian 59 | product of parameters. For that particular case we’ve added a 60 | ``classmethod`` called `~adaptive.BalancingLearner.from_product`. 61 | See how it works below. 62 | 63 | .. jupyter-execute:: 64 | 65 | from scipy.special import eval_jacobi 66 | 67 | def jacobi(x, n, alpha, beta): return eval_jacobi(n, alpha, beta, x) 68 | 69 | combos = { 70 | 'n': [1, 2, 4, 8], 71 | 'alpha': np.linspace(0, 2, 3), 72 | 'beta': np.linspace(0, 1, 5), 73 | } 74 | 75 | learner = adaptive.BalancingLearner.from_product( 76 | jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos) 77 | 78 | runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) 79 | 80 | # The `cdims` will automatically be set when using `from_product`, so 81 | # `plot()` will return a HoloMap with correctly labeled sliders. 82 | learner.plot().overlay('beta').grid().select(y=(-1, 3)) 83 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.IntegratorLearner.rst: -------------------------------------------------------------------------------- 1 | Tutorial `~adaptive.IntegratorLearner` 2 | -------------------------------------- 3 | 4 | .. note:: 5 | Because this documentation consists of static html, the ``live_plot`` 6 | and ``live_info`` widget is not live. Download the notebook 7 | in order to see the real behaviour. 8 | 9 | .. seealso:: 10 | The complete source code of this tutorial can be found in 11 | :jupyter-download:notebook:`tutorial.IntegratorLearner` 12 | 13 | .. jupyter-execute:: 14 | :hide-code: 15 | 16 | import adaptive 17 | adaptive.notebook_extension() 18 | 19 | import holoviews as hv 20 | import numpy as np 21 | 22 | This learner learns a 1D function and calculates the integral and error 23 | of the integral with it. It is based on Pedro Gonnet’s 24 | `implementation `__. 25 | 26 | Let’s try the following function with cusps, which is difficult to 27 | integrate: 28 | 29 | .. jupyter-execute:: 30 | 31 | def f24(x): 32 | return np.floor(np.exp(x)) 33 | 34 | xs = np.linspace(0, 3, 200) 35 | hv.Scatter((xs, [f24(x) for x in xs])) 36 | 37 | Just to prove that this really is a difficult function to integrate, 38 | let’s try the familiar integration routine `scipy.integrate.quad`, which 39 | will give us warnings that it encounters difficulties (if we run it 40 | in a notebook). 41 | 42 | .. jupyter-execute:: 43 | 44 | import scipy.integrate
 45 | scipy.integrate.quad(f24, 0, 3) 46 | 47 | We initialize a learner again and pass the bounds and relative tolerance 48 | we want to reach. Then in the `~adaptive.Runner` we pass 49 | ``goal=lambda l: l.done()`` where ``learner.done()`` is ``True`` when 50 | the relative tolerance has been reached. 51 | 52 | .. jupyter-execute:: 53 | 54 | from adaptive.runner import SequentialExecutor 55 | 56 | learner = adaptive.IntegratorLearner(f24, bounds=(0, 3), tol=1e-8) 57 | 58 | # We use a SequentialExecutor, which runs the function to be learned in 59 | # *this* process only. This means we don't pay 60 | # the overhead of evaluating the function in another process. 61 | runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.done()) 62 | 63 | .. jupyter-execute:: 64 | :hide-code: 65 | 66 | await runner.task # This is not needed in a notebook environment! 
67 | 68 | .. jupyter-execute:: 69 | 70 | runner.live_info() 71 | 72 | Now we could do the live plotting again, but let’s just wait until the 73 | runner is done. 74 | 75 | .. jupyter-execute:: 76 | 77 | if not runner.task.done(): 78 | raise RuntimeError('Wait for the runner to finish before executing the cells below!') 79 | 80 | .. jupyter-execute:: 81 | 82 | print('The integral value is {} with the corresponding error of {}'.format(learner.igral, learner.err)) 83 | learner.plot() 84 | -------------------------------------------------------------------------------- /adaptive/tests/test_runner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import asyncio 4 | 5 | import pytest 6 | 7 | from adaptive.learner import Learner1D, Learner2D 8 | from adaptive.runner import (simple, BlockingRunner, AsyncRunner, 9 | SequentialExecutor, with_ipyparallel, with_distributed) 10 | 11 | 12 | def blocking_runner(learner, goal): 13 | BlockingRunner(learner, goal, executor=SequentialExecutor()) 14 | 15 | 16 | def async_runner(learner, goal): 17 | runner = AsyncRunner(learner, goal, executor=SequentialExecutor()) 18 | asyncio.get_event_loop().run_until_complete(runner.task) 19 | 20 | 21 | runners = [simple, blocking_runner, async_runner] 22 | 23 | 24 | def trivial_goal(learner): 25 | return learner.npoints > 10 26 | 27 | 28 | @pytest.mark.parametrize('runner', runners) 29 | def test_simple(runner): 30 | """Test that the runners actually run.""" 31 | 32 | def f(x): 33 | return x 34 | 35 | learner = Learner1D(f, (-1, 1)) 36 | runner(learner, lambda l: l.npoints > 10) 37 | assert len(learner.data) > 10 38 | 39 | 40 | @pytest.mark.parametrize('runner', runners) 41 | def test_nonconforming_output(runner): 42 | """Test that using a runner works with a 2D learner, even when the 43 | learned function outputs a 1-vector. This tests against the regression 44 | flagged in https://gitlab.kwant-project.org/qt/adaptive/issues/58. 
45 | """ 46 | 47 | def f(x): 48 | return [0] 49 | 50 | runner(Learner2D(f, [(-1, 1), (-1, 1)]), trivial_goal) 51 | 52 | 53 | def test_aync_def_function(): 54 | 55 | async def f(x): 56 | return x 57 | 58 | learner = Learner1D(f, (-1, 1)) 59 | runner = AsyncRunner(learner, trivial_goal) 60 | asyncio.get_event_loop().run_until_complete(runner.task) 61 | 62 | 63 | ### Test with different executors 64 | 65 | @pytest.fixture(scope="session") 66 | def ipyparallel_executor(): 67 | from ipyparallel import Client 68 | import pexpect 69 | 70 | child = pexpect.spawn('ipcluster start -n 1') 71 | child.expect('Engines appear to have started successfully', timeout=35) 72 | yield Client() 73 | if not child.terminate(force=True): 74 | raise RuntimeError('Could not stop ipcluster') 75 | 76 | 77 | @pytest.fixture(scope="session") 78 | def dask_executor(): 79 | from distributed import LocalCluster, Client 80 | 81 | client = Client(n_workers=1) 82 | yield client 83 | client.close() 84 | 85 | 86 | def linear(x): 87 | return x 88 | 89 | 90 | def test_concurrent_futures_executor(): 91 | from concurrent.futures import ProcessPoolExecutor 92 | BlockingRunner(Learner1D(linear, (-1, 1)), trivial_goal, 93 | executor=ProcessPoolExecutor(max_workers=1)) 94 | 95 | 96 | @pytest.mark.skipif(not with_ipyparallel, reason='IPyparallel is not installed') 97 | def test_ipyparallel_executor(ipyparallel_executor): 98 | learner = Learner1D(linear, (-1, 1)) 99 | BlockingRunner(learner, trivial_goal, 100 | executor=ipyparallel_executor) 101 | assert learner.npoints > 0 102 | 103 | 104 | @pytest.mark.skipif(not with_distributed, reason='dask.distributed is not installed') 105 | def test_distributed_executor(dask_executor): 106 | learner = Learner1D(linear, (-1, 1)) 107 | BlockingRunner(learner, trivial_goal, 108 | executor=dask_executor) 109 | assert learner.npoints > 0 110 | -------------------------------------------------------------------------------- /adaptive/learner/data_saver.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from collections import OrderedDict 4 | import functools 5 | 6 | from adaptive.learner.base_learner import BaseLearner 7 | from adaptive.utils import copy_docstring_from 8 | 9 | 10 | class DataSaver: 11 | """Save extra data associated with the values that need to be learned. 12 | 13 | Parameters 14 | ---------- 15 | learner : `~adaptive.BaseLearner` instance 16 | The learner that needs to be wrapped. 17 | arg_picker : function 18 | Function that returns the argument that needs to be learned. 19 | 20 | Example 21 | ------- 22 | Imagine we have a function that returns a dictionary 23 | of the form: ``{'y': y, 'err_est': err_est}``. 
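The value picked by ``arg_picker`` (here ``'y'``) is passed on to the wrapped learner, while the full result is stored in ``extra_data``. 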
24 | 25 | >>> from operator import itemgetter 26 | >>> _learner = Learner1D(f, bounds=(-1.0, 1.0)) 27 | >>> learner = DataSaver(_learner, arg_picker=itemgetter('y')) 28 | """ 29 | 30 | def __init__(self, learner, arg_picker): 31 | self.learner = learner 32 | self.extra_data = OrderedDict() 33 | self.function = learner.function 34 | self.arg_picker = arg_picker 35 | 36 | def __getattr__(self, attr): 37 | return getattr(self.learner, attr) 38 | 39 | @copy_docstring_from(BaseLearner.tell) 40 | def tell(self, x, result): 41 | y = self.arg_picker(result) 42 | self.extra_data[x] = result 43 | self.learner.tell(x, y) 44 | 45 | @copy_docstring_from(BaseLearner.tell_pending) 46 | def tell_pending(self, x): 47 | self.learner.tell_pending(x) 48 | 49 | def _get_data(self): 50 | return self.learner._get_data(), self.extra_data 51 | 52 | def _set_data(self, data): 53 | learner_data, self.extra_data = data 54 | self.learner._set_data(learner_data) 55 | 56 | @copy_docstring_from(BaseLearner.save) 57 | def save(self, fname, compress=True): 58 | # We copy this method because the 'DataSaver' is not a 59 | # subclass of the 'BaseLearner'. 60 | BaseLearner.save(self, fname, compress) 61 | 62 | @copy_docstring_from(BaseLearner.load) 63 | def load(self, fname, compress=True): 64 | # We copy this method because the 'DataSaver' is not a 65 | # subclass of the 'BaseLearner'. 66 | BaseLearner.load(self, fname, compress) 67 | 68 | 69 | def _ds(learner_type, arg_picker, *args, **kwargs): 70 | args = args[2:] # functools.partial passes the first 2 arguments in 'args'! 71 | return DataSaver(learner_type(*args, **kwargs), arg_picker) 72 | 73 | 74 | def make_datasaver(learner_type, arg_picker): 75 | """Create a `DataSaver` of a `learner_type` that can be instantiated 76 | with the `learner_type`'s key-word arguments. 77 | 78 | Parameters 79 | ---------- 80 | learner_type : `~adaptive.BaseLearner` type 81 | The learner type that needs to be wrapped. 82 | arg_picker : function 83 | Function that returns the argument that needs to be learned. 84 | 85 | Example 86 | ------- 87 | Imagine we have a function that returns a dictionary 88 | of the form: ``{'y': y, 'err_est': err_est}``. 89 | 90 | >>> from operator import itemgetter 91 | >>> DataSaver = make_datasaver(Learner1D, arg_picker=itemgetter('y')) 92 | >>> learner = DataSaver(function=f, bounds=(-1.0, 1.0)) 93 | 94 | Or when using `adaptive.BalancingLearner.from_product`: 95 | 96 | >>> learner_type = make_datasaver(adaptive.Learner1D, 97 | ... arg_picker=itemgetter('y')) 98 | >>> learner = adaptive.BalancingLearner.from_product( 99 | ... jacobi, learner_type, dict(bounds=(0, 1)), combos) 100 | """ 101 | return functools.partial(_ds, learner_type, arg_picker) 102 | -------------------------------------------------------------------------------- /adaptive/learner/skopt_learner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import numpy as np 4 | from skopt import Optimizer 5 | 6 | from adaptive.learner.base_learner import BaseLearner 7 | from adaptive.notebook_integration import ensure_holoviews 8 | from adaptive.utils import cache_latest 9 | 10 | 11 | class SKOptLearner(Optimizer, BaseLearner): 12 | """Learn a function minimum using ``skopt.Optimizer``. 13 | 14 | This is an ``Optimizer`` from ``scikit-optimize``, 15 | with the necessary methods added to make it conform 16 | to the ``adaptive`` learner interface. 17 | 18 | Parameters 19 | ---------- 20 | function : callable 21 | The function to learn. 
22 | **kwargs : 23 | Arguments to pass to ``skopt.Optimizer``. 24 | """ 25 | 26 | def __init__(self, function, **kwargs): 27 | self.function = function 28 | self.pending_points = set() 29 | super().__init__(**kwargs) 30 | 31 | def tell(self, x, y, fit=True): 32 | self.pending_points.discard(x) 33 | super().tell([x], y, fit) 34 | 35 | def tell_pending(self, x): 36 | # 'skopt.Optimizer' takes care of points we 37 | # have not got results for. 38 | self.pending_points.add(x) 39 | 40 | def remove_unfinished(self): 41 | pass 42 | 43 | @cache_latest 44 | def loss(self, real=True): 45 | if not self.models: 46 | return np.inf 47 | else: 48 | model = self.models[-1] 49 | # Return the in-sample error (i.e. test the model 50 | # with the training data). This is not the best 51 | # estimator of loss, but it is the cheapest. 52 | return 1 - model.score(self.Xi, self.yi) 53 | 54 | def ask(self, n, tell_pending=True): 55 | if not tell_pending: 56 | raise NotImplementedError('Asking points is an irreversible ' 57 | 'action, so use `ask(n, tell_pending=True`.') 58 | points = super().ask(n) 59 | # TODO: Choose a better estimate for the loss improvement. 60 | if self.space.n_dims > 1: 61 | return points, [self.loss() / n] * n 62 | else: 63 | return [p[0] for p in points], [self.loss() / n] * n 64 | 65 | @property 66 | def npoints(self): 67 | """Number of evaluated points.""" 68 | return len(self.Xi) 69 | 70 | def plot(self, nsamples=200): 71 | hv = ensure_holoviews() 72 | if self.space.n_dims > 1: 73 | raise ValueError('Can only plot 1D functions') 74 | bounds = self.space.bounds[0] 75 | if not self.Xi: 76 | p = hv.Scatter([]) * hv.Curve([]) * hv.Area([]) 77 | else: 78 | scatter = hv.Scatter(([p[0] for p in self.Xi], self.yi)) 79 | if self.models: 80 | model = self.models[-1] 81 | xs = np.linspace(*bounds, nsamples) 82 | xsp = self.space.transform(xs.reshape(-1, 1).tolist()) 83 | y_pred, sigma = model.predict(xsp, return_std=True) 84 | # Plot model prediction for function 85 | curve = hv.Curve( 86 | (xs, y_pred) 87 | ).opts(style=dict(line_dash='dashed')) 88 | # Plot 95% confidence interval as colored area around points 89 | area = hv.Area( 90 | (xs, y_pred - 1.96 * sigma, y_pred + 1.96 * sigma), 91 | vdims=['y', 'y2'], 92 | ).opts(style=dict(alpha=0.5, line_alpha=0)) 93 | 94 | else: 95 | area = hv.Area([]) 96 | curve = hv.Curve([]) 97 | p = scatter * curve * area 98 | 99 | # Plot with 5% empty margins such that the boundary points are visible 100 | margin = 0.05 * (bounds[1] - bounds[0]) 101 | plot_bounds = (bounds[0] - margin, bounds[1] + margin) 102 | 103 | return p.redim(x=dict(range=plot_bounds)) 104 | 105 | def _get_data(self): 106 | return [x[0] for x in self.Xi], self.yi 107 | 108 | def _set_data(self, data): 109 | xs, ys = data 110 | self.tell_many(xs, ys) 111 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.LearnerND.rst: -------------------------------------------------------------------------------- 1 | Tutorial `~adaptive.LearnerND` 2 | ------------------------------ 3 | 4 | .. note:: 5 | Because this documentation consists of static html, the ``live_plot`` 6 | and ``live_info`` widget is not live. Download the notebook 7 | in order to see the real behaviour. 8 | 9 | .. seealso:: 10 | The complete source code of this tutorial can be found in 11 | :jupyter-download:notebook:`tutorial.LearnerND` 12 | 13 | .. 
jupyter-execute:: 14 | :hide-code: 15 | 16 | import adaptive 17 | adaptive.notebook_extension() 18 | 19 | import holoviews as hv 20 | import numpy as np 21 | 22 | def dynamicmap_to_holomap(dm): 23 | # XXX: change when https://github.com/ioam/holoviews/issues/3085 24 | # is fixed. 25 | vals = {d.name: d.values for d in dm.dimensions() if d.values} 26 | return hv.HoloMap(dm.select(**vals)) 27 | 28 | Besides 1- and 2-dimensional functions, we can also learn N-D functions: 29 | :math:`\ f: ℝ^N → ℝ^M, N \ge 2, M \ge 1`. 30 | 31 | Do keep in mind that the speed and 32 | `effectiveness `__ 33 | of the learner drop quickly with an increasing number of dimensions. 34 | 35 | .. jupyter-execute:: 36 | 37 | # This step takes a lot of time; it will finish at about 3300 points, which can take up to 6 minutes. 38 | def sphere(xyz): 39 | x, y, z = xyz 40 | a = 0.4 41 | return x + z**2 + np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4) 42 | 43 | learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)]) 44 | runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) 45 | 46 | .. jupyter-execute:: 47 | :hide-code: 48 | 49 | await runner.task # This is not needed in a notebook environment! 50 | 51 | .. jupyter-execute:: 52 | 53 | runner.live_info() 54 | 55 | Let’s plot 2D slices of the 3D function: 56 | 57 | .. jupyter-execute:: 58 | 59 | def plot_cut(x, direction, learner=learner): 60 | cut_mapping = {'XYZ'.index(direction): x} 61 | return learner.plot_slice(cut_mapping, n=100) 62 | 63 | dm = hv.DynamicMap(plot_cut, kdims=['val', 'direction']) 64 | dm = dm.redim.values(val=np.linspace(-1, 1, 11), direction=list('XYZ')) 65 | 66 | # In a notebook one would run `dm`; however, we want statically generated 67 | # html, so we use a HoloMap to display it here 68 | dynamicmap_to_holomap(dm) 69 | 70 | Or we can plot 1D slices: 71 | 72 | .. jupyter-execute:: 73 | 74 | %%opts Path {+framewise} 75 | def plot_cut(x1, x2, directions, learner=learner): 76 | cut_mapping = {'xyz'.index(d): x for d, x in zip(directions, [x1, x2])} 77 | return learner.plot_slice(cut_mapping) 78 | 79 | dm = hv.DynamicMap(plot_cut, kdims=['v1', 'v2', 'directions']) 80 | dm = dm.redim.values(v1=np.linspace(-1, 1, 6), 81 | v2=np.linspace(-1, 1, 6), 82 | directions=['xy', 'xz', 'yz']) 83 | 84 | # In a notebook one would run `dm`; however, we want statically generated 85 | # html, so we use a HoloMap to display it here 86 | dynamicmap_to_holomap(dm) 87 | 88 | The plots show some wobbles even though the original function is smooth; 89 | this is because the learner chooses points in three dimensions, so the 90 | simplices are generally not aligned with the plane or line along which 91 | we take the slice. However, as always, when you sample more points the graph will 92 | become gradually smoother. 93 | 94 | Using any convex shape as domain 95 | ................................ 96 | 97 | Suppose you do not simply want to sample your function on a square (in 2D) or in 98 | a cube (in 3D). The LearnerND supports using a `scipy.spatial.ConvexHull` as 99 | your domain. This is best illustrated in the following example. 100 | 101 | Suppose you would like to sample your function in a cube split in half diagonally. 102 | You could use the following code as an example: 103 | 104 | .. 
jupyter-execute:: 105 | 106 | import scipy 107 | 108 | def f(xyz): 109 | x, y, z = xyz 110 | return x**4 + y**4 + z**4 - (x**2+y**2+z**2)**2 111 | 112 | # set the bound points, you can change this to be any shape 113 | b = [(-1, -1, -1), 114 | (-1, 1, -1), 115 | (-1, -1, 1), 116 | (-1, 1, 1), 117 | ( 1, 1, -1), 118 | ( 1, -1, -1)] 119 | 120 | # you have to convert the points into a scipy.spatial.ConvexHull 121 | hull = scipy.spatial.ConvexHull(b) 122 | 123 | learner = adaptive.LearnerND(f, hull) 124 | adaptive.BlockingRunner(learner, goal=lambda l: l.npoints > 2000) 125 | 126 | learner.plot_isosurface(-0.5) 127 | -------------------------------------------------------------------------------- /adaptive/learner/base_learner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import abc 4 | from contextlib import suppress 5 | from copy import deepcopy 6 | 7 | from adaptive.utils import save, load 8 | 9 | 10 | class BaseLearner(metaclass=abc.ABCMeta): 11 | """Base class for algorithms for learning a function 'f: X → Y'. 12 | 13 | Attributes 14 | ---------- 15 | function : callable: X → Y 16 | The function to learn. 17 | data : dict: X → Y 18 | `function` evaluated at certain points. 19 | The values can be 'None', which indicates that the point 20 | will be evaluated, but that we do not have the result yet. 21 | npoints : int, optional 22 | The number of evaluated points that have been added to the learner. 23 | Subclasses do not *have* to implement this attribute. 24 | pending_points : set, optional 25 | Points that have been requested but have not been evaluated yet. 26 | Subclasses do not *have* to implement this attribute. 27 | 28 | Notes 29 | ----- 30 | Subclasses may define a ``plot`` method that takes no parameters 31 | and returns a holoviews plot. 32 | """ 33 | 34 | def tell(self, x, y): 35 | """Tell the learner about a single value. 36 | 37 | Parameters 38 | ---------- 39 | x : A value from the function domain 40 | y : A value from the function image 41 | """ 42 | self.tell_many([x], [y]) 43 | 44 | def tell_many(self, xs, ys): 45 | """Tell the learner about some values. 46 | 47 | Parameters 48 | ---------- 49 | xs : Iterable of values from the function domain 50 | ys : Iterable of values from the function image 51 | """ 52 | for x, y in zip(xs, ys): 53 | self.tell(x, y) 54 | 55 | @abc.abstractmethod 56 | def tell_pending(self, x): 57 | """Tell the learner that 'x' has been requested such 58 | that it's not suggested again.""" 59 | pass 60 | 61 | @abc.abstractmethod 62 | def remove_unfinished(self): 63 | """Remove uncomputed data from the learner.""" 64 | pass 65 | 66 | @abc.abstractmethod 67 | def loss(self, real=True): 68 | """Return the loss for the current state of the learner. 69 | 70 | Parameters 71 | ---------- 72 | real : bool, default: True 73 | If False, return the "expected" loss, i.e. the 74 | loss including the as-yet unevaluated points 75 | (possibly by interpolation). 76 | """ 77 | 78 | @abc.abstractmethod 79 | def ask(self, n, tell_pending=True): 80 | """Choose the next 'n' points to evaluate. 81 | 82 | Parameters 83 | ---------- 84 | n : int 85 | The number of points to choose. 86 | tell_pending : bool, default: True 87 | If True, add the chosen points to this learner's 88 | `pending_points`. Set this to False if you do not 89 | want to modify the state of the learner. 
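

Returns
-------
points : list
    The next points that the learner suggests to evaluate.
loss_improvements : list of float
    The expected improvement of the loss for each of the
    suggested points.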
90 | """ 91 | pass 92 | 93 | @abc.abstractmethod 94 | def _get_data(self): 95 | pass 96 | 97 | @abc.abstractmethod 98 | def _set_data(self): 99 | pass 100 | 101 | def copy_from(self, other): 102 | """Copy over the data from another learner. 103 | 104 | Parameters 105 | ---------- 106 | other : BaseLearner object 107 | The learner from which the data is copied. 108 | """ 109 | self._set_data(other._get_data()) 110 | 111 | def save(self, fname, compress=True): 112 | """Save the data of the learner into a pickle file. 113 | 114 | Parameters 115 | ---------- 116 | fname : str 117 | The filename into which to save the learner's data. 118 | compress : bool, default True 119 | Compress the data upon saving using 'gzip'. When saving 120 | using compression, one must load it with compression too. 121 | """ 122 | data = self._get_data() 123 | save(fname, data, compress) 124 | 125 | def load(self, fname, compress=True): 126 | """Load the data of a learner from a pickle file. 127 | 128 | Parameters 129 | ---------- 130 | fname : str 131 | The filename from which to load the learner's data. 132 | compress : bool, default True 133 | If the data is compressed when saved, one must load it 134 | with compression too. 135 | """ 136 | with suppress(FileNotFoundError, EOFError): 137 | data = load(fname, compress) 138 | self._set_data(data) 139 | 140 | def __getstate__(self): 141 | return deepcopy(self.__dict__) 142 | 143 | def __setstate__(self, state): 144 | self.__dict__ = state 145 | -------------------------------------------------------------------------------- /adaptive/learner/average_learner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from math import sqrt 4 | 5 | import numpy as np 6 | 7 | from adaptive.learner.base_learner import BaseLearner 8 | from adaptive.notebook_integration import ensure_holoviews 9 | from adaptive.utils import cache_latest 10 | 11 | 12 | class AverageLearner(BaseLearner): 13 | """A naive implementation of adaptive computing of averages. 14 | 15 | The learned function must depend on an integer input variable that 16 | represents the source of randomness. 17 | 18 | Parameters 19 | ---------- 20 | atol : float 21 | Desired absolute tolerance. 22 | rtol : float 23 | Desired relative tolerance. 24 | 25 | Attributes 26 | ---------- 27 | data : dict 28 | Sampled points and values. 29 | pending_points : set 30 | Points that still have to be evaluated. 31 | npoints : int 32 | Number of evaluated points. 33 | """ 34 | 35 | def __init__(self, function, atol=None, rtol=None): 36 | if atol is None and rtol is None: 37 | raise Exception('At least one of `atol` and `rtol` should be set.') 38 | if atol is None: 39 | atol = np.inf 40 | if rtol is None: 41 | rtol = np.inf 42 | 43 | self.data = {} 44 | self.pending_points = set() 45 | self.function = function 46 | self.atol = atol 47 | self.rtol = rtol 48 | self.npoints = 0 49 | self.sum_f = 0 50 | self.sum_f_sq = 0 51 | 52 | @property 53 | def n_requested(self): 54 | return self.npoints + len(self.pending_points) 55 | 56 | def ask(self, n, tell_pending=True): 57 | points = list(range(self.n_requested, self.n_requested + n)) 58 | 59 | if any(p in self.data or p in self.pending_points for p in points): 60 | # This means some of the points `< self.n_requested` do not exist. 
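# In that case we fill the gaps: take the lowest indices that are
# neither in the data nor pending.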
61 | points = list(set(range(self.n_requested + n)) 62 | - set(self.data) 63 | - set(self.pending_points))[:n] 64 | 65 | loss_improvements = [self._loss_improvement(n) / n] * n 66 | if tell_pending: 67 | for p in points: 68 | self.tell_pending(p) 69 | return points, loss_improvements 70 | 71 | def tell(self, n, value): 72 | if n in self.data: 73 | # The point has already been added before. 74 | return 75 | 76 | self.data[n] = value 77 | self.pending_points.discard(n) 78 | self.sum_f += value 79 | self.sum_f_sq += value**2 80 | self.npoints += 1 81 | 82 | def tell_pending(self, n): 83 | self.pending_points.add(n) 84 | 85 | @property 86 | def mean(self): 87 | """The average of all values in `data`.""" 88 | return self.sum_f / self.npoints 89 | 90 | @property 91 | def std(self): 92 | """The corrected sample standard deviation of the values 93 | in `data`.""" 94 | n = self.npoints 95 | if n < 2: 96 | return np.inf 97 | numerator = self.sum_f_sq - n * self.mean**2 98 | if numerator < 0: 99 | # in this case the numerator ~ -1e-15 100 | return 0 101 | return sqrt(numerator / (n - 1)) 102 | 103 | @cache_latest 104 | def loss(self, real=True, *, n=None): 105 | if n is None: 106 | n = self.npoints if real else self.n_requested 107 | else: 108 | n = n 109 | if n < 2: 110 | return np.inf 111 | standard_error = self.std / sqrt(n) 112 | return max(standard_error / self.atol, 113 | standard_error / abs(self.mean) / self.rtol) 114 | 115 | def _loss_improvement(self, n): 116 | loss = self.loss() 117 | if np.isfinite(loss): 118 | return loss - self.loss(n=self.npoints + n) 119 | else: 120 | return np.inf 121 | 122 | def remove_unfinished(self): 123 | """Remove uncomputed data from the learner.""" 124 | self.pending_points = set() 125 | 126 | def plot(self): 127 | """Returns a histogram of the evaluated data. 128 | 129 | Returns 130 | ------- 131 | holoviews.element.Histogram 132 | A histogram of the evaluated data.""" 133 | hv = ensure_holoviews() 134 | vals = [v for v in self.data.values() if v is not None] 135 | if not vals: 136 | return hv.Histogram([[], []]) 137 | num_bins = int(max(5, sqrt(self.npoints))) 138 | vals = hv.Points(vals) 139 | return hv.operation.histogram(vals, num_bins=num_bins, dimension=1) 140 | 141 | def _get_data(self): 142 | return (self.data, self.npoints, self.sum_f, self.sum_f_sq) 143 | 144 | def _set_data(self, data): 145 | self.data, self.npoints, self.sum_f, self.sum_f_sq = data 146 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
14 | # 15 | import os 16 | import sys 17 | sys.path.insert(0, os.path.abspath('../..')) 18 | 19 | import adaptive 20 | import sphinx_fontawesome 21 | 22 | # -- Project information ----------------------------------------------------- 23 | 24 | project = 'adaptive' 25 | copyright = '2018, Adaptive Authors' 26 | author = 'Adaptive Authors' 27 | 28 | # The short X.Y version 29 | version = adaptive.__version__ 30 | # The full version, including alpha/beta/rc tags 31 | release = adaptive.__version__ 32 | 33 | 34 | # -- General configuration --------------------------------------------------- 35 | 36 | # If your documentation needs a minimal Sphinx version, state it here. 37 | # 38 | # needs_sphinx = '1.0' 39 | 40 | # Add any Sphinx extension module names here, as strings. They can be 41 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 42 | # ones. 43 | extensions = [ 44 | 'sphinx.ext.autodoc', 45 | 'sphinx.ext.autosummary', 46 | 'sphinx.ext.autosectionlabel', 47 | 'sphinx.ext.intersphinx', 48 | 'sphinx.ext.mathjax', 49 | 'sphinx.ext.viewcode', 50 | 'sphinx.ext.napoleon', 51 | 'jupyter_sphinx.execute', 52 | 'sphinx_fontawesome', 53 | 'm2r', 54 | ] 55 | 56 | source_parsers = {} 57 | 58 | # Add any paths that contain templates here, relative to this directory. 59 | templates_path = ['_templates'] 60 | 61 | # The suffix(es) of source filenames. 62 | # You can specify multiple suffix as a list of string: 63 | # 64 | source_suffix = ['.rst', '.md'] 65 | #source_suffix = '.rst' 66 | 67 | # The master toctree document. 68 | master_doc = 'index' 69 | 70 | # The language for content autogenerated by Sphinx. Refer to documentation 71 | # for a list of supported languages. 72 | # 73 | # This is also used if you do content translation via gettext catalogs. 74 | # Usually you set "language" from the command line for these cases. 75 | language = None 76 | 77 | # List of patterns, relative to source directory, that match files and 78 | # directories to ignore when looking for source files. 79 | # This pattern also affects html_static_path and html_extra_path . 80 | exclude_patterns = [] 81 | 82 | # The name of the Pygments (syntax highlighting) style to use. 83 | pygments_style = 'sphinx' 84 | 85 | 86 | # -- Options for HTML output ------------------------------------------------- 87 | 88 | # The theme to use for HTML and HTML Help pages. See the documentation for 89 | # a list of builtin themes. 90 | # 91 | html_theme = 'sphinx_rtd_theme' 92 | 93 | 94 | # Theme options are theme-specific and customize the look and feel of a theme 95 | # further. For a list of options available for each theme, see the 96 | # documentation. 97 | # 98 | # html_theme_options = {} 99 | 100 | # Add any paths that contain custom static files (such as style sheets) here, 101 | # relative to this directory. They are copied after the builtin static files, 102 | # so a file named "default.css" will overwrite the builtin "default.css". 103 | html_static_path = ['_static'] 104 | 105 | # Custom sidebar templates, must be a dictionary that maps document names 106 | # to template names. 107 | # 108 | # The default sidebars (for documents that don't match any pattern) are 109 | # defined by theme itself. Builtin themes are using these templates by 110 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 111 | # 'searchbox.html']``. 
112 | # 113 | # html_sidebars = {} 114 | 115 | 116 | # -- Options for HTMLHelp output --------------------------------------------- 117 | 118 | # Output file base name for HTML help builder. 119 | htmlhelp_basename = 'adaptivedoc' 120 | 121 | 122 | # -- Extension configuration ------------------------------------------------- 123 | 124 | default_role = 'autolink' 125 | 126 | intersphinx_mapping = { 127 | 'python': ('https://docs.python.org/3', None), 128 | 'distributed': ('https://distributed.readthedocs.io/en/stable/', None), 129 | 'holoviews': ('https://holoviews.org/', None), 130 | 'ipyparallel': ('https://ipyparallel.readthedocs.io/en/stable/', None), 131 | 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None), 132 | } 133 | 134 | 135 | def get_holoviews_js_css(): 136 | from holoviews.plotting import Renderer 137 | dependencies = {**Renderer.core_dependencies, 138 | **Renderer.extra_dependencies} 139 | required = ['jQuery', 'jQueryUI', 'underscore', 'require'] 140 | js = [url for name in required for url in dependencies[name].get('js', [])] 141 | css = [url for name in required for url in dependencies[name].get('css', [])] 142 | return js, css 143 | 144 | 145 | js, css = get_holoviews_js_css() 146 | html_context = {'holoviews_js_files': js} 147 | 148 | 149 | def remove_jquery_and_underscore(app): 150 | # We need to remove the jquery and underscore files that are 151 | # added by default because we already add them in the <head> tag. 152 | remove = lambda x: not any(js in x for js in ['jquery', 'underscore']) 153 | if hasattr(app.builder, 'script_files'): 154 | app.builder.script_files = [x for x in app.builder.script_files 155 | if remove(x)] 156 | 157 | 158 | def setup(app): 159 | for url in css: 160 | app.add_stylesheet(url) 161 | 162 | app.add_stylesheet('custom.css') # For the `live_info` widget 163 | app.connect('builder-inited', remove_jquery_and_underscore) 164 | -------------------------------------------------------------------------------- /docs/source/docs.rst: -------------------------------------------------------------------------------- 1 | Implemented algorithms 2 | ---------------------- 3 | 4 | The core concept in ``adaptive`` is that of a *learner*. A *learner* 5 | samples a function at the best places in its parameter space to get 6 | maximum “information” about the function. As it evaluates the function 7 | at more and more points in the parameter space, it gets a better idea of 8 | where the best places are to sample next. 9 | 10 | Of course, what qualifies as the “best places” will depend on your 11 | application domain! ``adaptive`` makes some reasonable default choices, 12 | but the details of the adaptive sampling are completely customizable. 13 | 14 | The following learners are implemented: 15 | 16 | - `~adaptive.Learner1D`, for 1D functions ``f: ℝ → ℝ^N``, 17 | - `~adaptive.Learner2D`, for 2D functions ``f: ℝ^2 → ℝ^N``, 18 | - `~adaptive.LearnerND`, for ND functions ``f: ℝ^N → ℝ^M``, 19 | - `~adaptive.AverageLearner`, for stochastic functions where you want to 20 | average the result over many evaluations, 21 | - `~adaptive.IntegratorLearner`, for 22 | when you want to integrate a 1D function ``f: ℝ → ℝ``. 23 | 24 | Meta-learners (to be used with other learners): 25 | 26 | - `~adaptive.BalancingLearner`, for when you want to run several learners at once, 27 | selecting the “best” one each time you get more points, 28 | - `~adaptive.DataSaver`, for when your function doesn't just return a scalar or a vector. 
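

Every learner follows the same basic pattern: construct it with the
function to learn and its bounds, then hand it to a runner (or drive it
yourself with `adaptive.runner.simple`). As a minimal sketch, where ``f``
below is just a stand-in for your own function:

.. jupyter-execute::

    import adaptive

    def f(x):
        return x**2  # stand-in for the function you want to learn

    learner = adaptive.Learner1D(f, bounds=(-1, 1))

    # Drive the learner synchronously until the loss is small enough.
    adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01)
    learner.plot()
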
29 | 30 | In addition to the learners, ``adaptive`` also provides primitives for 31 | running the sampling across several cores and even several machines, 32 | with built-in support for 33 | `concurrent.futures `_, 34 | `ipyparallel `_ and 35 | `distributed `_. 36 | 37 | Examples 38 | -------- 39 | 40 | Here are some examples of how Adaptive samples vs. homogeneous sampling. Click 41 | on the *Play* :fa:`play` button or move the sliders. 42 | 43 | .. jupyter-execute:: 44 | :hide-code: 45 | 46 | import itertools 47 | import adaptive 48 | from adaptive.learner.learner1D import uniform_loss, default_loss 49 | import holoviews as hv 50 | import numpy as np 51 | adaptive.notebook_extension() 52 | %output holomap='scrubber' 53 | 54 | `adaptive.Learner1D` 55 | ~~~~~~~~~~~~~~~~~~~~ 56 | 57 | .. jupyter-execute:: 58 | :hide-code: 59 | 60 | %%opts Layout [toolbar=None] 61 | def f(x, offset=0.07357338543088588): 62 | a = 0.01 63 | return x + a**2 / (a**2 + (x - offset)**2) 64 | 65 | def plot_loss_interval(learner): 66 | if learner.npoints >= 2: 67 | x_0, x_1 = max(learner.losses, key=learner.losses.get) 68 | y_0, y_1 = learner.data[x_0], learner.data[x_1] 69 | x, y = [x_0, x_1], [y_0, y_1] 70 | else: 71 | x, y = [], [] 72 | return hv.Scatter((x, y)).opts(style=dict(size=6, color='r')) 73 | 74 | def plot(learner, npoints): 75 | adaptive.runner.simple(learner, lambda l: l.npoints == npoints) 76 | return (learner.plot() * plot_loss_interval(learner))[:, -1.1:1.1] 77 | 78 | def get_hm(loss_per_interval, N=101): 79 | learner = adaptive.Learner1D(f, bounds=(-1, 1), 80 | loss_per_interval=loss_per_interval) 81 | plots = {n: plot(learner, n) for n in range(N)} 82 | return hv.HoloMap(plots, kdims=['npoints']) 83 | 84 | (get_hm(uniform_loss).relabel('homogeneous sampling') 85 | + get_hm(default_loss).relabel('with adaptive')) 86 | 87 | `adaptive.Learner2D` 88 | ~~~~~~~~~~~~~~~~~~~~ 89 | 90 | .. jupyter-execute:: 91 | :hide-code: 92 | 93 | def ring(xy): 94 | import numpy as np 95 | x, y = xy 96 | a = 0.2 97 | return x + np.exp(-(x**2 + y**2 - 0.75**2)**2/a**4) 98 | 99 | def plot(learner, npoints): 100 | adaptive.runner.simple(learner, lambda l: l.npoints == npoints) 101 | learner2 = adaptive.Learner2D(ring, bounds=learner.bounds) 102 | xs = ys = np.linspace(*learner.bounds[0], int(learner.npoints**0.5)) 103 | xys = list(itertools.product(xs, ys)) 104 | learner2.tell_many(xys, map(ring, xys)) 105 | return (learner2.plot().relabel('homogeneous grid') 106 | + learner.plot().relabel('with adaptive') 107 | + learner2.plot(tri_alpha=0.5).relabel('homogeneous sampling') 108 | + learner.plot(tri_alpha=0.5).relabel('with adaptive')).cols(2) 109 | 110 | learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) 111 | plots = {n: plot(learner, n) for n in range(4, 1010, 20)} 112 | hv.HoloMap(plots, kdims=['npoints']).collate() 113 | 114 | `adaptive.AverageLearner` 115 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 116 | 117 | .. jupyter-execute:: 118 | :hide-code: 119 | 120 | def g(n): 121 | import random 122 | random.seed(n) 123 | val = random.gauss(0.5, 0.5) 124 | return val 125 | 126 | learner = adaptive.AverageLearner(g, atol=None, rtol=0.01) 127 | 128 | def plot(learner, npoints): 129 | adaptive.runner.simple(learner, lambda l: l.npoints == npoints) 130 | return learner.plot().relabel(f'loss={learner.loss():.2f}') 131 | 132 | plots = {n: plot(learner, n) for n in range(10, 10000, 200)} 133 | hv.HoloMap(plots, kdims=['npoints']) 134 | 135 | `adaptive.LearnerND` 136 | ~~~~~~~~~~~~~~~~~~~~ 137 | 138 | .. 
jupyter-execute:: 139 | :hide-code: 140 | 141 | def sphere(xyz): 142 | import numpy as np 143 | x, y, z = xyz 144 | a = 0.4 145 | return np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4) 146 | 147 | learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)]) 148 | adaptive.runner.simple(learner, lambda l: l.npoints == 3000) 149 | 150 | learner.plot_3D() 151 | 152 | see more in the :ref:`Tutorial Adaptive`. 153 | 154 | .. include:: ../../README.rst 155 | :start-after: not-in-documentation-end 156 | :end-before: credits-end 157 | 158 | .. mdinclude:: ../../AUTHORS.md 159 | 160 | .. include:: ../../README.rst 161 | :start-after: credits-end 162 | :end-before: references-start 163 | -------------------------------------------------------------------------------- /adaptive/learner/integrator_coeffs.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Based on an adaptive quadrature algorithm by Pedro Gonnet 3 | 4 | from fractions import Fraction 5 | from collections import defaultdict 6 | import numpy as np 7 | import scipy.linalg 8 | 9 | 10 | def legendre(n): 11 | """Return the first n Legendre polynomials. 12 | 13 | The polynomials have *standard* normalization, i.e. 14 | int_{-1}^1 dx L_n(x) L_m(x) = delta(m, n) * 2 / (2 * n + 1). 15 | 16 | The return value is a list of list of fraction.Fraction instances. 17 | """ 18 | result = [[Fraction(1)], [Fraction(0), Fraction(1)]] 19 | if n <= 2: 20 | return result[:n] 21 | for i in range(2, n): 22 | # Use Bonnet's recursion formula. 23 | new = (i + 1) * [Fraction(0)] 24 | new[1:] = (r * (2*i - 1) for r in result[-1]) 25 | new[:-2] = (n - r * (i - 1) for n, r in zip(new[:-2], result[-2])) 26 | new[:] = (n / i for n in new) 27 | result.append(new) 28 | return result 29 | 30 | 31 | def newton(n): 32 | """Compute the monomial coefficients of the Newton polynomial over the 33 | nodes of the n-point Clenshaw-Curtis quadrature rule. 34 | """ 35 | # The nodes of the Clenshaw-Curtis rule are x_i = -cos(i * Pi / (n-1)). 36 | # Here, we calculate the coefficients c_i such that sum_i c_i * x^i 37 | # = prod_i (x - x_i). The coefficients are thus sums of products of 38 | # cosines. 39 | # 40 | # This routine uses the relation 41 | # cos(a) cos(b) = (cos(a + b) + cos(a - b)) / 2 42 | # to efficiently calculate the coefficients. 43 | # 44 | # The dictionary 'terms' descibes the terms that make up the 45 | # monomial coefficients. Each item ((d, a), m) corresponds to a 46 | # term m * cos(a * Pi / n) to be added to prefactor of the 47 | # monomial x^(n-d). 48 | 49 | mod = 2 * (n-1) 50 | terms = defaultdict(int) 51 | terms[0, 0] += 1 52 | 53 | for i in range(n): 54 | newterms = [] 55 | for (d, a), m in terms.items(): 56 | for b in [i, -i]: 57 | # In order to reduce the number of terms, cosine 58 | # arguments are mapped back to the inteval [0, pi/2). 59 | arg = (a + b) % mod 60 | if arg > n-1: 61 | arg = mod - arg 62 | if arg >= n // 2: 63 | if n % 2 and arg == n // 2: 64 | # Zero term: ignore 65 | continue 66 | newterms.append((d + 1, n - 1 - arg, -m)) 67 | else: 68 | newterms.append((d + 1, arg, m)) 69 | for d, s, m in newterms: 70 | terms[d, s] += m 71 | 72 | c = (n + 1) * [0] 73 | for (d, a), m in terms.items(): 74 | if m and a != 0: 75 | raise ValueError("Newton polynomial cannot be represented exactly.") 76 | c[n - d] += m 77 | # The check could be removed and the above line replaced by 78 | # the following, but then the result would be no longer exact. 
# c[n - d] += m * np.cos(a * np.pi / (n - 1)) 80 | 81 | cf = np.array(c, float) 82 | assert all(int(cfe) == ce for cfe, ce in zip(cf, c)), 'Precision loss' 83 | 84 | cf /= 2.**np.arange(n, -1, -1) 85 | return cf 86 | 87 | 88 | def scalar_product(a, b): 89 | """Compute the polynomial scalar product int_-1^1 dx a(x) b(x). 90 | 91 | The args must be sequences of polynomial coefficients. This 92 | function is careful to use the input data type for calculations. 93 | """ 94 | la = len(a) 95 | lc = len(b) + la + 1 96 | 97 | # Compute the even coefficients of the product of a and b. 98 | c = lc * [a[0].__class__()] 99 | for i, bi in enumerate(b): 100 | if bi == 0: 101 | continue 102 | for j in range(i % 2, la, 2): 103 | c[i + j] += a[j] * bi 104 | 105 | # Calculate the definite integral from -1 to 1. 106 | return 2 * sum(c[i] / (i + 1) for i in range(0, lc, 2)) 107 | 108 | 109 | def calc_bdef(ns): 110 | """Calculate the decompositions of Newton polynomials (over the nodes 111 | of the n-point Clenshaw-Curtis quadrature rule) in terms of 112 | Legendre polynomials. 113 | 114 | The parameter 'ns' is a sequence of numbers of points of the 115 | quadrature rule. The return value is a corresponding sequence of 116 | normalized Legendre polynomial coefficients. 117 | """ 118 | legs = legendre(max(ns) + 1) 119 | result = [] 120 | for n in ns: 121 | poly = [] 122 | a = list(map(Fraction, newton(n))) 123 | for b in legs[:n + 1]: 124 | igral = scalar_product(a, b) 125 | 126 | # Normalize & store. (The polynomials returned by 127 | # legendre() have standard normalization that is not 128 | # orthonormal.) 129 | poly.append(np.sqrt((2*len(b) - 1) / 2) * igral) 130 | 131 | result.append(np.array(poly)) 132 | return result 133 | 134 | 135 | def calc_V(x, n): 136 | V = [np.ones(x.shape), x.copy()] 137 | for i in range(2, n): 138 | V.append((2*i-1) / i * x * V[-1] - (i-1) / i * V[-2]) 139 | for i in range(n): 140 | V[i] *= np.sqrt(i + 0.5) 141 | return np.array(V).T 142 | 143 | 144 | eps = np.spacing(1) 145 | 146 | # the nodes and Newton polynomials 147 | ns = (5, 9, 17, 33) 148 | xi = [-np.cos(np.linspace(0, np.pi, n)) for n in ns] 149 | 150 | # Make `xi` perfectly anti-symmetric, important for splitting the intervals 151 | xi = [(row - row[::-1]) / 2 for row in xi] 152 | 153 | # Compute the Vandermonde-like matrix and its inverse. 154 | V = [calc_V(x, n) for x, n in zip(xi, ns)] 155 | V_inv = list(map(scipy.linalg.inv, V)) 156 | Vcond = [scipy.linalg.norm(a, 2) * scipy.linalg.norm(b, 2) for a, b in zip(V, V_inv)] 157 | 158 | # Compute the shift matrices. 159 | T_left, T_right = [V_inv[3] @ calc_V((xi[3] + a) / 2, ns[3]) for a in [-1, 1]] 160 | 161 | # If the relative difference between two consecutive approximations is 162 | # lower than this value, the error estimate is considered reliable. 163 | # See section 6.2 of Pedro Gonnet's thesis. 164 | hint = 0.1 165 | 166 | # Smallest acceptable relative difference of points in a rule. This was chosen 167 | # such that no artifacts are apparent in plots of (i, log(a_i)), where a_i is 168 | # the sequence of estimates of the integral value of an interval and all its 169 | # ancestors. 
170 | min_sep = 16 * eps 171 | 172 | ndiv_max = 20 173 | 174 | # set-up the downdate matrix 175 | k = np.arange(ns[3]) 176 | alpha = np.sqrt((k+1)**2 / (2*k+1) / (2*k+3)) 177 | gamma = np.concatenate([[0, 0], np.sqrt(k[2:]**2 / (4*k[2:]**2-1))]) 178 | 179 | b_def = calc_bdef(ns) 180 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.custom_loss.rst: -------------------------------------------------------------------------------- 1 | Custom adaptive logic for 1D and 2D 2 | ----------------------------------- 3 | 4 | .. note:: 5 | Because this documentation consists of static html, the ``live_plot`` 6 | and ``live_info`` widget is not live. Download the notebook 7 | in order to see the real behaviour. 8 | 9 | .. seealso:: 10 | The complete source code of this tutorial can be found in 11 | :jupyter-download:notebook:`tutorial.custom-loss` 12 | 13 | .. jupyter-execute:: 14 | :hide-code: 15 | 16 | import adaptive 17 | adaptive.notebook_extension() 18 | 19 | # Import modules that are used in multiple cells 20 | import numpy as np 21 | from functools import partial 22 | 23 | 24 | `~adaptive.Learner1D` and `~adaptive.Learner2D` both work on the principle of 25 | subdividing their domain into subdomains, and assigning a property to 26 | each subdomain, which we call the *loss*. The algorithm for choosing the 27 | best place to evaluate our function is then simply *take the subdomain 28 | with the largest loss and add a point in the center, creating new 29 | subdomains around this point*. 30 | 31 | The *loss function* that defines the loss per subdomain is the canonical 32 | place to define what regions of the domain are “interesting”. The 33 | default loss function for `~adaptive.Learner1D` and `~adaptive.Learner2D` is sufficient 34 | for a wide range of common cases, but it is by no means a panacea. For 35 | example, the default loss function will tend to get stuck on 36 | divergences. 37 | 38 | Both the `~adaptive.Learner1D` and `~adaptive.Learner2D` allow you to specify a *custom 39 | loss function*. Below we illustrate how you would go about writing your 40 | own loss function. The documentation for `~adaptive.Learner1D` and `~adaptive.Learner2D` 41 | specifies the signature that your loss function needs to have in order 42 | for it to work with ``adaptive``. 43 | 44 | tl;dr, one can use the following *loss functions* that 45 | **we** already implemented: 46 | 47 | + `adaptive.learner.learner1D.default_loss` 48 | + `adaptive.learner.learner1D.uniform_loss` 49 | + `adaptive.learner.learner1D.curvature_loss_function` 50 | + `adaptive.learner.learner2D.default_loss` 51 | + `adaptive.learner.learner2D.uniform_loss` 52 | + `adaptive.learner.learner2D.minimize_triangle_surface_loss` 53 | + `adaptive.learner.learner2D.resolution_loss_function` 54 | 55 | Whenever a loss function has `_function` appended to its name, it is a factory function 56 | that returns the loss function with certain settings. 57 | 58 | Uniform sampling 59 | ~~~~~~~~~~~~~~~~ 60 | 61 | Say we want to properly sample a function that contains divergences. A 62 | simple (but naive) strategy is to *uniformly* sample the domain: 63 | 64 | .. 
jupyter-execute:: 65 | 66 | def uniform_sampling_1d(xs, ys): 67 | dx = xs[1] - xs[0] 68 | return dx 69 | 70 | def f_divergent_1d(x): 71 | return 1 / x**2 72 | 73 | learner = adaptive.Learner1D(f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d) 74 | runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) 75 | learner.plot().select(y=(0, 10000)) 76 | 77 | .. jupyter-execute:: 78 | 79 | %%opts EdgePaths (color='w') Image [logz=True colorbar=True] 80 | 81 | from adaptive.runner import SequentialExecutor 82 | 83 | def uniform_sampling_2d(ip): 84 | from adaptive.learner.learner2D import areas 85 | A = areas(ip) 86 | return np.sqrt(A) 87 | 88 | def f_divergent_2d(xy): 89 | x, y = xy 90 | return 1 / (x**2 + y**2) 91 | 92 | learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=uniform_sampling_2d) 93 | 94 | # this takes a while, so use the async Runner so we know *something* is happening 95 | runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.02) 96 | 97 | .. jupyter-execute:: 98 | :hide-code: 99 | 100 | await runner.task # This is not needed in a notebook environment! 101 | 102 | .. jupyter-execute:: 103 | 104 | runner.live_info() 105 | 106 | .. jupyter-execute:: 107 | 108 | plotter = lambda l: l.plot(tri_alpha=0.3).relabel( 109 | '1 / (x^2 + y^2) in log scale') 110 | runner.live_plot(update_interval=0.2, plotter=plotter) 111 | 112 | The uniform sampling strategy is a common case to benchmark against, so 113 | the 1D and 2D versions are included in ``adaptive`` as 114 | `adaptive.learner.learner1D.uniform_loss` and 115 | `adaptive.learner.learner2D.uniform_loss`. 116 | 117 | Doing better 118 | ~~~~~~~~~~~~ 119 | 120 | Of course, using ``adaptive`` for uniform sampling is a bit of a waste! 121 | 122 | Let’s see if we can do a bit better. Below we define a loss per 123 | subdomain that scales with the degree of nonlinearity of the function 124 | (this is very similar to the default loss function for `~adaptive.Learner2D`), 125 | but which is 0 for subdomains smaller than a certain area, and infinite 126 | for subdomains larger than a certain area. 127 | 128 | A loss defined in this way means that the adaptive algorithm will first 129 | prioritise subdomains that are too large (infinite loss). After all 130 | subdomains are appropriately small it will prioritise places where the 131 | function is very nonlinear, but will ignore subdomains that are too 132 | small (0 loss). 133 | 134 | .. 
jupyter-execute:: 135 | 136 | %%opts EdgePaths (color='w') Image [logz=True colorbar=True] 137 | 138 | def resolution_loss_function(min_distance=0, max_distance=1): 139 | """min_distance and max_distance should be in between 0 and 1 140 | because the total area is normalized to 1.""" 141 | def resolution_loss(ip): 142 | from adaptive.learner.learner2D import default_loss, areas 143 | loss = default_loss(ip) 144 | 145 | A = areas(ip) 146 | # Setting areas with a small area to zero such that they won't be chosen again 147 | loss[A < min_distance**2] = 0 148 | 149 | # Setting triangles that have a size larger than max_distance to infinite loss 150 | loss[A > max_distance**2] = np.inf 151 | 152 | return loss 153 | return resolution_loss 154 | loss = resolution_loss_function(min_distance=0.01) 155 | 156 | learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss) 157 | runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02) 158 | learner.plot(tri_alpha=0.3).relabel('1 / (x^2 + y^2) in log scale') 159 | 160 | Awesome! We zoom in on the singularity, but not at the expense of 161 | sampling the rest of the domain a reasonable amount. 162 | 163 | The above strategy is available as 164 | `adaptive.learner.learner2D.resolution_loss_function`. 165 | -------------------------------------------------------------------------------- /docs/source/tutorial/tutorial.Learner1D.rst: -------------------------------------------------------------------------------- 1 | Tutorial `~adaptive.Learner1D` 2 | ------------------------------ 3 | 4 | .. note:: 5 | Because this documentation consists of static html, the ``live_plot`` 6 | and ``live_info`` widget is not live. Download the notebook 7 | in order to see the real behaviour. 8 | 9 | .. seealso:: 10 | The complete source code of this tutorial can be found in 11 | :jupyter-download:notebook:`tutorial.Learner1D` 12 | 13 | .. jupyter-execute:: 14 | :hide-code: 15 | 16 | import adaptive 17 | adaptive.notebook_extension() 18 | 19 | import numpy as np 20 | from functools import partial 21 | import random 22 | 23 | scalar output: ``f:ℝ → ℝ`` 24 | .......................... 25 | 26 | We start with the most common use-case: sampling a 1D function 27 | :math:`\ f: ℝ → ℝ`. 28 | 29 | We will use the following function, which is a smooth (linear) 30 | background with a sharp peak at a random location: 31 | 32 | .. jupyter-execute:: 33 | 34 | offset = random.uniform(-0.5, 0.5) 35 | 36 | def f(x, offset=offset, wait=True): 37 | from time import sleep 38 | from random import random 39 | 40 | a = 0.01 41 | if wait: 42 | sleep(random() / 10) 43 | return x + a**2 / (a**2 + (x - offset)**2) 44 | 45 | We start by initializing a 1D “learner”, which will suggest points to 46 | evaluate, and adapt its suggestions as more and more points are 47 | evaluated. 48 | 49 | .. jupyter-execute:: 50 | 51 | learner = adaptive.Learner1D(f, bounds=(-1, 1)) 52 | 53 | Next we create a “runner” that will request points from the learner and 54 | evaluate ‘f’ on them. 55 | 56 | By default on Unix-like systems the runner will evaluate the points in 57 | parallel using local processes `concurrent.futures.ProcessPoolExecutor`. 58 | 59 | On Windows systems the runner will try to use a `distributed.Client` 60 | if `distributed` is installed. A `~concurrent.futures.ProcessPoolExecutor` 61 | cannot be used on Windows for reasons. 62 | 63 | .. jupyter-execute:: 64 | 65 | # The end condition is when the "loss" is less than 0.1. 
165 | 
--------------------------------------------------------------------------------
/docs/source/tutorial/tutorial.Learner1D.rst:
--------------------------------------------------------------------------------
1 | Tutorial `~adaptive.Learner1D`
2 | ------------------------------
3 | 
4 | .. note::
5 |    Because this documentation consists of static html, the ``live_plot``
6 |    and ``live_info`` widgets are not live. Download the notebook
7 |    in order to see the real behaviour.
8 | 
9 | .. seealso::
10 |     The complete source code of this tutorial can be found in
11 |     :jupyter-download:notebook:`tutorial.Learner1D`
12 | 
13 | .. jupyter-execute::
14 |     :hide-code:
15 | 
16 |     import adaptive
17 |     adaptive.notebook_extension()
18 | 
19 |     import numpy as np
20 |     from functools import partial
21 |     import random
22 | 
23 | scalar output: ``f:ℝ → ℝ``
24 | ..........................
25 | 
26 | We start with the most common use-case: sampling a 1D function
27 | :math:`\ f: ℝ → ℝ`.
28 | 
29 | We will use the following function, which is a smooth (linear)
30 | background with a sharp peak at a random location:
31 | 
32 | .. jupyter-execute::
33 | 
34 |     offset = random.uniform(-0.5, 0.5)
35 | 
36 |     def f(x, offset=offset, wait=True):
37 |         from time import sleep
38 |         from random import random
39 | 
40 |         a = 0.01
41 |         if wait:
42 |             sleep(random() / 10)
43 |         return x + a**2 / (a**2 + (x - offset)**2)
44 | 
45 | We start by initializing a 1D “learner”, which will suggest points to
46 | evaluate, and adapt its suggestions as more and more points are
47 | evaluated.
48 | 
49 | .. jupyter-execute::
50 | 
51 |     learner = adaptive.Learner1D(f, bounds=(-1, 1))
52 | 
53 | Next we create a “runner” that will request points from the learner and
54 | evaluate ``f`` on them.
55 | 
56 | By default on Unix-like systems the runner will evaluate the points in
57 | parallel using local processes (a `concurrent.futures.ProcessPoolExecutor`).
58 | 
59 | On Windows systems the runner will try to use a `distributed.Client`
60 | if `distributed` is installed. A `~concurrent.futures.ProcessPoolExecutor`
61 | cannot be used on Windows, because Windows spawns new processes and therefore requires the evaluated function to be picklable, which functions defined interactively in a notebook are not.
62 | 
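If you want explicit control over the parallelization, you can also pass
an executor to the runner yourself. A minimal sketch, assuming a
Unix-like system (the number of workers is chosen only for
illustration):

.. code:: python

    from concurrent.futures import ProcessPoolExecutor

    # evaluate the points with at most four local worker processes
    runner = adaptive.Runner(learner,
                             executor=ProcessPoolExecutor(max_workers=4),
                             goal=lambda l: l.loss() < 0.01)

In this tutorial we simply use the default executor: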
63 | .. jupyter-execute::
64 | 
65 |     # The end condition is when the "loss" is less than 0.01. In the context of the
66 |     # 1D learner this means that we will resolve features in ``f`` with width 0.01 or wider.
67 |     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
68 | 
69 | .. jupyter-execute::
70 |     :hide-code:
71 | 
72 |     await runner.task  # This is not needed in a notebook environment!
73 | 
74 | When instantiated in a Jupyter notebook the runner does its job in the
75 | background and does not block the IPython kernel. We can use this to
76 | create a plot that updates as new data arrives:
77 | 
78 | .. jupyter-execute::
79 | 
80 |     runner.live_info()
81 | 
82 | .. jupyter-execute::
83 | 
84 |     runner.live_plot(update_interval=0.1)
85 | 
86 | We can now compare the adaptive sampling to a homogeneous sampling with
87 | the same number of points:
88 | 
89 | .. jupyter-execute::
90 | 
91 |     if not runner.task.done():
92 |         raise RuntimeError('Wait for the runner to finish before executing the cells below!')
93 | 
94 | .. jupyter-execute::
95 | 
96 |     learner2 = adaptive.Learner1D(f, bounds=learner.bounds)
97 | 
98 |     xs = np.linspace(*learner.bounds, len(learner.data))
99 |     learner2.tell_many(xs, map(partial(f, wait=False), xs))
100 | 
101 |     learner.plot() + learner2.plot()
102 | 
103 | 
104 | vector output: ``f:ℝ → ℝ^N``
105 | ............................
106 | 
107 | Sometimes you may want to learn a function with vector output:
108 | 
109 | .. jupyter-execute::
110 | 
111 |     random.seed(0)
112 |     offsets = [random.uniform(-0.8, 0.8) for _ in range(3)]
113 | 
114 |     # sharp peaks at random locations in the domain
115 |     def f_levels(x, offsets=offsets):
116 |         a = 0.01
117 |         return np.array([offset + x + a**2 / (a**2 + (x - offset)**2)
118 |                          for offset in offsets])
119 | 
120 | ``adaptive`` has you covered! The ``Learner1D`` can be used for such
121 | functions:
122 | 
123 | .. jupyter-execute::
124 | 
125 |     learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))
126 |     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
127 | 
128 | .. jupyter-execute::
129 |     :hide-code:
130 | 
131 |     await runner.task  # This is not needed in a notebook environment!
132 | 
133 | .. jupyter-execute::
134 | 
135 |     runner.live_info()
136 | 
137 | .. jupyter-execute::
138 | 
139 |     runner.live_plot(update_interval=0.1)
140 | 
141 | 
142 | Looking at curvature
143 | ....................
144 | 
145 | By default ``adaptive`` will sample more points where the (normalized)
146 | Euclidean distance between the neighboring points is large.
147 | You may achieve better results by sampling more points in regions with high
148 | curvature. To do this, you need to tell the learner to look at the curvature
149 | by specifying ``loss_per_interval``.
150 | 
151 | .. jupyter-execute::
152 | 
153 |     from adaptive.learner.learner1D import (curvature_loss_function,
154 |                                             uniform_loss,
155 |                                             default_loss)
156 |     curvature_loss = curvature_loss_function()
157 |     learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=curvature_loss)
158 |     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
159 | 
160 | .. jupyter-execute::
161 |     :hide-code:
162 | 
163 |     await runner.task  # This is not needed in a notebook environment!
164 | 
165 | .. jupyter-execute::
166 | 
167 |     runner.live_info()
168 | 
169 | .. jupyter-execute::
170 | 
171 |     runner.live_plot(update_interval=0.1)
172 | 
173 | The plot below compares homogeneous sampling, the default (Euclidean) loss, which
174 | considers only a single interval, and the curvature loss, which also includes the nearest neighboring intervals. Each learner gets 100 points.
175 | 
176 | .. jupyter-execute::
177 | 
178 |     def sin_exp(x):
179 |         from math import exp, sin
180 |         return sin(15 * x) * exp(-x**2*2)
181 | 
182 |     learner_h = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=uniform_loss)
183 |     learner_1 = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=default_loss)
184 |     learner_2 = adaptive.Learner1D(sin_exp, (-1, 1), loss_per_interval=curvature_loss)
185 | 
186 |     npoints_goal = lambda l: l.npoints >= 100
187 |     # adaptive.runner.simple is a non-parallel blocking runner.
188 |     adaptive.runner.simple(learner_h, goal=npoints_goal)
189 |     adaptive.runner.simple(learner_1, goal=npoints_goal)
190 |     adaptive.runner.simple(learner_2, goal=npoints_goal)
191 | 
192 |     (learner_h.plot().relabel('homogeneous')
193 |      + learner_1.plot().relabel('Euclidean loss')
194 |      + learner_2.plot().relabel('curvature loss')).cols(2)
195 | 
196 | More info about using custom loss functions can be found
197 | in :ref:`Custom adaptive logic for 1D and 2D`.
198 | 
--------------------------------------------------------------------------------
/adaptive/_version.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # This file is part of 'miniver': https://github.com/jbweston/miniver
3 | #
4 | from collections import namedtuple
5 | import os
6 | import subprocess
7 | 
8 | from distutils.command.build_py import build_py as build_py_orig
9 | from setuptools.command.sdist import sdist as sdist_orig
10 | 
11 | Version = namedtuple('Version', ('release', 'dev', 'labels'))
12 | 
13 | # No public API
14 | __all__ = []
15 | 
16 | package_root = os.path.dirname(os.path.realpath(__file__))
17 | package_name = os.path.basename(package_root)
18 | distr_root = os.path.dirname(package_root)
19 | 
20 | STATIC_VERSION_FILE = '_static_version.py'
21 | 
22 | 
23 | def get_version(version_file=STATIC_VERSION_FILE):
24 |     version_info = get_static_version_info(version_file)
25 |     version = version_info['version']
26 |     if version == "__use_git__":
27 |         version = get_version_from_git()
28 |         if not version:
29 |             version = get_version_from_git_archive(version_info)
30 |         if not version:
31 |             version = Version("unknown", None, None)
32 |         return pep440_format(version)
33 |     else:
34 |         return version
35 | 
36 | 
37 | def get_static_version_info(version_file=STATIC_VERSION_FILE):
38 |     version_info = {}
39 |     with open(os.path.join(package_root, version_file), 'rb') as f:
40 |         exec(f.read(), {}, version_info)
41 |     return version_info
42 | 
43 | 
44 | def version_is_from_git(version_file=STATIC_VERSION_FILE):
45 |     return get_static_version_info(version_file)['version'] == '__use_git__'
46 | 
47 | 
48 | def pep440_format(version_info):
49 |     release, dev, labels = version_info
50 | 
51 |     version_parts = [release]
52 |     if dev:
53 |         if release.endswith('-dev') or release.endswith('.dev'):
54 |             version_parts.append(dev)
55 |         else:  # prefer PEP440 over strict adherence to semver
56 |             version_parts.append('.dev{}'.format(dev))
57 | 
58 |     if labels:
59 |         version_parts.append('+')
60 |         version_parts.append(".".join(labels))
61 | 
62 |     return "".join(version_parts)
63 | 
64 | 
65 | def get_version_from_git():
66 |     try:
67 |         p = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'],
68 |                              cwd=distr_root,
69 |                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
70 |     except OSError:
71 |         return
72 |     if p.wait() != 0:
73 |         return
74 |     if not os.path.samefile(p.communicate()[0].decode().rstrip('\n'),
75 |                             distr_root):
76 |         # The top-level directory of the current Git repository is not the same
77 |         #
as the root directory of the distribution: do not extract the 78 | # version from Git. 79 | return 80 | 81 | # git describe --first-parent does not take into account tags from branches 82 | # that were merged-in. The '--long' flag gets us the 'dev' version and 83 | # git hash, '--always' returns the git hash even if there are no tags. 84 | for opts in [['--first-parent'], []]: 85 | try: 86 | p = subprocess.Popen( 87 | ['git', 'describe', '--long', '--always'] + opts, 88 | cwd=distr_root, 89 | stdout=subprocess.PIPE, stderr=subprocess.PIPE) 90 | except OSError: 91 | return 92 | if p.wait() == 0: 93 | break 94 | else: 95 | return 96 | 97 | description = ( 98 | p.communicate()[0] 99 | .decode() 100 | .strip('v') # Tags can have a leading 'v', but the version should not 101 | .rstrip('\n') 102 | .rsplit('-', 2) # Split the latest tag, commits since tag, and hash 103 | ) 104 | 105 | try: 106 | release, dev, git = description 107 | except ValueError: # No tags, only the git hash 108 | # prepend 'g' to match with format returned by 'git describe' 109 | git = 'g{}'.format(*description) 110 | release = 'unknown' 111 | dev = None 112 | 113 | labels = [] 114 | if dev == "0": 115 | dev = None 116 | else: 117 | labels.append(git) 118 | 119 | try: 120 | p = subprocess.Popen(['git', 'diff', '--quiet'], cwd=distr_root) 121 | except OSError: 122 | labels.append('confused') # This should never happen. 123 | else: 124 | if p.wait() == 1: 125 | labels.append('dirty') 126 | 127 | return Version(release, dev, labels) 128 | 129 | 130 | # TODO: change this logic when there is a git pretty-format 131 | # that gives the same output as 'git describe'. 132 | # Currently we can only tell the tag the current commit is 133 | # pointing to, or its hash (with no version info) 134 | # if it is not tagged. 135 | def get_version_from_git_archive(version_info): 136 | try: 137 | refnames = version_info['refnames'] 138 | git_hash = version_info['git_hash'] 139 | except KeyError: 140 | # These fields are not present if we are running from an sdist. 141 | # Execution should never reach here, though 142 | return None 143 | 144 | if git_hash.startswith('$Format') or refnames.startswith('$Format'): 145 | # variables not expanded during 'git archive' 146 | return None 147 | 148 | VTAG = 'tag: v' 149 | refs = set(r.strip() for r in refnames.split(",")) 150 | version_tags = set(r[len(VTAG):] for r in refs if r.startswith(VTAG)) 151 | if version_tags: 152 | release, *_ = sorted(version_tags) # prefer e.g. "2.0" over "2.0rc1" 153 | return Version(release, dev=None, labels=None) 154 | else: 155 | return Version('unknown', dev=None, labels=['g{}'.format(git_hash)]) 156 | 157 | 158 | __version__ = get_version() 159 | 160 | 161 | # The following section defines a module global 'cmdclass', 162 | # which can be used from setup.py. The 'package_name' and 163 | # '__version__' module globals are used (but not modified). 164 | 165 | def _write_version(fname): 166 | # This could be a hard link, so try to delete it first. Is there any way 167 | # to do this atomically together with opening? 
168 |     try:
169 |         os.remove(fname)
170 |     except OSError:
171 |         pass
172 |     with open(fname, 'w') as f:
173 |         f.write("# This file has been created by setup.py.\n"
174 |                 "version = '{}'\n".format(__version__))
175 | 
176 | 
177 | class _build_py(build_py_orig):
178 |     def run(self):
179 |         super().run()
180 |         _write_version(os.path.join(self.build_lib, package_name,
181 |                                     STATIC_VERSION_FILE))
182 | 
183 | 
184 | class _sdist(sdist_orig):
185 |     def make_release_tree(self, base_dir, files):
186 |         super().make_release_tree(base_dir, files)
187 |         _write_version(os.path.join(base_dir, package_name,
188 |                                     STATIC_VERSION_FILE))
189 | 
190 | 
191 | cmdclass = dict(sdist=_sdist, build_py=_build_py)
192 | 
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | .. summary-start
2 | 
3 | |logo| adaptive
4 | ===============
5 | 
6 | |PyPI| |Conda| |Downloads| |Pipeline status| |DOI| |Binder| |Gitter|
7 | |Documentation| |GitHub|
8 | 
9 | **Tools for adaptive parallel sampling of mathematical functions.**
10 | 
11 | ``adaptive`` is an open-source Python library designed to
12 | make adaptive parallel function evaluation simple. With ``adaptive`` you
13 | just supply a function with its bounds, and it will be evaluated at the
14 | “best” points in parameter space. With just a few lines of code you can
15 | evaluate functions on a computing cluster, live-plot the data as it
16 | returns, and fine-tune the adaptive sampling algorithm.
17 | 
18 | Run the ``adaptive`` example notebook `live on
19 | Binder <https://mybinder.org/v2/gh/python-adaptive/adaptive/master?filepath=learner.ipynb>`_
20 | to see examples of how to use ``adaptive`` or visit the
21 | `tutorial on Read the Docs <https://adaptive.readthedocs.io/en/latest/>`__.
22 | 
23 | .. summary-end
24 | 
25 | **WARNING: adaptive is still in a beta development stage**
26 | 
27 | .. not-in-documentation-start
28 | 
29 | Implemented algorithms
30 | ----------------------
31 | 
32 | The core concept in ``adaptive`` is that of a *learner*. A *learner*
33 | samples a function at the best places in its parameter space to get
34 | maximum “information” about the function. As it evaluates the function
35 | at more and more points in the parameter space, it gets a better idea of
36 | where the best places are to sample next.
37 | 
38 | Of course, what qualifies as the “best places” will depend on your
39 | application domain! ``adaptive`` makes some reasonable default choices,
40 | but the details of the adaptive sampling are completely customizable.
41 | 
42 | The following learners are implemented:
43 | 
44 | - ``Learner1D``, for 1D functions ``f: ℝ → ℝ^N``,
45 | - ``Learner2D``, for 2D functions ``f: ℝ^2 → ℝ^N``,
46 | - ``LearnerND``, for ND functions ``f: ℝ^N → ℝ^M``,
47 | - ``AverageLearner``, for stochastic functions where you want to
48 |   average the result over many evaluations,
49 | - ``IntegratorLearner``, for
50 |   when you want to integrate a 1D function ``f: ℝ → ℝ``,
51 | - ``BalancingLearner``, for when you want to run several learners at once,
52 |   selecting the “best” one each time you get more points.
53 | 
54 | In addition to the learners, ``adaptive`` also provides primitives for
55 | running the sampling across several cores and even several machines,
56 | with built-in support for
57 | `concurrent.futures <https://docs.python.org/3/library/concurrent.futures.html>`_,
58 | `ipyparallel <https://ipyparallel.readthedocs.io/en/latest/>`_ and
59 | `distributed <https://distributed.readthedocs.io/en/latest/>`_.
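As a minimal sketch of what this looks like in practice (the function,
goal, and executor here are only illustrative):

.. code:: python

    import adaptive
    from concurrent.futures import ProcessPoolExecutor

    def f(x):
        return x ** 2

    learner = adaptive.Learner1D(f, bounds=(-1, 1))
    # evaluate 'f' in parallel on local processes until the loss is small enough
    adaptive.BlockingRunner(learner,
                            executor=ProcessPoolExecutor(),
                            goal=lambda l: l.loss() < 0.01)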
60 | 
61 | Examples
62 | --------
63 | 
64 | .. raw:: html
65 | 
66 |    <!-- embedded example animations omitted; not recoverable from this dump -->
67 | 
68 | .. not-in-documentation-end
69 | 
70 | Installation
71 | ------------
72 | 
73 | ``adaptive`` works with Python 3.6 and higher on Linux, Windows, or macOS,
74 | and provides optional extensions for working with the Jupyter/IPython
75 | Notebook.
76 | 
77 | The recommended way to install adaptive is using ``conda``:
78 | 
79 | .. code:: bash
80 | 
81 |     conda install -c conda-forge adaptive
82 | 
83 | ``adaptive`` is also available on PyPI:
84 | 
85 | .. code:: bash
86 | 
87 |     pip install adaptive[notebook]
88 | 
89 | The ``[notebook]`` above will also install the optional dependencies for
90 | running ``adaptive`` inside a Jupyter notebook.
91 | 
92 | Development
93 | -----------
94 | 
95 | Clone the repository and run ``setup.py develop`` to add a link to the
96 | cloned repo into your Python path:
97 | 
98 | .. code:: bash
99 | 
100 |     git clone git@github.com:python-adaptive/adaptive.git
101 |     cd adaptive
102 |     python3 setup.py develop
103 | 
104 | We highly recommend using a Conda environment or a virtualenv to manage
105 | the versions of your installed packages while working on ``adaptive``.
106 | 
107 | In order to not pollute the history with the output of the notebooks,
108 | please set up the git filter by executing
109 | 
110 | .. code:: bash
111 | 
112 |     python ipynb_filter.py
113 | 
114 | in the repository.
115 | 
116 | Credits
117 | -------
118 | 
119 | We would like to give credit to the following people:
120 | 
121 | - Pedro Gonnet for his implementation of `CQUAD `_,
122 |   “Algorithm 4” as described in “Increasing the Reliability of Adaptive
123 |   Quadrature Using Explicit Interpolants”, P. Gonnet, ACM Transactions on
124 |   Mathematical Software, 37 (3), art. no. 26, 2010.
125 | - Pauli Virtanen for his ``AdaptiveTriSampling`` script (no longer
126 |   available online since SciPy Central went down) which served as
127 |   inspiration for the `~adaptive.Learner2D`.
128 | 
129 | .. credits-end
130 | 
131 | For general discussion, we have a `Gitter chat
132 | channel <https://gitter.im/python-adaptive/adaptive>`_. If you find any
133 | bugs or have any feature suggestions please file a GitLab
134 | `issue <https://gitlab.kwant-project.org/qt/adaptive/issues>`_
135 | or submit a `merge
136 | request <https://gitlab.kwant-project.org/qt/adaptive/merge_requests>`_.
137 | 
138 | .. references-start
139 | .. |logo| image:: https://adaptive.readthedocs.io/en/latest/_static/logo.png
140 | .. |PyPI| image:: https://img.shields.io/pypi/v/adaptive.svg
141 |    :target: https://pypi.python.org/pypi/adaptive
142 | .. |Conda| image:: https://img.shields.io/badge/install%20with-conda-green.svg
143 |    :target: https://anaconda.org/conda-forge/adaptive
144 | .. |Downloads| image:: https://img.shields.io/conda/dn/conda-forge/adaptive.svg
145 |    :target: https://anaconda.org/conda-forge/adaptive
146 | .. |Pipeline status| image:: https://gitlab.kwant-project.org/qt/adaptive/badges/master/pipeline.svg
147 |    :target: https://gitlab.kwant-project.org/qt/adaptive/pipelines
148 | .. |DOI| image:: https://img.shields.io/badge/doi-10.5281%2Fzenodo.1182437-blue.svg
149 |    :target: https://doi.org/10.5281/zenodo.1182437
150 | .. |Binder| image:: https://mybinder.org/badge.svg
151 |    :target: https://mybinder.org/v2/gh/python-adaptive/adaptive/master?filepath=learner.ipynb
152 | .. |Gitter| image:: https://img.shields.io/gitter/room/nwjs/nw.js.svg
153 |    :target: https://gitter.im/python-adaptive/adaptive
154 | .. |Documentation| image:: https://readthedocs.org/projects/adaptive/badge/?version=latest
155 |    :target: https://adaptive.readthedocs.io/en/latest/?badge=latest
156 | .. |GitHub| image:: https://img.shields.io/github/stars/python-adaptive/adaptive.svg?style=social
157 |    :target: https://github.com/python-adaptive/adaptive/stargazers
158 | .. references-end
159 | 
--------------------------------------------------------------------------------
/adaptive/notebook_integration.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | import asyncio
4 | from contextlib import suppress
5 | import datetime
6 | import importlib
7 | import random
8 | import warnings
9 | 
10 | 
11 | _async_enabled = False
12 | _holoviews_enabled = False
13 | _ipywidgets_enabled = False
14 | _plotly_enabled = False
15 | 
16 | 
17 | def notebook_extension():
18 |     """Enable ipywidgets, holoviews, and asyncio notebook integration."""
19 |     if not in_ipynb():
20 |         raise RuntimeError('"adaptive.notebook_extension()" may only be run '
21 |                            'from a Jupyter notebook.')
22 | 
23 |     global _async_enabled, _holoviews_enabled, _ipywidgets_enabled
24 | 
25 |     # Load holoviews
26 |     try:
27 |         _holoviews_enabled = False  # After closing a notebook the js is gone
28 |         if not _holoviews_enabled:
29 |             import holoviews
30 |             holoviews.notebook_extension('bokeh', logo=False)
31 |             _holoviews_enabled = True
32 |     except ModuleNotFoundError:
33 |         warnings.warn("holoviews is not installed; plotting "
34 |                       "is disabled.", RuntimeWarning)
35 | 
36 |     # Load ipywidgets
37 |     try:
38 |         if not _ipywidgets_enabled:
39 |             import ipywidgets
40 |             _ipywidgets_enabled = True
41 |     except ModuleNotFoundError:
42 |         warnings.warn("ipywidgets is not installed; live_info "
43 |                       "is disabled.", RuntimeWarning)
44 | 
45 |     # Enable asyncio integration
46 |     if not _async_enabled:
47 |         get_ipython().magic('gui asyncio')
48 |         _async_enabled = True
49 | 
50 | 
51 | def ensure_holoviews():
52 |     try:
53 |         return importlib.import_module('holoviews')
54 |     except ModuleNotFoundError:
55 |         raise RuntimeError('holoviews is not installed; plotting is disabled.')
56 | 
57 | 
58 | def ensure_plotly():
59 |     global _plotly_enabled
60 |     try:
61 |         import plotly
62 |         if not _plotly_enabled:
63 |             import plotly.graph_objs
64 |             import plotly.figure_factory
65 |             import plotly.offline
66 |             # This injects javascript and should happen only once
67 |             plotly.offline.init_notebook_mode()
68 |             _plotly_enabled = True
69 |         return plotly
70 |     except ModuleNotFoundError:
71 |         raise RuntimeError('plotly is not installed; plotting is disabled.')
72 | 
73 | 
74 | def in_ipynb():
75 |     try:
76 |         # If we are running in IPython, then `get_ipython()` is always a global
77 |         return get_ipython().__class__.__name__ == 'ZMQInteractiveShell'
78 |     except NameError:
79 |         return False
80 | 
81 | 
82 | # Fancy displays in the Jupyter notebook
83 | 
84 | active_plotting_tasks = dict()
85 | 
86 | 
87 | def live_plot(runner, *, plotter=None, update_interval=2, name=None):
88 |     """Live plotting of the learner's data.
89 | 
90 |     Parameters
91 |     ----------
92 |     runner : `Runner`
93 |     plotter : function
94 |         A function that takes the learner as an argument and returns a
95 |         holoviews object. By default ``learner.plot()`` will be called.
96 |     update_interval : int
97 |         Number of seconds between updates of the plot.
98 |     name : hashable
99 |         Name for the `live_plot` task in `adaptive.active_plotting_tasks`.
100 |         By default the name is None; if another task with the same name
101 |         already exists, that other `live_plot` is canceled.
102 | 
103 |     Returns
104 |     -------
105 |     dm : `holoviews.core.DynamicMap`
106 |         The plot that automatically updates every `update_interval`.
107 |     """
108 |     if not _holoviews_enabled:
109 |         raise RuntimeError("Live plotting is not enabled; did you run "
110 |                            "'adaptive.notebook_extension()'?")
111 | 
112 |     import holoviews as hv
113 |     import ipywidgets
114 |     from IPython.display import display
115 | 
116 |     if name in active_plotting_tasks:
117 |         active_plotting_tasks[name].cancel()
118 | 
119 |     def plot_generator():
120 |         while True:
121 |             if not plotter:
122 |                 yield runner.learner.plot()
123 |             else:
124 |                 yield plotter(runner.learner)
125 | 
126 |     dm = hv.DynamicMap(plot_generator(),
127 |                        streams=[hv.streams.Stream.define('Next')()])
128 |     cancel_button = ipywidgets.Button(description='cancel live-plot',
129 |                                       layout=ipywidgets.Layout(width='150px'))
130 | 
131 |     # Could have used dm.periodic in the following, but this would either spin
132 |     # off a thread (and learner is not threadsafe) or block the kernel.
133 | 
134 |     async def updater():
135 |         try:
136 |             while not runner.task.done():
137 |                 dm.event()
138 |                 await asyncio.sleep(update_interval)
139 |             dm.event()  # fire off one last update before we die
140 |         finally:
141 |             if active_plotting_tasks[name] is asyncio.Task.current_task():
142 |                 active_plotting_tasks.pop(name, None)
143 |             cancel_button.layout.display = 'none'  # remove cancel button
144 | 
145 |     def cancel(_):
146 |         with suppress(KeyError):
147 |             active_plotting_tasks[name].cancel()
148 | 
149 |     active_plotting_tasks[name] = runner.ioloop.create_task(updater())
150 |     cancel_button.on_click(cancel)
151 | 
152 |     display(cancel_button)
153 |     return dm
154 | 
155 | 
156 | def should_update(status):
157 |     try:
158 |         # Get the size of the write buffer
159 |         buffer_size = len(status.comm.kernel.iopub_thread._events)
160 | 
161 |         # Make sure to only keep all the messages when the notebook
162 |         # is viewed; this means 'buffer_size == 1'. However, when not
163 |         # viewing the notebook the buffer fills up. When this happens
164 |         # we decide to only add messages to it with a certain probability.
165 |         # e.g. if we're offline for 12h, with an update_interval of 0.5s,
166 |         # and without the reduced probability, we have buffer_size=86400.
167 |         # With the correction this is np.log(86400) / np.log(1.1) = 119.2
168 |         return 1.1**buffer_size * random.random() < 1
169 |     except Exception:
170 |         # We catch any Exception because we are using a private API.
171 |         return True
172 | 
173 | 
174 | def live_info(runner, *, update_interval=0.5):
175 |     """Display live information about the runner.
176 | 
177 |     Returns an interactive ipywidget that can be
178 |     visualized in a Jupyter notebook.
179 |     """
180 |     if not _holoviews_enabled:
181 |         raise RuntimeError("Live plotting is not enabled; did you run "
182 |                            "'adaptive.notebook_extension()'?")
183 | 
184 |     import ipywidgets
185 |     from IPython.display import display
186 | 
187 |     status = ipywidgets.HTML(value=_info_html(runner))
188 | 
189 |     cancel = ipywidgets.Button(description='cancel runner',
190 |                                layout=ipywidgets.Layout(width='100px'))
191 |     cancel.on_click(lambda _: runner.cancel())
192 | 
193 |     async def update():
194 |         while not runner.task.done():
195 |             await asyncio.sleep(update_interval)
196 | 
197 |             if should_update(status):
198 |                 status.value = _info_html(runner)
199 |             else:
200 |                 await asyncio.sleep(0.05)
201 | 
202 |         status.value = _info_html(runner)
203 |         cancel.layout.display = 'none'
204 | 
205 |     runner.ioloop.create_task(update())
206 | 
207 |     display(ipywidgets.HBox(
208 |         (status, cancel),
209 |         layout=ipywidgets.Layout(border='solid 1px',
210 |                                  width='200px',
211 |                                  align_items='center'),
212 |     ))
213 | 
214 | 
215 | def _info_html(runner):
216 |     status = runner.status()
217 | 
218 |     color = {'cancelled': 'orange',
219 |              'failed': 'red',
220 |              'running': 'blue',
221 |              'finished': 'green'}[status]
222 | 
223 |     info = [
224 |         ('status', f'<font color="{color}">{status}</font>'),
225 |         ('elapsed time', datetime.timedelta(seconds=runner.elapsed_time())),
226 |         ('overhead', f'{runner.overhead():.2f}%'),
227 |     ]
228 | 
229 |     with suppress(Exception):
230 |         info.append(('# of points', runner.learner.npoints))
231 | 
232 |     with suppress(Exception):
233 |         info.append(('latest loss', f'{runner.learner._cache["loss"]:.3f}'))
234 | 
235 |     template = '<tr><td>{}</td><td>{}</td></tr>'
236 |     table = '\n'.join(template.format(k, v) for k, v in info)
237 | 
238 |     return f'''
239 |         <table>
240 |         {table}
241 |         </table>
242 |     '''
243 | 
--------------------------------------------------------------------------------
/adaptive/tests/test_cquad.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | from functools import partial
4 | from operator import attrgetter
5 | 
6 | import numpy as np
7 | import pytest
8 | 
9 | from adaptive.learner import IntegratorLearner
10 | from adaptive.learner.integrator_learner import DivergentIntegralError
11 | from adaptive.learner.integrator_coeffs import ns
12 | from .algorithm_4 import algorithm_4, f0, f7, f21, f24, f63, fdiv
13 | from .algorithm_4 import DivergentIntegralError as A4DivergentIntegralError
14 | 
15 | eps = np.spacing(1)
16 | 
17 | 
18 | def run_integrator_learner(f, a, b, tol, n):
19 |     learner = IntegratorLearner(f, bounds=(a, b), tol=tol)
20 |     for _ in range(n):
21 |         points, _ = learner.ask(1)
22 |         learner.tell_many(points, map(learner.function, points))
23 |     return learner
24 | 
25 | 
26 | def equal_ival(ival, other, *, verbose=False):
27 |     """Note: Implementing __eq__ breaks SortedContainers in some way."""
28 |     if ival.depth_complete is None:
29 |         if verbose:
30 |             print('Interval {} is not complete.'.format(ival))
31 |         return False
32 | 
33 |     slots = set(ival.__slots__).intersection(other.__slots__)
34 |     same_slots = []
35 |     for s in slots:
36 |         a = getattr(ival, s)
37 |         b = getattr(other, s)
38 |         is_equal = np.allclose(a, b, rtol=0, atol=eps, equal_nan=True)
39 |         if verbose and not is_equal:
40 |             print('ival.{} - other.{} = {}'.format(s, s, a - b))
41 |         same_slots.append(is_equal)
42 | 
43 |     return all(same_slots)
44 | 
45 | 
46 | def equal_ivals(ivals, other, *, verbose=False):
47 |     """Note: `other` is a list of ivals."""
48 |     if len(ivals) != len(other):
49 |         if verbose:
50 |             print('len(ivals)={} != len(other)={}'.format(
51 |                 len(ivals), len(other)))
52 |         return False
53 | 
54 |     ivals = [sorted(i, key=attrgetter('a')) for i in [ivals, other]]
55 |     return all(equal_ival(ival, other_ival, verbose=verbose)
56 |                for ival, other_ival in zip(*ivals))
57 | 
58 | 
59 | def same_ivals(f, a, b, tol):
60 |     igral, err, n, ivals = algorithm_4(f, a, b, tol)
61 | 
62 |     learner = run_integrator_learner(f, a, b, tol, n)
63 | 
64 |     # This will only show up if the test fails, anyway
65 |     print('igral difference', learner.igral-igral,
66 |           'err difference', learner.err - err)
67 | 
68 |     return equal_ivals(learner.ivals, ivals, verbose=True)
69 | 
70 | 
71 | # XXX: This *should* pass (https://gitlab.kwant-project.org/qt/adaptive/issues/84)
72 | @pytest.mark.xfail
73 | def test_that_gives_same_intervals_as_reference_implementation():
74 |     for i, args in enumerate([[f0, 0, 3, 1e-5],
75 |                               [f7, 0, 1, 1e-6],
76 |                               [f21, 0, 1, 1e-3],
77 |                               [f24, 0, 3, 1e-3]]):
78 |         assert same_ivals(*args), 'Function {}'.format(i)
79 | 
80 | 
81 | @pytest.mark.xfail
82 | def test_machine_precision():
83 |     f, a, b, tol = [partial(f63, alpha=0.987654321, beta=0.45), 0, 1, 1e-10]
84 |     igral, err, n, ivals = algorithm_4(f, a, b, tol)
85 | 
86 |     learner = run_integrator_learner(f, a, b, tol, n)
87 | 
88 |     print('igral difference', learner.igral-igral,
89 |           'err difference', learner.err - err)
90 | 
91 |     assert equal_ivals(learner.ivals, ivals, verbose=True)
92 | 
93 | 
94 | def test_machine_precision2():
95 |     f, a, b, tol = [partial(f63, alpha=0.987654321, beta=0.45), 0, 1, 1e-10]
96 |     igral, err, n, ivals = algorithm_4(f, a, b, tol)
97 | 
98 |     learner = run_integrator_learner(f, a, b, tol, n)
99 | 
100 |     np.testing.assert_almost_equal(igral, learner.igral)
101 |     np.testing.assert_almost_equal(err, learner.err)
102 | 
103 | 
104 | def test_divergence():
105 |     """This function should raise a DivergentIntegralError."""
106 |     f, a, b, tol = fdiv, 0, 1, 1e-6
107 |     with pytest.raises(A4DivergentIntegralError) as e:
108 |         igral, err, n, ivals = algorithm_4(f, a, b, tol)
109 | 
110 |     n = e.value.nr_points
111 | 
112 |     with pytest.raises(DivergentIntegralError):
113 |         run_integrator_learner(f, a, b, tol, n)
114 | 
115 | 
116 | def test_choosing_and_adding_points_one_by_one():
117 |     learner = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10)
118 |     for _ in range(1000):
119 |         xs, _ = learner.ask(1)
120 |         for x in xs:
121 |             learner.tell(x, learner.function(x))
122 | 
123 | 
124 | def test_choosing_and_adding_multiple_points_at_once():
125 |     learner = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10)
126 |     xs, _ = learner.ask(100)
127 |     for x in xs:
128 |         learner.tell(x, learner.function(x))
129 | 
130 | 
131 | def test_adding_points_and_skip_one_point():
132 |     learner = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10)
133 |     xs, _ = learner.ask(17)
134 |     skip_x = xs[1]
135 | 
136 |     for x in xs:
137 |         if x != skip_x:
138 |             learner.tell(x, learner.function(x))
139 | 
140 |     for i in range(1000):
141 |         xs, _ = learner.ask(1)
142 |         for x in xs:
143 |             if x != skip_x:
144 |                 learner.tell(x, learner.function(x))
145 | 
146 |     # Now add the point that was skipped
147 |     learner.tell(skip_x, learner.function(skip_x))
148 | 
149 |     # Create a learner with the same number of points, which should
150 |     # give an identical igral value.
151 |     learner2 = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10)
152 |     for i in range(1017):
153 |         xs, _ = learner2.ask(1)
154 |         for x in xs:
155 |             learner2.tell(x, learner2.function(x))
156 | 
157 |     np.testing.assert_almost_equal(learner.igral, learner2.igral)
158 | 
159 | 
160 | # XXX: This *should* pass (https://gitlab.kwant-project.org/qt/adaptive/issues/84)
161 | @pytest.mark.xfail
162 | def test_tell_in_random_order(first_add_33=False):
163 |     from operator import attrgetter
164 |     import random
165 |     tol = 1e-10
166 |     for f, a, b in ([f0, 0, 3],
167 |                     [f21, 0, 1],
168 |                     [f24, 0, 3],
169 |                     [f7, 0, 1],
170 |                     ):
171 |         learners = []
172 | 
173 |         for shuffle in [True, False]:
174 |             l = IntegratorLearner(f, bounds=(a, b), tol=tol)
175 | 
176 |             if first_add_33:
177 |                 xs, _ = l.ask(33)
178 |                 for x in xs:
179 |                     l.tell(x, f(x))
180 | 
181 |             xs, _ = l.ask(10000)
182 | 
183 |             if shuffle:
184 |                 random.shuffle(xs)
185 |             for x in xs:
186 |                 l.tell(x, f(x))
187 | 
188 |             learners.append(l)
189 | 
190 |         # Check whether the points of the learners are identical
191 |         assert set(learners[0].done_points) == set(learners[1].done_points)
192 | 
193 |         # Test whether approximating_intervals gives a complete set of intervals
194 |         for l in learners:
195 |             ivals = sorted(l.approximating_intervals, key=lambda l: l.a)
196 |             for i in range(len(ivals) - 1):
197 |                 assert ivals[i].b == ivals[i + 1].a, (ivals[i], ivals[i + 1])
198 | 
199 |         # Test whether approximating_intervals is the same for a random order of adding the points
200 |         ivals = [sorted(ival, key=attrgetter('a')) for ival in
201 |                  [l.approximating_intervals for l in learners]]
202 |         assert all(ival.a == other_ival.a for ival, other_ival in zip(*ivals))
203 | 
204 |         # Test if the approximating_intervals are the same
205 |         ivals = [set((i.a, i.b) for i in l.approximating_intervals)
206 |                  for l in learners]
207 |         assert ivals[0] == ivals[1]
208 | 
209 |         # Test whether the igral is identical
210 |         assert np.allclose(learners[0].igral,
learners[1].igral), f
211 | 
212 |         # Compare if the errors are in line with the sequential case
213 |         igral, err, *_ = algorithm_4(f, a, b, tol=tol)
214 |         assert all((l.err + err >= abs(l.igral - igral)) for l in learners)
215 | 
216 |         # Check that the errors are finite
217 |         for l in learners:
218 |             assert np.isfinite(l.err)
219 | 
220 | 
221 | # XXX: This *should* pass (https://gitlab.kwant-project.org/qt/adaptive/issues/84)
222 | @pytest.mark.xfail
223 | def test_tell_in_random_order_first_add_33():
224 |     test_tell_in_random_order(first_add_33=True)
225 | 
226 | 
227 | def test_approximating_intervals():
228 |     import random
229 |     learner = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10)
230 | 
231 |     xs, _ = learner.ask(10000)
232 |     random.shuffle(xs)
233 |     for x in xs:
234 |         learner.tell(x, f24(x))
235 | 
236 |     ivals = sorted(learner.approximating_intervals, key=lambda l: l.a)
237 |     for i in range(len(ivals) - 1):
238 |         assert ivals[i].b == ivals[i + 1].a, (ivals[i], ivals[i + 1])
239 | 
240 | 
241 | # XXX: This *should* pass (https://gitlab.kwant-project.org/qt/adaptive/issues/43)
242 | @pytest.mark.xfail
243 | def test_removed_choose_multiple_points_at_once():
244 |     """Given a high-precision interval that was split into 2 low-precision ones,
245 |     we should use the high-precision interval.
246 |     """
247 |     learner = IntegratorLearner(np.exp, bounds=(0, 1), tol=1e-15)
248 |     n = ns[-1] + 2 * (ns[0] - 2)  # first + two children (33+6=39)
249 |     xs, _ = learner.ask(n)
250 |     for x in xs:
251 |         learner.tell(x, learner.function(x))
252 |     assert list(learner.approximating_intervals)[0] == learner.first_ival
253 | 
254 | 
255 | def test_removed_ask_one_by_one():
256 |     with pytest.raises(RuntimeError):
257 |         # This test should raise because integrating np.exp should be done
258 |         # after the 33rd point
259 |         learner = IntegratorLearner(np.exp, bounds=(0, 1), tol=1e-15)
260 |         n = ns[-1] + 2 * (ns[0] - 2)  # first + two children (33+6=39)
261 |         for _ in range(n):
262 |             xs, _ = learner.ask(1)
263 |             for x in xs:
264 |                 learner.tell(x, learner.function(x))
265 | 
--------------------------------------------------------------------------------
/adaptive/tests/test_triangulation.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | from collections import Counter
4 | from math import factorial
5 | import itertools
6 | import pytest
7 | 
8 | import numpy as np
9 | 
10 | from adaptive.learner.triangulation import Triangulation
11 | 
12 | with_dimension = pytest.mark.parametrize('dim', [2, 3, 4])
13 | 
14 | 
15 | def _make_triangulation(points):
16 |     num_vertices = points.shape[1] + 1
17 |     first_simplex, points = points[:num_vertices], points[num_vertices:]
18 |     t = Triangulation(first_simplex)
19 |     for p in points:
20 |         _add_point_with_check(t, p)
21 |     return t
22 | 
23 | 
24 | def _make_standard_simplex(dim):
25 |     """Return the vertices of the standard simplex in dimension 'dim'."""
26 |     return np.vstack((np.zeros(dim), np.eye(dim)))
27 | 
28 | 
29 | def _standard_simplex_volume(dim):
30 |     return 1 / factorial(dim)
31 | 
32 | 
33 | def _check_simplices_are_valid(t):
34 |     """Check that 'simplices' and 'vertex_to_simplices' are consistent."""
35 |     vertex_to_simplices = [set() for _ in t.vertices]
36 | 
37 |     for simplex in t.simplices:
38 |         for vertex in simplex:
39 |             vertex_to_simplices[vertex].add(simplex)
40 |     assert vertex_to_simplices == t.vertex_to_simplices
41 | 
42 | 
43 | def _check_faces_are_valid(t):
44 |     """Check that a 'dim-1'-D face is shared by no
more than 2 simplices.""" 45 | counts = Counter(t.faces()) 46 | assert not any(i > 2 for i in counts.values()), counts 47 | 48 | 49 | def _check_hull_is_valid(t): 50 | """Check that the stored hull is consistent with one computed from scratch.""" 51 | counts = Counter(t.faces()) 52 | hull = set(point 53 | for face, count in counts.items() 54 | if count == 1 55 | for point in face) 56 | 57 | assert t.hull == hull 58 | 59 | 60 | def _check_triangulation_is_valid(t): 61 | _check_simplices_are_valid(t) 62 | _check_faces_are_valid(t) 63 | _check_hull_is_valid(t) 64 | 65 | 66 | def _add_point_with_check(tri, point, simplex=None): 67 | """Check that the difference in simplices before and after adding a point 68 | is returned by tri.add_point""" 69 | old_simplices = tri.simplices.copy() 70 | deleted_simplices, created_simplices = tri.add_point(point, simplex=simplex) 71 | new_simplices = tri.simplices.copy() 72 | 73 | assert deleted_simplices == old_simplices - new_simplices 74 | assert created_simplices == new_simplices - old_simplices 75 | 76 | 77 | def test_triangulation_raises_exception_for_1d_list(): 78 | # We could support 1d, but we don't for now, because it is not relevant 79 | # so a user has to be aware 80 | pts = [0, 1] 81 | with pytest.raises(TypeError): 82 | Triangulation(pts) 83 | 84 | 85 | def test_triangulation_raises_exception_for_1d_points(): 86 | # We could support 1d, but we don't for now, because it is not relevant 87 | # so a user has to be aware 88 | pts = [(0,), (1,)] 89 | with pytest.raises(ValueError): 90 | Triangulation(pts) 91 | 92 | 93 | @with_dimension 94 | def test_triangulation_of_standard_simplex(dim): 95 | t = Triangulation(_make_standard_simplex(dim)) 96 | expected_simplex = tuple(range(dim + 1)) 97 | assert t.simplices == {expected_simplex} 98 | _check_triangulation_is_valid(t) 99 | assert np.isclose(t.volume(expected_simplex), 100 | _standard_simplex_volume(dim)) 101 | 102 | 103 | @with_dimension 104 | def test_zero_volume_initial_simplex_raises_exception(dim): 105 | points = _make_standard_simplex(dim)[:-1] 106 | linearly_dependent_point = np.dot(np.random.random(dim), points) 107 | zero_volume_simplex = np.vstack((points, linearly_dependent_point)) 108 | 109 | assert np.isclose(np.linalg.det(zero_volume_simplex[1:]), 0) # sanity check 110 | 111 | with pytest.raises(ValueError): 112 | Triangulation(zero_volume_simplex) 113 | 114 | 115 | @with_dimension 116 | def test_adding_point_outside_circumscribed_hypersphere_in_positive_orthant(dim): 117 | t = Triangulation(_make_standard_simplex(dim)) 118 | 119 | point_outside_circumscribed_sphere = (1.1,) * dim 120 | _add_point_with_check(t, point_outside_circumscribed_sphere) 121 | 122 | simplex1 = tuple(range(dim + 1)) 123 | simplex2 = tuple(range(1, dim + 2)) 124 | n_vertices = len(t.vertices) 125 | 126 | _check_triangulation_is_valid(t) 127 | assert t.simplices == {simplex1, simplex2} 128 | 129 | # All points are in the hull 130 | assert t.hull == set(range(n_vertices)) 131 | 132 | assert t.vertex_to_simplices[0] == {simplex1} 133 | assert t.vertex_to_simplices[n_vertices - 1] == {simplex2} 134 | 135 | # rest of the points are shared between the 2 simplices 136 | shared_simplices = {simplex1, simplex2} 137 | assert all(t.vertex_to_simplices[v] == shared_simplices 138 | for v in range(1, n_vertices - 1)) 139 | 140 | 141 | @with_dimension 142 | def test_adding_point_outside_standard_simplex_in_negative_orthant(dim): 143 | t = Triangulation(_make_standard_simplex(dim)) 144 | new_point = list(range(-dim, 0)) 145 | 146 | 
_add_point_with_check(t, new_point) 147 | 148 | n_vertices = len(t.vertices) 149 | 150 | initial_simplex = tuple(range(dim + 1)) 151 | 152 | _check_triangulation_is_valid(t) 153 | assert len(t.simplices) == dim + 1 154 | assert initial_simplex in t.simplices 155 | 156 | # Hull consists of all points except the origin 157 | assert set(range(1, n_vertices)) == t.hull 158 | 159 | # Origin belongs to all the simplices 160 | assert t.vertex_to_simplices[0] == t.simplices 161 | 162 | # new point belongs to all the simplices *except* the initial one 163 | assert t.vertex_to_simplices[dim + 1] == t.simplices - {initial_simplex} 164 | 165 | other_points = list(range(1, dim+1)) 166 | last_vertex = n_vertices - 1 167 | extra_simplices = {(0, *points, last_vertex) 168 | for points in itertools.combinations(other_points, dim-1)} 169 | 170 | assert extra_simplices | {initial_simplex} == t.simplices 171 | 172 | 173 | @with_dimension 174 | @pytest.mark.parametrize('provide_simplex', [True, False]) 175 | def test_adding_point_inside_standard_simplex(dim, provide_simplex): 176 | t = Triangulation(_make_standard_simplex(dim)) 177 | first_simplex = tuple(range(dim + 1)) 178 | inside_simplex = (0.1,) * dim 179 | 180 | if provide_simplex: 181 | _add_point_with_check(t, inside_simplex, simplex=first_simplex) 182 | else: 183 | _add_point_with_check(t, inside_simplex) 184 | 185 | added_point = dim + 1 # *index* of added point 186 | 187 | _check_triangulation_is_valid(t) 188 | 189 | other_points = list(range(dim + 1)) 190 | expected_simplices = {(*points, added_point) 191 | for points in itertools.combinations(other_points, dim)} 192 | assert expected_simplices == t.simplices 193 | 194 | assert np.isclose(np.sum(t.volumes()), _standard_simplex_volume(dim)) 195 | 196 | 197 | @with_dimension 198 | def test_adding_point_on_standard_simplex_face(dim): 199 | pts = _make_standard_simplex(dim) 200 | t = Triangulation(pts) 201 | on_simplex = np.average(pts[1:], axis=0) 202 | 203 | _add_point_with_check(t, on_simplex) 204 | added_point = dim + 1 # *index* of added point 205 | 206 | _check_triangulation_is_valid(t) 207 | 208 | other_points = list(range(1, dim+1)) 209 | expected_simplices = {(0, *points, added_point) 210 | for points in itertools.combinations(other_points, dim-1)} 211 | assert expected_simplices == t.simplices 212 | 213 | assert np.isclose(np.sum(t.volumes()), _standard_simplex_volume(dim)) 214 | 215 | 216 | @with_dimension 217 | def test_adding_point_on_standard_simplex_edge(dim): 218 | pts = _make_standard_simplex(dim) 219 | t = Triangulation(pts) 220 | on_edge = np.average(pts[:2], axis=0) 221 | 222 | _add_point_with_check(t, on_edge) 223 | _check_triangulation_is_valid(t) 224 | 225 | other_points = list(range(2, dim+2)) 226 | 227 | new_simplices = {(0, *other_points), 228 | (1, *other_points)} 229 | 230 | assert new_simplices == t.simplices 231 | 232 | assert np.isclose(np.sum(t.volumes()), _standard_simplex_volume(dim)) 233 | 234 | 235 | @with_dimension 236 | def test_adding_point_colinear_with_first_edge(dim): 237 | pts = _make_standard_simplex(dim) 238 | t = Triangulation(pts) 239 | edge_extension = np.multiply(pts[1], 2) 240 | 241 | _add_point_with_check(t, edge_extension) 242 | _check_triangulation_is_valid(t) 243 | 244 | simplex1 = tuple(range(dim + 1)) 245 | simplex2 = tuple(range(1, dim + 2)) 246 | 247 | assert t.simplices == {simplex1, simplex2} 248 | 249 | 250 | @with_dimension 251 | def test_adding_point_coplanar_with_a_face(dim): 252 | pts = _make_standard_simplex(dim) 253 | t = 
Triangulation(pts)
254 |     face_extension = np.sum(pts[:-1], axis=0) * 2
255 | 
256 |     _add_point_with_check(t, face_extension)
257 |     _check_triangulation_is_valid(t)
258 | 
259 |     simplex1 = tuple(range(dim + 1))
260 |     simplex2 = tuple(range(1, dim + 2))
261 | 
262 |     assert t.simplices == {simplex1, simplex2}
263 | 
264 | 
265 | @with_dimension
266 | def test_adding_point_inside_circumscribed_circle(dim):
267 |     pts = _make_standard_simplex(dim)
268 |     t = Triangulation(pts)
269 |     on_simplex = (0.6,) * dim
270 | 
271 |     _add_point_with_check(t, on_simplex)
272 |     added_point = dim + 1  # *index* of added point
273 | 
274 |     _check_triangulation_is_valid(t)
275 | 
276 |     other_points = list(range(1, dim+1))
277 |     new_simplices = {(0, *points, added_point)
278 |                      for points in itertools.combinations(other_points, dim-1)}
279 |     assert new_simplices == t.simplices
280 | 
281 | 
282 | @with_dimension
283 | def test_triangulation_volume_is_less_than_bounding_box(dim):
284 |     eps = 1e-8
285 |     points = np.random.random((10, dim))  # all within the unit hypercube
286 |     t = _make_triangulation(points)
287 | 
288 |     _check_triangulation_is_valid(t)
289 |     assert np.sum(t.volumes()) < 1+eps
290 | 
291 | 
292 | @with_dimension
293 | def test_triangulation_is_deterministic(dim):
294 |     points = np.random.random((10, dim))
295 |     t1 = _make_triangulation(points)
296 |     t2 = _make_triangulation(points)
297 |     assert t1.simplices == t2.simplices
298 | 
299 | 
300 | @with_dimension
301 | def test_initialisation_raises_when_not_enough_points(dim):
302 |     deficient_simplex = _make_standard_simplex(dim)[:-1]
303 | 
304 |     with pytest.raises(ValueError):
305 |         Triangulation(deficient_simplex)
306 | 
307 | 
308 | @with_dimension
309 | def test_initialisation_raises_when_points_coplanar(dim):
310 |     zero_volume_simplex = _make_standard_simplex(dim)[:-1]
311 | 
312 |     new_point1 = np.average(zero_volume_simplex, axis=0)
313 |     new_point2 = np.sum(zero_volume_simplex, axis=0)
314 |     zero_volume_simplex = np.vstack((zero_volume_simplex,
315 |                                      new_point1, new_point2))
316 | 
317 |     with pytest.raises(ValueError):
318 |         Triangulation(zero_volume_simplex)
319 | 
320 | 
321 | @with_dimension
322 | def test_initialisation_accepts_more_than_one_simplex(dim):
323 |     points = _make_standard_simplex(dim)
324 |     new_point = [1.1] * dim  # Point opposing the origin but outside the circumsphere
325 |     points = np.vstack((points, new_point))
326 | 
327 |     tri = Triangulation(points)
328 | 
329 |     simplex1 = tuple(range(dim+1))
330 |     simplex2 = tuple(range(1, dim+2))
331 | 
332 |     _check_triangulation_is_valid(tri)
333 | 
334 |     assert tri.simplices == {simplex1, simplex2}
335 | 
--------------------------------------------------------------------------------
/adaptive/tests/test_learner1d.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | import random
4 | import numpy as np
5 | 
6 | from adaptive.learner import Learner1D
7 | from adaptive.learner.learner1D import curvature_loss_function
8 | from adaptive.runner import simple
9 | 
10 | 
11 | def test_pending_loss_intervals():
12 |     # https://gitlab.kwant-project.org/qt/adaptive/issues/99
13 |     l = Learner1D(lambda x: x, (0, 4))
14 | 
15 |     l.tell(0, 0)
16 |     l.tell(1, 0)
17 |     l.tell(2, 0)
18 |     assert set(l.losses_combined.keys()) == {(0, 1), (1, 2)}
19 |     l.ask(1)
20 |     assert set(l.losses_combined.keys()) == {(0, 1), (1, 2), (2, 4)}
21 |     l.tell(3.5, 0)
22 |     assert set(l.losses_combined.keys()) == {
23 |         (0, 1), (1, 2), (2, 3.5), (3.5, 4.0)}
24 | 
25 | 
26 | def test_loss_interpolation_for_unasked_point():
27 |     # https://gitlab.kwant-project.org/qt/adaptive/issues/99
28 |     l = Learner1D(lambda x: x, (0, 4))
29 | 
30 |     l.tell(0, 0)
31 |     l.tell(1, 0)
32 |     l.tell(2, 0)
33 | 
34 |     assert l.ask(1) == ([4], [np.inf])
35 |     assert l.losses == {(0, 1): 0.25, (1, 2): 0.25}
36 |     assert l.losses_combined == {(0, 1): 0.25, (1, 2): 0.25, (2, 4.0): np.inf}
37 | 
38 |     # assert l.ask(1) == ([3], [np.inf])  # XXX: This doesn't return np.inf as loss_improvement...
39 |     l.ask(1)
40 |     assert l.losses == {(0, 1): 0.25, (1, 2): 0.25}
41 |     assert l.losses_combined == {
42 |         (0, 1): 0.25, (1, 2): 0.25, (2, 3.0): np.inf, (3.0, 4.0): np.inf}
43 | 
44 |     l.tell(4, 0)
45 | 
46 |     assert l.losses_combined == {
47 |         (0, 1): 0.25, (1, 2): 0.25, (2, 3): 0.25, (3, 4): 0.25}
48 | 
49 | 
50 | def test_first_iteration():
51 |     """Edge cases where we ask for a few points at the start."""
52 |     learner = Learner1D(lambda x: None, (-1, 1))
53 |     points, loss_improvements = learner.ask(2)
54 |     assert set(points) == set(learner.bounds)
55 | 
56 |     learner = Learner1D(lambda x: None, (-1, 1))
57 |     points, loss_improvements = learner.ask(3)
58 |     assert set(points) == set([-1, 0, 1])
59 | 
60 |     learner = Learner1D(lambda x: None, (-1, 1))
61 |     points, loss_improvements = learner.ask(1)
62 |     assert len(points) == 1 and points[0] in learner.bounds
63 |     rest = set([-1, 0, 1]) - set(points)
64 |     points, loss_improvements = learner.ask(2)
65 |     assert set(points) == set(rest)
66 | 
67 |     learner = Learner1D(lambda x: None, (-1, 1))
68 |     points, loss_improvements = learner.ask(1)
69 |     to_see = set(learner.bounds) - set(points)
70 |     points, loss_improvements = learner.ask(1)
71 |     assert set(points) == set(to_see)
72 | 
73 |     learner = Learner1D(lambda x: None, (-1, 1))
74 |     learner.tell(1, 0)
75 |     points, loss_improvements = learner.ask(1)
76 |     assert points == [-1]
77 | 
78 |     learner = Learner1D(lambda x: None, (-1, 1))
79 |     learner.tell(-1, 0)
80 |     points, loss_improvements = learner.ask(1)
81 |     assert points == [1]
82 | 
83 | 
84 | def test_loss_interpolation():
85 |     learner = Learner1D(lambda _: 0, bounds=(-1, 1))
86 | 
87 |     learner.tell(-1, 0)
88 |     learner.tell(1, 0)
89 |     for i in range(100):
90 |         # Add 100 points with either None or 0
91 |         if random.random() < 0.9:
92 |             learner.tell_pending(random.uniform(-1, 1))
93 |         else:
94 |             learner.tell(random.uniform(-1, 1), 0)
95 | 
96 |     for (x1, x2), loss in learner.losses_combined.items():
97 |         expected_loss = (x2 - x1) / 2
98 |         assert abs(expected_loss - loss) < 1e-15, (expected_loss, loss)
99 | 
100 | 
101 | def _run_on_discontinuity(x_0, bounds):
102 | 
103 |     def f(x):
104 |         return -1 if x < x_0 else +1
105 | 
106 |     learner = Learner1D(f, bounds)
107 |     while learner.loss() > 0.1:
108 |         (x,), _ = learner.ask(1)
109 |         learner.tell(x, learner.function(x))
110 | 
111 |     return learner
112 | 
113 | 
114 | def test_termination_on_discontinuities():
115 | 
116 |     learner = _run_on_discontinuity(0, (-1, 1))
117 |     smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
118 |     assert smallest_interval >= np.finfo(float).eps
119 | 
120 |     learner = _run_on_discontinuity(1, (-2, 2))
121 |     smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
122 |     assert smallest_interval >= np.finfo(float).eps
123 | 
124 |     learner = _run_on_discontinuity(0.5e3, (-1e3, 1e3))
125 |     smallest_interval = min(abs(a - b) for a, b in learner.losses.keys())
126 |     assert smallest_interval >= 0.5e3 * np.finfo(float).eps
127 | 
128 | 
129 | def test_order_adding_points():
130 |     # and https://gitlab.kwant-project.org/qt/adaptive/issues/98
131 |     l = Learner1D(lambda x: x, (0, 1))
132 |     l.tell_many([1, 0, 0.5], [0, 0, 0])
133 |     assert l.losses_combined == {(0, 0.5): 0.5, (0.5, 1): 0.5}
134 |     assert l.losses == {(0, 0.5): 0.5, (0.5, 1): 0.5}
135 |     l.ask(1)
136 | 
137 | 
138 | def test_adding_existing_point_passes_silently():
139 |     # See https://gitlab.kwant-project.org/qt/adaptive/issues/97
140 |     l = Learner1D(lambda x: x, (0, 4))
141 |     l.tell(0, 0)
142 |     l.tell(1, 0)
143 |     l.tell(2, 0)
144 |     l.tell(1, 100)
145 | 
146 | 
147 | def test_loss_at_machine_precision_interval_is_zero():
148 |     """The loss of an interval smaller than _dx_eps
149 |     should be set to zero."""
150 |     def f(x):
151 |         return 1 if x == 0 else 0
152 | 
153 |     def goal(l):
154 |         return l.loss() < 0.01 or l.npoints >= 1000
155 | 
156 |     learner = Learner1D(f, bounds=(-1, 1))
157 |     simple(learner, goal=goal)
158 | 
159 |     # this means loss < 0.01 was reached
160 |     assert learner.npoints != 1000
161 | 
162 | 
163 | def small_deviations(x):
164 |     return 0 if x <= 1 else 1 + 10**(-random.randint(12, 14))
165 | 
166 | 
167 | def test_small_deviations():
168 |     """This tests whether the Learner1D can handle small deviations.
169 |     See https://gitlab.kwant-project.org/qt/adaptive/merge_requests/73 and
170 |     https://gitlab.kwant-project.org/qt/adaptive/issues/61."""
171 | 
172 |     eps = 5e-14
173 |     learner = Learner1D(small_deviations, bounds=(1 - eps, 1 + eps))
174 | 
175 |     # Some non-determinism is needed to make this test fail so we keep
176 |     # a list of points that will be evaluated later to emulate
177 |     # parallel execution
178 |     stash = []
179 | 
180 |     for i in range(100):
181 |         xs, _ = learner.ask(10)
182 | 
183 |         # Save 5 random points out of `xs` for later
184 |         random.shuffle(xs)
185 |         for _ in range(5):
186 |             stash.append(xs.pop())
187 | 
188 |         for x in xs:
189 |             learner.tell(x, learner.function(x))
190 | 
191 |         # Evaluate and add 5 random points from `stash`
192 |         random.shuffle(stash)
193 |         for _ in range(5):
194 |             x = stash.pop(); learner.tell(x, learner.function(x))
195 | 
196 |         if learner.loss() == 0:
197 |             # If this condition is met, the learner can't return any
198 |             # more points.
199 |             break
200 | 
201 | 
202 | def test_uniform_sampling1D_v2():
203 |     def check(known, expect):
204 |         def f(x):
205 |             return x
206 |         learner = Learner1D(f, bounds=(-1, 1))
207 |         for x in known:
208 |             learner.tell(x, f(x))
209 |         pts, _ = learner.ask(len(expect))
210 |         assert set(pts) == expect
211 | 
212 |     check([-1, 0, 1], {-0.5, 0.5})
213 |     check([-1, -0.5, 1], {0, 0.5})
214 |     check([-1, 0.5, 1], {-0.5, 0})
215 |     check([-1, 0], {1})
216 |     # although the following test might be unexpected, this is indeed correct
217 |     # given the default loss function
218 |     check([-1, 0], {-.5, 1})
219 |     check([-1, -.5], {-.75, 1})
220 |     check([-1, -.5], {-.75, .25, 1})
221 | 
222 | 
223 | def test_add_data_unordered():
224 |     # see https://gitlab.kwant-project.org/qt/adaptive/issues/95
225 |     learner = Learner1D(lambda x: x, bounds=(-1, 1))
226 |     xs = [-1, 1, 0]
227 | 
228 |     ys = [learner.function(x) for x in xs]
229 |     for x, y in zip(xs, ys):
230 |         learner.tell(x, y)
231 | 
232 |     learner.ask(3)
233 | 
234 | 
235 | def test_ask_does_not_return_known_points_when_returning_bounds():
236 |     learner = Learner1D(lambda x: None, (-1, 1))
237 |     learner.tell(0, 0)
238 |     points, _ = learner.ask(3)
239 |     assert 0 not in points
240 | 
241 | 
242 | def test_tell_many():
243 |     def f(x, offset=0.123214):
244 |         a = 0.01
245 |         return (np.sin(x**2) + np.sin(x**5)
246 |                 + a**2 / (a**2 + (x - offset)**2)
247 |                 + x**2 + 1e-5 * x**3)
248 | 
249 |     def f_vec(x, offset=0.123214):
250 |         a = 0.01
251 |         y = x + a**2 / (a**2 + (x - offset)**2)
252 |         return [y, 0.5 * y, y**2]
253 | 
254 |     def assert_equal_dicts(d1, d2):
255 |         xs1, ys1 = zip(*sorted(d1.items()))
256 |         xs2, ys2 = zip(*sorted(d2.items()))
257 |         ys1 = np.array(ys1, dtype=np.float)
258 |         ys2 = np.array(ys2, dtype=np.float)
259 |         np.testing.assert_almost_equal(xs1, xs2)
260 |         np.testing.assert_almost_equal(ys1, ys2)
261 | 
262 |     def test_equal(l1, l2):
263 |         assert_equal_dicts(l1.neighbors, l2.neighbors)
264 |         assert_equal_dicts(l1.neighbors_combined, l2.neighbors_combined)
265 |         assert_equal_dicts(l1.data, l2.data)
266 |         assert_equal_dicts(l2.losses, l1.losses)
267 |         assert_equal_dicts(l2.losses_combined, l1.losses_combined)
268 |         np.testing.assert_almost_equal(sorted(l1.pending_points),
269 |                                        sorted(l2.pending_points))
270 |         np.testing.assert_almost_equal(l1._bbox[1], l2._bbox[1])
271 |         assert l1._scale == l2._scale
272 |         assert l1._bbox[0] == l2._bbox[0]
273 | 
274 |     for function in [f, f_vec]:
275 |         learner = Learner1D(function, bounds=(-1, 1))
276 |         learner2 = Learner1D(function, bounds=(-1, 1))
277 |         simple(learner, goal=lambda l: l.npoints > 200)
278 |         xs, ys = zip(*learner.data.items())
279 | 
280 |         # Make the scale huge to not get a scale doubling
281 |         x = 1e-6
282 |         max_value = 1e6 if learner.vdim == 1 else np.array(learner.vdim * [1e6])
283 |         learner.tell(x, max_value)
284 |         learner2.tell(x, max_value)
285 | 
286 |         for x in xs:
287 |             learner2.tell_pending(x)
288 | 
289 |         learner2.tell_many(xs, ys)
290 |         test_equal(learner, learner2)
291 | 
292 |     # Test non-determinism. We keep a list of points that will be
293 |     # evaluated later to emulate parallel execution.
294 |     def _random_run(learner, learner2, scale_doubling=True):
295 |         if not scale_doubling:
296 |             # Make the scale huge to not get a scale doubling
297 |             x = 1e-6
298 |             max_value = 1e6
299 |             learner.tell(x, max_value)
300 |             learner2.tell(x, max_value)
301 | 
302 |         stash = []
303 |         for i in range(10):
304 |             xs, _ = learner.ask(10)
305 |             for x in xs:
306 |                 learner2.tell_pending(x)
307 | 
308 |             # Save 5 random points out of `xs` for later
309 |             random.shuffle(xs)
310 |             for _ in range(5):
311 |                 stash.append(xs.pop())
312 | 
313 |             ys = [learner.function(x) for x in xs]
314 | 
315 |             learner.tell_many(xs, ys, force=True)
316 |             for x, y in zip(xs, ys):
317 |                 learner2.tell(x, y)
318 | 
319 |             # Evaluate and add N random points from `stash`
320 |             random.shuffle(stash)
321 |             xs = [stash.pop() for _ in range(random.randint(1, 5))]
322 |             ys = [learner.function(x) for x in xs]
323 | 
324 |             learner.tell_many(xs, ys, force=True)
325 |             for x, y in zip(xs, ys):
326 |                 learner2.tell(x, y)
327 | 
328 |         if scale_doubling:
329 |             # Double the scale to trigger the loss updates
330 |             max_value = max(learner.data.values())
331 |             x = 1e-6
332 |             learner.tell(x, max_value * 10)
333 |             learner2.tell(x, max_value * 10)
334 | 
335 |     learner = Learner1D(f, bounds=(-1, 1))
336 |     learner2 = Learner1D(f, bounds=(-1, 1))
337 |     _random_run(learner, learner2, scale_doubling=False)
338 |     test_equal(learner, learner2)
339 | 
340 |     learner = Learner1D(f, bounds=(-1, 1))
341 |     learner2 = Learner1D(f, bounds=(-1, 1))
342 |     _random_run(learner, learner2, scale_doubling=True)
343 |     test_equal(learner, learner2)
344 | 
345 | 
346 | def test_curvature_loss():
347 |     def f(x):
348 |         return np.tanh(20*x)
349 | 
350 |     loss = curvature_loss_function()
351 |     assert loss.nth_neighbors == 1
352 |     learner = Learner1D(f, (-1, 1), loss_per_interval=loss)
353 |     simple(learner, goal=lambda l: l.npoints > 100)
354 |     assert learner.npoints > 100
355 | 
356 | 
357 | def test_curvature_loss_vectors():
358 |     def f(x):
359 |         return np.tanh(20*x), np.tanh(20*(x-0.4))
360 | 
361 |     loss = curvature_loss_function()
362 |     assert loss.nth_neighbors == 1
363 |     learner = Learner1D(f, (-1, 1), loss_per_interval=loss)
364 |     simple(learner, goal=lambda l: l.npoints > 100)
365 |     assert learner.npoints > 100
366 | 
--------------------------------------------------------------------------------
/docs/source/tutorial/tutorial.advanced-topics.rst:
--------------------------------------------------------------------------------
1 | Advanced Topics
2 | ===============
3 | 
4 | .. note::
5 |    Because this documentation consists of static html, the ``live_plot``
6 |    and ``live_info`` widgets are not live. Download the notebook
7 |    in order to see the real behaviour.
8 | 
9 | .. seealso::
10 |     The complete source code of this tutorial can be found in
11 |     :jupyter-download:notebook:`tutorial.advanced-topics`
12 | 
13 | .. jupyter-execute::
14 |     :hide-code:
15 | 
16 |     import adaptive
17 |     adaptive.notebook_extension()
18 | 
19 |     import asyncio
20 |     from functools import partial
21 |     import random
22 | 
23 |     offset = random.uniform(-0.5, 0.5)
24 | 
25 |     def f(x, offset=offset):
26 |         a = 0.01
27 |         return x + a**2 / (a**2 + (x - offset)**2)
28 | 
29 | 
30 | Saving and loading learners
31 | ---------------------------
32 | 
33 | Every learner has a `~adaptive.BaseLearner.save` and `~adaptive.BaseLearner.load`
34 | method that can be used to save and load **only** the data of a learner.
35 | 
36 | Use the ``fname`` argument in ``learner.save(fname=...)``.
37 | 
38 | Or, when using a `~adaptive.BalancingLearner` one can use either a callable
39 | that takes the child learner and returns a filename **or** a list of filenames.
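For example, a minimal sketch (the filename scheme here is only
illustrative):

.. code:: python

    learners = [adaptive.Learner1D(partial(f, offset=o), bounds=(-1, 1))
                for o in [-0.2, 0.0, 0.2]]
    bal_learner = adaptive.BalancingLearner(learners)

    # a callable that maps each child learner to its own file
    fname = lambda learner: f'data/learner_{learners.index(learner)}.p'
    # bal_learner.save(fname)  # and later: bal_learner.load(fname)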
44 | .. jupyter-execute::
45 | 
46 |     # Let's create two learners and run only one.
47 |     learner = adaptive.Learner1D(f, bounds=(-1, 1))
48 |     control = adaptive.Learner1D(f, bounds=(-1, 1))
49 | 
50 |     # Let's only run the learner
51 |     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
52 | 
53 | .. jupyter-execute::
54 |     :hide-code:
55 | 
56 |     await runner.task  # This is not needed in a notebook environment!
57 | 
58 | .. jupyter-execute::
59 | 
60 |     runner.live_info()
61 | 
62 | .. jupyter-execute::
63 | 
64 |     fname = 'data/example_file.p'
65 |     learner.save(fname)
66 |     control.load(fname)
67 | 
68 |     (learner.plot().relabel('saved learner')
69 |      + control.plot().relabel('loaded learner'))
70 | 
71 | Or just (without saving):
72 | 
73 | .. jupyter-execute::
74 | 
75 |     control = adaptive.Learner1D(f, bounds=(-1, 1))
76 |     control.copy_from(learner)
77 | 
78 | One can also periodically save the learner while running in a
79 | `~adaptive.Runner`. Use it like:
80 | 
81 | .. jupyter-execute::
82 | 
83 |     def slow_f(x):
84 |         from time import sleep
85 |         sleep(5)
86 |         return x
87 | 
88 |     learner = adaptive.Learner1D(slow_f, bounds=[0, 1])
89 |     runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100)
90 |     runner.start_periodic_saving(save_kwargs=dict(fname='data/periodic_example.p'), interval=6)
91 | 
92 | .. jupyter-execute::
93 |     :hide-code:
94 | 
95 |     await asyncio.sleep(6)  # This is not needed in a notebook environment!
96 |     runner.cancel()
97 | 
98 | .. jupyter-execute::
99 | 
100 |     runner.live_info()  # we cancelled it after 6 seconds
101 | 
102 | .. jupyter-execute::
103 | 
104 |     # See the data 6 seconds later with
105 |     !ls -lah data  # only works on macOS and Linux systems
106 | 
107 | 
108 | A watched pot never boils!
109 | --------------------------
110 | 
111 | `adaptive.Runner` does its work in an `asyncio` task that runs
112 | concurrently with the IPython kernel, when using ``adaptive`` from a
113 | Jupyter notebook. This is advantageous because it allows us to do things
114 | like live-updating plots; however, it can trip you up if you’re not
115 | careful.
116 | 
117 | Notably: **if you block the IPython kernel, the runner will not do any
118 | work**.
119 | 
120 | For example if you wanted to wait for a runner to complete, **do not
121 | wait in a busy loop**:
122 | 
123 | .. code:: python
124 | 
125 |     while not runner.task.done():
126 |         pass
127 | 
128 | If you do this then **the runner will never finish**.
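From a coroutine (or a notebook that supports top-level ``await``) you can
instead ``await`` the runner: this suspends your code while the event loop
keeps driving the runner. This is the same pattern used in the "Using
Runners from a script" section below:

.. code:: python

    await runner.task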
129 | 
130 | What to do if you don’t care about live plotting, and just want to run
131 | something until it’s done?
132 | 
133 | The simplest way to accomplish this is to use
134 | `adaptive.BlockingRunner`:
135 | 
136 | .. jupyter-execute::
137 | 
138 |     learner = adaptive.Learner1D(f, bounds=(-1, 1))
139 |     adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)
140 |     # This will only get run after the runner has finished
141 |     learner.plot()
142 | 
143 | Reproducibility
144 | ---------------
145 | 
146 | By default ``adaptive`` runners evaluate the learned function in
147 | parallel across several cores. The runners are also opportunistic, in
148 | that as soon as a result is available they will feed it to the learner
149 | and request another point to replace the one that just finished.
150 | 
151 | Because the order in which computations complete is non-deterministic,
152 | this means that the runner behaves in a non-deterministic way. Adaptive
153 | makes this choice because in many cases the speedup from parallel
154 | execution is worth sacrificing the “purity” of exactly reproducible
155 | computations.
156 | 
157 | Nevertheless it is still possible to run a learner in a deterministic
158 | way with adaptive.
159 | 
160 | The simplest way is to use `adaptive.runner.simple` to run your
161 | learner:
162 | 
163 | .. jupyter-execute::
164 | 
165 |     learner = adaptive.Learner1D(f, bounds=(-1, 1))
166 | 
167 |     # blocks until completion
168 |     adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01)
169 | 
170 |     learner.plot()
171 | 
172 | Note that unlike `adaptive.Runner`, `adaptive.runner.simple`
173 | *blocks* until it is finished.
174 | 
175 | If you want to enable determinism but still want to use the
176 | non-blocking `adaptive.Runner`, you can use the
177 | `adaptive.runner.SequentialExecutor`:
178 | 
179 | .. jupyter-execute::
180 | 
181 |     from adaptive.runner import SequentialExecutor
182 | 
183 |     learner = adaptive.Learner1D(f, bounds=(-1, 1))
184 | 
185 |     runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.01)
186 | 
187 | .. jupyter-execute::
188 |     :hide-code:
189 | 
190 |     await runner.task  # This is not needed in a notebook environment!
191 | 
192 | .. jupyter-execute::
193 | 
194 |     runner.live_info()
195 | 
196 | .. jupyter-execute::
197 | 
198 |     runner.live_plot(update_interval=0.1)
199 | 
200 | Cancelling a runner
201 | -------------------
202 | 
203 | Sometimes you want to interactively explore a parameter space: you want
204 | the function to be evaluated at finer and finer resolution while you
205 | manually control when the calculation stops.
206 | 
207 | If no ``goal`` is provided to a runner then the runner will run until
208 | cancelled.
209 | 
210 | ``runner.live_info()`` will provide a button that can be clicked to stop
211 | the runner. You can also stop the runner programmatically using
212 | ``runner.cancel()``.
213 | 
214 | .. jupyter-execute::
215 | 
216 |     learner = adaptive.Learner1D(f, bounds=(-1, 1))
217 |     runner = adaptive.Runner(learner)
218 | 
219 | .. jupyter-execute::
220 |     :hide-code:
221 | 
222 |     await asyncio.sleep(0.1)  # This is not needed in the notebook!
223 | 
224 | .. jupyter-execute::
225 | 
226 |     runner.cancel()  # Let's execute this after 0.1 seconds
227 | 
228 | .. jupyter-execute::
229 | 
230 |     runner.live_info()
231 | 
232 | .. jupyter-execute::
233 | 
234 |     runner.live_plot(update_interval=0.1)
235 | 
236 | .. jupyter-execute::
237 | 
238 |     print(runner.status())
239 | 
240 | Debugging Problems
241 | ------------------
242 | 
243 | Runners work in the background with respect to the IPython kernel, which
244 | is convenient, but also means that inspecting errors is more
245 | difficult because exceptions will not be raised directly in the
246 | notebook. Often the only indication you will have that something has
247 | gone wrong is that nothing will be happening.
248 | 
249 | Let’s look at the following example, where the function to be learned
250 | will raise an exception 10% of the time.
251 | 
252 | .. jupyter-execute::
253 | 
254 |     def will_raise(x):
255 |         from random import random
256 |         from time import sleep
257 | 
258 |         sleep(random())
259 |         if random() < 0.1:
260 |             raise RuntimeError('something went wrong!')
261 |         return x**2
262 | 
263 |     learner = adaptive.Learner1D(will_raise, (-1, 1))
264 |     runner = adaptive.Runner(learner)  # without 'goal' the runner will run forever unless cancelled
265 | 
266 | 
267 | .. jupyter-execute::
268 |     :hide-code:
269 | 
270 |     await asyncio.sleep(4)  # in 4 seconds it will surely have failed
271 | 
272 | .. jupyter-execute::
273 | 
274 |     runner.live_info()
275 | 
276 | .. jupyter-execute::
277 | 
278 |     runner.live_plot()
279 | 
280 | The above runner should continue forever, but we notice that it stops
281 | after a few points are evaluated.
282 | 
283 | First we should check that the runner has really finished:
284 | 
285 | .. jupyter-execute::
286 | 
287 |     runner.task.done()
288 | 
289 | If it has indeed finished then we should check the ``result`` of the
290 | runner. This should be ``None`` if the runner stopped successfully. If
291 | the runner stopped due to an exception then asking for the result will
292 | raise the exception with the stack trace:
293 | 
294 | .. jupyter-execute::
295 | 
296 |     runner.task.result()
297 | 
298 | 
299 | You can also check ``runner.tracebacks``, which is a mapping from
300 | point → traceback.
301 | 
302 | .. jupyter-execute::
303 | 
304 |     for point, tb in runner.tracebacks.items():
305 |         print(f'point: {point}:\n {tb}')
306 | 
307 | Logging runners
308 | ~~~~~~~~~~~~~~~
309 | 
310 | Runners do their job in the background, which makes introspection quite
311 | cumbersome. One way to inspect runners is to instantiate one with
312 | ``log=True``:
313 | 
314 | .. jupyter-execute::
315 | 
316 |     learner = adaptive.Learner1D(f, bounds=(-1, 1))
317 |     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01,
318 |                              log=True)
319 | 
320 | .. jupyter-execute::
321 |     :hide-code:
322 | 
323 |     await runner.task  # This is not needed in a notebook environment!
324 | 
325 | .. jupyter-execute::
326 | 
327 |     runner.live_info()
328 | 
329 | This gives the runner a ``log`` attribute, which is a list of the
330 | ``learner`` methods that were called, as well as their arguments. This
331 | is useful because executors typically execute their tasks in a
332 | non-deterministic order.
333 | 
334 | This can be used with `adaptive.runner.replay_log` to perform the same
335 | set of operations on another runner:
336 | 
337 | .. jupyter-execute::
338 | 
339 |     reconstructed_learner = adaptive.Learner1D(f, bounds=learner.bounds)
340 |     adaptive.runner.replay_log(reconstructed_learner, runner.log)
341 | 
342 | .. jupyter-execute::
343 | 
344 |     learner.plot().Scatter.I.opts(style=dict(size=6)) * reconstructed_learner.plot()
345 | 
346 | Adding coroutines
347 | -----------------
348 | 
349 | In the following example we'll add a `~asyncio.Task` that times the runner.
350 | This is *only* for demonstration purposes because one can simply
351 | check ``runner.elapsed_time()`` or use the ``runner.live_info()``
352 | widget to see the time since the runner has started.
353 | 
354 | So let's get on with the example. To time the runner
355 | you **cannot** simply use
356 | 
357 | .. code:: python
358 | 
359 |     now = datetime.now()
360 |     runner = adaptive.Runner(...)
361 |     print(datetime.now() - now)
362 | 
363 | because this will be done immediately. Also blocking the kernel with
364 | ``while not runner.task.done()`` will not work because the runner will
365 | not do anything when the kernel is blocked.
366 | 
367 | Therefore you need to create an ``async`` function and hook it into the
368 | ``ioloop`` like so:
369 | 
370 | .. jupyter-execute::
371 | 
372 |     import asyncio
373 | 
374 |     async def time(runner):
375 |         from datetime import datetime
376 |         now = datetime.now()
377 |         await runner.task
378 |         return datetime.now() - now
379 | 
380 |     ioloop = asyncio.get_event_loop()
381 | 
382 |     learner = adaptive.Learner1D(f, bounds=(-1, 1))
383 |     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
384 | 
385 |     timer = ioloop.create_task(time(runner))
386 | 
387 | .. jupyter-execute::
388 |     :hide-code:
389 | 
390 |     await runner.task  # This is not needed in a notebook environment!
391 | 
392 | .. jupyter-execute::
393 | 
394 |     # The result will only be set when the runner is done.
395 |     timer.result()
396 | 
397 | Using Runners from a script
398 | ---------------------------
399 | 
400 | Runners can also be used from a Python script independently of the
401 | notebook.
402 | 
403 | The simplest way to accomplish this is to use the
404 | `~adaptive.BlockingRunner`:
405 | 
406 | .. code:: python
407 | 
408 |     import adaptive
409 | 
410 |     def f(x):
411 |         return x
412 | 
413 |     learner = adaptive.Learner1D(f, (-1, 1))
414 | 
415 |     adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.1)
416 | 
417 | If you already use `asyncio` in your script and want to integrate
418 | ``adaptive`` into it, then you can use the default `~adaptive.Runner` as you
419 | would from a notebook. If you want to wait for the runner to finish,
420 | then you can simply
421 | 
422 | .. code:: python
423 | 
424 |     await runner.task
425 | 
426 | from within a coroutine.
427 | 
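For example, a minimal self-contained script could look like the sketch
below (the function, the ``goal``, and the use of ``asyncio.run``, which
requires Python ≥ 3.7, are illustrative choices):

.. code:: python

    import asyncio

    import adaptive

    def f(x):
        return x**2

    async def main():
        learner = adaptive.Learner1D(f, bounds=(-1, 1))
        runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
        await runner.task  # wait until the goal is reached
        print(f'done, sampled {learner.npoints} points')

    asyncio.run(main())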
--------------------------------------------------------------------------------
/adaptive/learner/balancing_learner.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | from collections import defaultdict; from collections.abc import Iterable  # note: 'Iterable' lives in 'collections.abc'
4 | from contextlib import suppress
5 | from functools import partial
6 | from operator import itemgetter
7 | import os.path
8 | 
9 | import numpy as np
10 | 
11 | from adaptive.learner.base_learner import BaseLearner
12 | from adaptive.notebook_integration import ensure_holoviews
13 | from adaptive.utils import cache_latest, named_product, restore
14 | 
15 | 
16 | def dispatch(child_functions, arg):
17 |     index, x = arg  # 'arg' is a (learner_index, point) pair
18 |     return child_functions[index](x)
19 | 
20 | 
21 | class BalancingLearner(BaseLearner):
22 |     """Choose the optimal points from a set of learners.
23 | 
24 |     Parameters
25 |     ----------
26 |     learners : sequence of `~adaptive.BaseLearner`\s
27 |         The learners from which to choose. These must all have the same type.
28 |     cdims : sequence of dicts, or (keys, iterable of values), optional
29 |         Constant dimensions; the parameters that label the learners. Used
30 |         in `plot`.
31 |         Example inputs that all give identical results:
32 | 
33 |         - sequence of dicts:
34 | 
35 |             >>> cdims = [{'A': True, 'B': 0},
36 |             ...          {'A': True, 'B': 1},
37 |             ...          {'A': False, 'B': 0},
38 |             ...          {'A': False, 'B': 1}]
39 | 
40 |         - tuple with (keys, iterable of values):
41 | 
42 |             >>> cdims = (['A', 'B'], itertools.product([True, False], [0, 1]))
43 |             >>> cdims = (['A', 'B'], [(True, 0), (True, 1),
44 |             ...                       (False, 0), (False, 1)])
45 | 
46 |     Attributes
47 |     ----------
48 |     learners : list
49 |         The sequence of `~adaptive.BaseLearner`\s.
50 |     function : callable
51 |         A function that calls the functions of the underlying learners.
52 |         Its signature is ``function(learner_index, point)``.
53 |     strategy : 'loss_improvements' (default), 'loss', or 'npoints'
54 |         The points that the `BalancingLearner` chooses can be based on:
55 |         the best 'loss_improvements', the smallest total 'loss' of the
56 |         child learners, or the number of points per learner, using 'npoints'.
57 |         One can dynamically change the strategy while the simulation is
58 |         running by changing the ``learner.strategy`` attribute.
59 | 
60 |     Notes
61 |     -----
62 |     This learner compares the `loss` calculated from the "child" learners.
63 |     This requires that the 'loss' from different learners *can be meaningfully
64 |     compared*. For the moment we enforce this restriction by requiring that
65 |     all learners are the same type but (depending on the internals of the
66 |     learner) it may be that the loss cannot be compared *even between learners
67 |     of the same type*. In this case the `~adaptive.BalancingLearner` will
68 |     behave in an undefined way. Change the `strategy` in that case.
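    Examples
    --------
    A sketch of typical usage; ``funcs`` stands in for any sequence of
    functions of one variable:

    >>> learners = [Learner1D(f, bounds=(-1, 1)) for f in funcs]
    >>> learner = BalancingLearner(learners)
    >>> runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
    >>> learner.strategy = 'npoints'  # may be switched while the runner works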
69 |     """
70 | 
71 |     def __init__(self, learners, *, cdims=None, strategy='loss_improvements'):
72 |         self.learners = learners
73 | 
74 |         # Naively we would make 'function' a method, but this causes problems
75 |         # when using executors from 'concurrent.futures' because we have to
76 |         # pickle the whole learner.
77 |         self.function = partial(dispatch, [l.function for l in self.learners])
78 | 
79 |         self._points = {}
80 |         self._loss = {}
81 |         self._pending_loss = {}
82 |         self._cdims_default = cdims
83 | 
84 |         if len(set(learner.__class__ for learner in self.learners)) > 1:
85 |             raise TypeError('A BalancingLearner can handle only one type'
86 |                             ' of learners.')
87 | 
88 |         self.strategy = strategy
89 | 
90 |     @property
91 |     def strategy(self):
92 |         """Can be 'loss_improvements' (default), 'loss', or 'npoints'.
93 |         The points that the `BalancingLearner` chooses can be based on:
94 |         the best 'loss_improvements', the smallest total 'loss' of the
95 |         child learners, or the number of points per learner, using 'npoints'.
96 |         One can dynamically change the strategy while the simulation is
97 |         running by changing the ``learner.strategy`` attribute."""
98 |         return self._strategy
99 | 
100 |     @strategy.setter
101 |     def strategy(self, strategy):
102 |         self._strategy = strategy
103 |         if strategy == 'loss_improvements':
104 |             self._ask_and_tell = self._ask_and_tell_based_on_loss_improvements
105 |         elif strategy == 'loss':
106 |             self._ask_and_tell = self._ask_and_tell_based_on_loss
107 |         elif strategy == 'npoints':
108 |             self._ask_and_tell = self._ask_and_tell_based_on_npoints
109 |         else:
110 |             raise ValueError(
111 |                 'Only strategy="loss_improvements", strategy="loss", or'
112 |                 ' strategy="npoints" is implemented.')
113 | 
114 |     def _ask_and_tell_based_on_loss_improvements(self, n):
115 |         points = []
116 |         loss_improvements = []
117 |         for _ in range(n):
118 |             improvements_per_learner = []
119 |             pairs = []
120 |             for index, learner in enumerate(self.learners):
121 |                 if index not in self._points:
122 |                     self._points[index] = learner.ask(
123 |                         n=1, tell_pending=False)
124 |                 point, loss_improvement = self._points[index]
125 |                 improvements_per_learner.append(loss_improvement[0])
126 |                 pairs.append((index, point[0]))
127 |             x, l = max(zip(pairs, improvements_per_learner),
128 |                        key=itemgetter(1))
129 |             points.append(x)
130 |             loss_improvements.append(l)
131 |             self.tell_pending(x)
132 | 
133 |         return points, loss_improvements
134 | 
135 |     def _ask_and_tell_based_on_loss(self, n):
136 |         points = []
137 |         loss_improvements = []
138 |         for _ in range(n):
139 |             losses = self._losses(real=False)
140 |             max_ind = np.argmax(losses)
141 |             xs, ls = self.learners[max_ind].ask(1)
142 |             points.append((max_ind, xs[0]))
143 |             loss_improvements.append(ls[0])
144 |         return points, loss_improvements
145 | 
146 |     def _ask_and_tell_based_on_npoints(self, n):
147 |         points = []
148 |         loss_improvements = []
149 |         npoints = [l.npoints + len(l.pending_points)
150 |                    for l in self.learners]
151 |         n_left = n
152 |         while n_left > 0:
153 |             i = np.argmin(npoints)
154 |             xs, ls = self.learners[i].ask(1)
155 |             npoints[i] += 1
156 |             n_left -= 1
157 |             points.append((i, xs[0]))
158 |             loss_improvements.append(ls[0])
159 |         return points, loss_improvements
160 | 
161 |     def ask(self, n, tell_pending=True):
162 |         """Choose points for learners."""
163 |         if not tell_pending:
164 |             with restore(*self.learners):
165 |                 return self._ask_and_tell(n)
166 |         else:
167 |             return self._ask_and_tell(n)
168 | 
169 |     def tell(self, x, y):
170 |         index, x = x
171 |         self._points.pop(index, None)
172 |         self._loss.pop(index, None)
173 |         self._pending_loss.pop(index, None)
174 |         self.learners[index].tell(x, y)
175 | 
176 |     def tell_pending(self, x):
177 |         index, x = x
178 |         self._points.pop(index, None)
179 |         self._loss.pop(index, None)
180 |         self.learners[index].tell_pending(x)
181 | 
182 |     def _losses(self, real=True):
183 |         losses = []
184 |         loss_dict = self._loss if real else self._pending_loss
185 | 
186 |         for index, learner in enumerate(self.learners):
187 |             if index not in loss_dict:
188 |                 loss_dict[index] = learner.loss(real)
189 |             losses.append(loss_dict[index])
190 | 
191 |         return losses
192 | 
193 |     @cache_latest
194 |     def loss(self, real=True):
195 |         losses = self._losses(real)
196 |         return max(losses)
197 | 
198 |     def plot(self, cdims=None, plotter=None, dynamic=True):
199 |         """Returns a DynamicMap with sliders.
200 | 
201 |         Parameters
202 |         ----------
203 |         cdims : sequence of dicts, or (keys, iterable of values), optional
204 |             Constant dimensions; the parameters that label the learners.
205 |             Example inputs that all give identical results:
206 | 
207 |             - sequence of dicts:
208 | 
209 |                 >>> cdims = [{'A': True, 'B': 0},
210 |                 ...          {'A': True, 'B': 1},
211 |                 ...          {'A': False, 'B': 0},
212 |                 ...          {'A': False, 'B': 1}]
213 | 
214 |             - tuple with (keys, iterable of values):
215 | 
216 |                 >>> cdims = (['A', 'B'], itertools.product([True, False], [0, 1]))
217 |                 >>> cdims = (['A', 'B'], [(True, 0), (True, 1),
218 |                 ...                       (False, 0), (False, 1)])
219 | 
220 |         plotter : callable, optional
221 |             A function that takes the learner as an argument and returns a
222 |             holoviews object. By default ``learner.plot()`` will be called.
223 |         dynamic : bool, default True
224 |             Return a `holoviews.core.DynamicMap` if True, else a
225 |             `holoviews.core.HoloMap`. The `~holoviews.core.DynamicMap` is
226 |             rendered as the sliders change and can therefore not be exported
227 |             to html. The `~holoviews.core.HoloMap` does not have this problem.
228 | 
229 |         Returns
230 |         -------
231 |         dm : `holoviews.core.DynamicMap` (default) or `holoviews.core.HoloMap`
232 |             A `DynamicMap` ``(dynamic=True)`` or `HoloMap`
233 |             ``(dynamic=False)`` with sliders that are defined by `cdims`.
234 |         """
235 |         hv = ensure_holoviews()
236 |         cdims = cdims or self._cdims_default
237 | 
238 |         if cdims is None:
239 |             cdims = [{'i': i} for i in range(len(self.learners))]
240 |         elif not isinstance(cdims[0], dict):
241 |             # Normalize the format
242 |             keys, values_list = cdims
243 |             cdims = [dict(zip(keys, values)) for values in values_list]
244 | 
245 |         mapping = {tuple(_cdims.values()): l for l, _cdims in zip(self.learners, cdims)}
246 | 
247 |         d = defaultdict(list)
248 |         for _cdims in cdims:
249 |             for k, v in _cdims.items():
250 |                 d[k].append(v)
251 | 
252 |         def plot_function(*args):
253 |             with suppress(KeyError):
254 |                 learner = mapping[tuple(args)]
255 |                 return learner.plot() if plotter is None else plotter(learner)
256 | 
257 |         dm = hv.DynamicMap(plot_function, kdims=list(d.keys()))
258 |         dm = dm.redim.values(**d)
259 | 
260 |         if dynamic:
261 |             return dm
262 |         else:
263 |             # XXX: change when https://github.com/ioam/holoviews/issues/3085
264 |             # is fixed.
265 |             vals = {d.name: d.values for d in dm.dimensions() if d.values}
266 |             return hv.HoloMap(dm.select(**vals))
267 | 
268 |     def remove_unfinished(self):
269 |         """Remove uncomputed data from the learners."""
270 |         for learner in self.learners:
271 |             learner.remove_unfinished()
272 | 
273 |     @classmethod
274 |     def from_product(cls, f, learner_type, learner_kwargs, combos):
275 |         """Create a `BalancingLearner` with learners of all combinations of
276 |         named variables’ values. The `cdims` will be set correctly, so calling
277 |         `learner.plot` will be a `holoviews.core.HoloMap` with the correct labels.
278 | 
279 |         Parameters
280 |         ----------
281 |         f : callable
282 |             Function to learn, must take arguments provided in `combos`.
283 |         learner_type : `BaseLearner`
284 |             The learner that should wrap the function. For example `Learner1D`.
285 |         learner_kwargs : dict
286 |             Keyword arguments for the `learner_type`. For example `dict(bounds=[0, 1])`.
287 |         combos : dict (mapping individual fn arguments -> sequence of values)
288 |             For all combinations of each argument a learner will be instantiated.
289 | 290 | Returns 291 | ------- 292 | learner : `BalancingLearner` 293 | A `BalancingLearner` with learners of all combinations of `combos` 294 | 295 | Example 296 | ------- 297 | >>> def f(x, n, alpha, beta): 298 | ... return scipy.special.eval_jacobi(n, alpha, beta, x) 299 | 300 | >>> combos = { 301 | ... 'n': [1, 2, 4, 8, 16], 302 | ... 'alpha': np.linspace(0, 2, 3), 303 | ... 'beta': np.linspace(0, 1, 5), 304 | ... } 305 | 306 | >>> learner = BalancingLearner.from_product( 307 | ... f, Learner1D, dict(bounds=(0, 1)), combos) 308 | 309 | Notes 310 | ----- 311 | The order of the child learners inside `learner.learners` is the same 312 | as ``adaptive.utils.named_product(**combos)``. 313 | """ 314 | learners = [] 315 | arguments = named_product(**combos) 316 | for combo in arguments: 317 | learner = learner_type(function=partial(f, **combo), **learner_kwargs) 318 | learners.append(learner) 319 | return cls(learners, cdims=arguments) 320 | 321 | def save(self, fname, compress=True): 322 | """Save the data of the child learners into pickle files 323 | in a directory. 324 | 325 | Parameters 326 | ---------- 327 | fname: callable or sequence of strings 328 | Given a learner, returns a filename into which to save the data. 329 | Or a list (or iterable) with filenames. 330 | compress : bool, default True 331 | Compress the data upon saving using `gzip`. When saving 332 | using compression, one must load it with compression too. 333 | 334 | Example 335 | ------- 336 | >>> def combo_fname(learner): 337 | ... val = learner.function.keywords # because functools.partial 338 | ... fname = '__'.join([f'{k}_{v}.pickle' for k, v in val.items()]) 339 | ... return 'data_folder/' + fname 340 | >>> 341 | >>> def f(x, a, b): return a * x**2 + b 342 | >>> 343 | >>> learners = [Learner1D(functools.partial(f, **combo), (-1, 1)) 344 | ... for combo in adaptive.utils.named_product(a=[1, 2], b=[1])] 345 | >>> 346 | >>> learner = BalancingLearner(learners) 347 | >>> # Run the learner 348 | >>> runner = adaptive.Runner(learner) 349 | >>> # Then save 350 | >>> learner.save(combo_fname) # use 'load' in the same way 351 | """ 352 | if isinstance(fname, Iterable): 353 | for l, _fname in zip(self.learners, fname): 354 | l.save(_fname, compress=compress) 355 | else: 356 | for l in self.learners: 357 | l.save(fname(l), compress=compress) 358 | 359 | def load(self, fname, compress=True): 360 | """Load the data of the child learners from pickle files 361 | in a directory. 362 | 363 | Parameters 364 | ---------- 365 | fname: callable or sequence of strings 366 | Given a learner, returns a filename from which to load the data. 367 | Or a list (or iterable) with filenames. 368 | compress : bool, default True 369 | If the data is compressed when saved, one must load it 370 | with compression too. 371 | 372 | Example 373 | ------- 374 | See the example in the `BalancingLearner.save` doc-string. 
375 |         """
376 |         if isinstance(fname, Iterable):
377 |             for l, _fname in zip(self.learners, fname):
378 |                 l.load(_fname, compress=compress)
379 |         else:
380 |             for l in self.learners:
381 |                 l.load(fname(l), compress=compress)
382 | 
383 |     def _get_data(self):
384 |         return [l._get_data() for l in self.learners]  # 'self.learners', not the undefined name 'learner'
385 | 
386 |     def _set_data(self, data):
387 |         for l, _data in zip(self.learners, data):
388 |             l._set_data(_data)
389 | 
--------------------------------------------------------------------------------
/adaptive/tests/test_learners.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | import collections
4 | import functools as ft
5 | import inspect
6 | import itertools as it
7 | import math
8 | import operator
9 | import os
10 | import random
11 | import shutil
12 | import tempfile
13 | 
14 | import numpy as np
15 | import pytest
16 | import scipy.spatial
17 | 
18 | import adaptive
19 | from adaptive.learner import (AverageLearner, BalancingLearner, DataSaver,
20 |                               IntegratorLearner, Learner1D, Learner2D, LearnerND)
21 | from adaptive.runner import simple
22 | 
23 | 
24 | try:
25 |     import skopt
26 |     from adaptive.learner import SKOptLearner
27 | except ModuleNotFoundError:
28 |     SKOptLearner = None
29 | 
30 | 
31 | LOSS_FUNCTIONS = {
32 |     Learner1D: ('loss_per_interval', (
33 |         adaptive.learner.learner1D.default_loss,
34 |         adaptive.learner.learner1D.uniform_loss,
35 |         adaptive.learner.learner1D.curvature_loss_function(),
36 |     )),
37 |     Learner2D: ('loss_per_triangle', (
38 |         adaptive.learner.learner2D.default_loss,
39 |         adaptive.learner.learner2D.uniform_loss,
40 |         adaptive.learner.learner2D.minimize_triangle_surface_loss,
41 |         adaptive.learner.learner2D.resolution_loss_function(),
42 |     )),
43 |     LearnerND: ('loss_per_simplex', (
44 |         adaptive.learner.learnerND.default_loss,
45 |         adaptive.learner.learnerND.std_loss,
46 |         adaptive.learner.learnerND.uniform_loss,
47 |     )),
48 | }
49 | 
50 | 
51 | def generate_random_parametrization(f):
52 |     """Return a realization of 'f' with parameters bound to random values.
53 | 
54 |     Parameters
55 |     ----------
56 |     f : callable
57 |         All parameters but the first must be annotated with a callable
58 |         that, when called with no arguments, produces a value of the
59 |         appropriate type for the parameter in question.
60 |     """
61 |     _, *params = inspect.signature(f).parameters.items()
62 |     if any(not callable(v.annotation) for (p, v) in params):
63 |         raise TypeError('All parameters to {} must be annotated with functions.'
64 |                         .format(f.__name__))
65 |     realization = {p: v.annotation() for (p, v) in params}
66 |     return ft.partial(f, **realization)
67 | 
68 | 
69 | def uniform(a, b):
70 |     return lambda: random.uniform(a, b)
71 | 
72 | 
73 | # Library of functions and associated learners.
74 | 
75 | learner_function_combos = collections.defaultdict(list)
76 | 
77 | 
78 | def learn_with(learner_type, **init_kwargs):
79 | 
80 |     def _(f):
81 |         learner_function_combos[learner_type].append((f, init_kwargs))
82 |         return f
83 | 
84 |     return _
85 | 
86 | 
87 | def xfail(learner):
88 |     return pytest.mark.xfail, learner
89 | 
90 | 
91 | def maybe_skip(learner):
92 |     return (pytest.mark.skip, learner) if learner is None else learner
93 | 
94 | 
95 | # All parameters except the first must be annotated with a callable that
96 | # returns a random value for that parameter.
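# An illustration of how the pieces above fit together (editor's sketch;
# 'quadratic' is defined just below):
#
#     g = generate_random_parametrization(quadratic)  # draws random m and b
#     g(0.5)  # a plain one-argument function, ready for a Learner1D
#
# i.e. the annotations 'm: uniform(0, 10)' and 'b: uniform(0, 1)' supply the
# random draws.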
97 | 98 | @learn_with(Learner1D, bounds=(-1, 1)) 99 | def quadratic(x, m: uniform(0, 10), b: uniform(0, 1)): 100 | return m * x**2 + b 101 | 102 | 103 | @learn_with(Learner1D, bounds=(-1, 1)) 104 | def linear_with_peak(x, d: uniform(-1, 1)): 105 | a = 0.01 106 | return x + a**2 / (a**2 + (x - d)**2) 107 | 108 | 109 | @learn_with(LearnerND, bounds=((-1, 1), (-1, 1))) 110 | @learn_with(Learner2D, bounds=((-1, 1), (-1, 1))) 111 | def ring_of_fire(xy, d: uniform(0.2, 1)): 112 | a = 0.2 113 | x, y = xy 114 | return x + math.exp(-(x**2 + y**2 - d**2)**2 / a**4) 115 | 116 | 117 | @learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1))) 118 | def sphere_of_fire(xyz, d: uniform(0.2, 1)): 119 | a = 0.2 120 | x, y, z = xyz 121 | return x + math.exp(-(x**2 + y**2 + z**2 - d**2)**2 / a**4) + z**2 122 | 123 | 124 | @learn_with(AverageLearner, rtol=1) 125 | def gaussian(n): 126 | return random.gauss(0, 1) 127 | 128 | 129 | # Decorators for tests. 130 | 131 | 132 | # Create a sequence of learner parameters by adding all 133 | # possible loss functions to an existing parameter set. 134 | def add_loss_to_params(learner_type, existing_params): 135 | if learner_type not in LOSS_FUNCTIONS: 136 | return [existing_params] 137 | loss_param, loss_functions = LOSS_FUNCTIONS[learner_type] 138 | loss_params = [{loss_param: f} for f in loss_functions] 139 | return [dict(**existing_params, **lp) for lp in loss_params] 140 | 141 | 142 | def run_with(*learner_types, with_all_loss_functions=True): 143 | pars = [] 144 | for l in learner_types: 145 | has_marker = isinstance(l, tuple) 146 | if has_marker: 147 | marker, l = l 148 | for f, k in learner_function_combos[l]: 149 | ks = add_loss_to_params(l, k) if with_all_loss_functions else [k] 150 | for k in ks: 151 | # Check if learner was marked with our `xfail` decorator 152 | # XXX: doesn't work when feeding kwargs to xfail. 153 | if has_marker: 154 | pars.append(pytest.param(l, f, dict(k), 155 | marks=[marker])) 156 | else: 157 | pars.append((l, f, dict(k))) 158 | return pytest.mark.parametrize('learner_type, f, learner_kwargs', pars) 159 | 160 | 161 | def ask_randomly(learner, rounds, points): 162 | n_rounds = random.randrange(*rounds) 163 | n_points = [random.randrange(*points) for _ in range(n_rounds)] 164 | 165 | xs = [] 166 | ls = [] 167 | for n in n_points: 168 | x, l = learner.ask(n) 169 | xs.extend(x) 170 | ls.extend(l) 171 | 172 | return xs, ls 173 | 174 | 175 | # Tests 176 | 177 | @run_with(Learner1D) 178 | def test_uniform_sampling1D(learner_type, f, learner_kwargs): 179 | """Points are sampled uniformly if no data is provided. 180 | 181 | Non-uniform sampling implies that we think we know something about 182 | the function, which we do not in the absence of data. 183 | """ 184 | f = generate_random_parametrization(f) 185 | learner = learner_type(f, **learner_kwargs) 186 | 187 | points, _ = ask_randomly(learner, (10, 20), (10, 20)) 188 | 189 | points.sort() 190 | ivals = np.diff(sorted(points)) 191 | assert max(ivals) / min(ivals) < 2 + 1e-8 192 | 193 | 194 | @pytest.mark.xfail 195 | @run_with(Learner2D, LearnerND) 196 | def test_uniform_sampling2D(learner_type, f, learner_kwargs): 197 | """Points are sampled uniformly if no data is provided. 198 | 199 | Non-uniform sampling implies that we think we know something about 200 | the function, which we do not in the absence of data. 
201 |     """
202 |     f = generate_random_parametrization(f)
203 |     learner = learner_type(f, **learner_kwargs)
204 | 
205 |     points, _ = ask_randomly(learner, (70, 100), (10, 20))
206 |     tree = scipy.spatial.cKDTree(points)
207 | 
208 |     # regular grid
209 |     n = math.sqrt(len(points))
210 |     xbounds, ybounds = learner_kwargs['bounds']
211 |     r = math.sqrt((ybounds[1] - ybounds[0]) / (xbounds[1] - xbounds[0]))
212 |     xs, dx = np.linspace(*xbounds, int(n / r), retstep=True)
213 |     ys, dy = np.linspace(*ybounds, int(n * r), retstep=True)
214 | 
215 |     distances, neighbors = tree.query(list(it.product(xs, ys)), k=1)
216 |     assert max(distances) < math.sqrt(dx**2 + dy**2)
217 | 
218 | 
219 | @pytest.mark.parametrize('learner_type, bounds', [
220 |     (Learner1D, (-1, 1)),
221 |     (Learner2D, [(-1, 1), (-1, 1)]),
222 |     (LearnerND, [(-1, 1), (-1, 1), (-1, 1)]),
223 | ])
224 | def test_learner_accepts_lists(learner_type, bounds):
225 |     def f(x):
226 |         return [0, 1]
227 | 
228 |     learner = learner_type(f, bounds=bounds)
229 |     simple(learner, goal=lambda l: l.npoints > 10)
230 | 
231 | 
232 | @run_with(Learner1D, Learner2D, LearnerND)
233 | def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
234 |     """Adding already existing data is an idempotent operation.
235 | 
236 |     Either it is idempotent, or it is an error.
237 |     This is the only sane behaviour.
238 |     """
239 |     f = generate_random_parametrization(f)
240 |     learner = learner_type(f, **learner_kwargs)
241 |     control = learner_type(f, **learner_kwargs)
242 |     if learner_type is Learner1D:
243 |         learner._recompute_losses_factor = 1
244 |         control._recompute_losses_factor = 1
245 | 
246 |     N = random.randint(10, 30)
247 |     control.ask(N)
248 |     xs, _ = learner.ask(N)
249 |     points = [(x, f(x)) for x in xs]
250 | 
251 |     for p in points:
252 |         control.tell(*p)
253 |         learner.tell(*p)
254 | 
255 |     random.shuffle(points)
256 |     for p in points:
257 |         learner.tell(*p)
258 | 
259 |     M = random.randint(10, 30)
260 |     pls = zip(*learner.ask(M))
261 |     cpls = zip(*control.ask(M))
262 |     # Point ordering is not defined, so compare as sets
263 |     assert set(pls) == set(cpls)
264 | 
265 | 
266 | # XXX: This *should* pass (https://gitlab.kwant-project.org/qt/adaptive/issues/84)
267 | # but we xfail it now, as Learner2D will be deprecated anyway
268 | @run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner)
269 | def test_adding_non_chosen_data(learner_type, f, learner_kwargs):
270 |     """Adding data for a point that was not returned by 'ask'."""
271 |     # XXX: learner, control and bounds are not defined
272 |     f = generate_random_parametrization(f)
273 |     learner = learner_type(f, **learner_kwargs)
274 |     control = learner_type(f, **learner_kwargs)
275 | 
276 |     if learner_type is Learner2D:
277 |         # If the stack_size is bigger than the number of points added,
278 |         # ask will return a point from the _stack.
279 |         learner.stack_size = 1
280 |         control.stack_size = 1
281 | 
282 |     N = random.randint(10, 30)
283 |     xs, _ = control.ask(N)
284 | 
285 |     ys = [f(x) for x in xs]
286 |     for x, y in zip(xs, ys):
287 |         control.tell(x, y)
288 |         learner.tell(x, y)
289 | 
290 |     M = random.randint(10, 30)
291 |     pls = zip(*learner.ask(M))
292 |     cpls = zip(*control.ask(M))
293 |     # Point ordering within a single call to 'ask'
294 |     # is not guaranteed to be the same by the API.
295 | assert set(pls) == set(cpls) 296 | 297 | 298 | @run_with(Learner1D, xfail(Learner2D), xfail(LearnerND), AverageLearner) 299 | def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs): 300 | """The order of calls to 'tell' between calls to 'ask' 301 | is arbitrary. 302 | 303 | This test will fail for the Learner2D because 304 | `interpolate.interpnd.estimate_gradients_2d_global` will give different 305 | outputs based on the order of the triangles and values in 306 | (ip.tri, ip.values). Therefore the _stack will contain different points. 307 | """ 308 | f = generate_random_parametrization(f) 309 | learner = learner_type(f, **learner_kwargs) 310 | control = learner_type(f, **learner_kwargs) 311 | 312 | if learner_type is Learner1D: 313 | learner._recompute_losses_factor = 1 314 | control._recompute_losses_factor = 1 315 | 316 | N = random.randint(10, 30) 317 | control.ask(N) 318 | xs, _ = learner.ask(N) 319 | points = [(x, f(x)) for x in xs] 320 | 321 | for p in points: 322 | control.tell(*p) 323 | 324 | random.shuffle(points) 325 | for p in points: 326 | learner.tell(*p) 327 | 328 | M = random.randint(10, 30) 329 | pls = zip(*learner.ask(M)) 330 | cpls = zip(*control.ask(M)) 331 | # Point ordering within a single call to 'ask' 332 | # is not guaranteed to be the same by the API. 333 | # We compare the sorted points instead of set, because the points 334 | # should only be identical up to machine precision. 335 | np.testing.assert_almost_equal(sorted(pls), sorted(cpls)) 336 | 337 | 338 | # XXX: the Learner2D fails with ~50% chance 339 | # see https://gitlab.kwant-project.org/qt/adaptive/issues/84 340 | @run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner) 341 | def test_expected_loss_improvement_is_less_than_total_loss(learner_type, f, learner_kwargs): 342 | """The estimated loss improvement can never be greater than the total loss.""" 343 | f = generate_random_parametrization(f) 344 | learner = learner_type(f, **learner_kwargs) 345 | N = random.randint(50, 100) 346 | xs, loss_improvements = learner.ask(N) 347 | 348 | for x in xs: 349 | learner.tell(x, f(x)) 350 | 351 | M = random.randint(50, 100) 352 | _, loss_improvements = learner.ask(M) 353 | 354 | if learner_type is Learner2D: 355 | assert (sum(loss_improvements) 356 | < sum(learner.loss_per_triangle(learner.ip()))) 357 | elif learner_type is Learner1D: 358 | assert sum(loss_improvements) < sum(learner.losses.values()) 359 | elif learner_type is AverageLearner: 360 | assert sum(loss_improvements) < learner.loss() 361 | 362 | 363 | # XXX: This *should* pass (https://gitlab.kwant-project.org/qt/adaptive/issues/84) 364 | # but we xfail it now, as Learner2D will be deprecated anyway 365 | @run_with(Learner1D, xfail(Learner2D), LearnerND) 366 | def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner_kwargs): 367 | """Learners behave identically under transformations that leave 368 | the loss invariant. 369 | 370 | This is a statement that the learner makes decisions based solely 371 | on the loss function. 
372 | """ 373 | # for now we just scale X and Y by random factors 374 | f = generate_random_parametrization(f) 375 | 376 | control_kwargs = dict(learner_kwargs) 377 | control = learner_type(f, **control_kwargs) 378 | 379 | xscale = 1000 * random.random() 380 | yscale = 1000 * random.random() 381 | 382 | l_kwargs = dict(learner_kwargs) 383 | l_kwargs['bounds'] = xscale * np.array(l_kwargs['bounds']) 384 | learner = learner_type(lambda x: yscale * f(np.array(x) / xscale), 385 | **l_kwargs) 386 | 387 | if learner_type in [Learner1D, LearnerND]: 388 | learner._recompute_losses_factor = 1 389 | control._recompute_losses_factor = 1 390 | 391 | npoints = random.randrange(300, 500) 392 | 393 | if learner_type is LearnerND: 394 | # Because the LearnerND is slow 395 | npoints //= 10 396 | 397 | for n in range(npoints): 398 | cxs, _ = control.ask(1) 399 | xs, _ = learner.ask(1) 400 | control.tell_many(cxs, [control.function(x) for x in cxs]) 401 | learner.tell_many(xs, [learner.function(x) for x in xs]) 402 | 403 | # Check whether the points returned are the same 404 | xs_unscaled = np.array(xs) / xscale 405 | assert np.allclose(xs_unscaled, cxs) 406 | 407 | # Check if the losses are close 408 | assert math.isclose(learner.loss(), control.loss(), rel_tol=1e-10) 409 | 410 | 411 | @run_with(Learner1D, Learner2D, LearnerND, AverageLearner, 412 | with_all_loss_functions=False) 413 | def test_balancing_learner(learner_type, f, learner_kwargs): 414 | """Test if the BalancingLearner works with the different types of learners.""" 415 | learners = [learner_type(generate_random_parametrization(f), **learner_kwargs) 416 | for i in range(4)] 417 | 418 | learner = BalancingLearner(learners) 419 | 420 | # Emulate parallel execution 421 | stash = [] 422 | 423 | for i in range(100): 424 | n = random.randint(1, 10) 425 | m = random.randint(0, n) 426 | xs, _ = learner.ask(n, tell_pending=False) 427 | 428 | # Save 'm' random points out of `xs` for later 429 | random.shuffle(xs) 430 | for _ in range(m): 431 | stash.append(xs.pop()) 432 | 433 | for x in xs: 434 | learner.tell(x, learner.function(x)) 435 | 436 | # Evaluate and add 'm' random points from `stash` 437 | random.shuffle(stash) 438 | for _ in range(m): 439 | x = stash.pop() 440 | learner.tell(x, learner.function(x)) 441 | 442 | assert all(l.npoints > 10 for l in learner.learners), [l.npoints for l in learner.learners] 443 | 444 | 445 | @run_with(Learner1D, Learner2D, LearnerND, AverageLearner, 446 | maybe_skip(SKOptLearner), IntegratorLearner, 447 | with_all_loss_functions=False) 448 | def test_saving(learner_type, f, learner_kwargs): 449 | f = generate_random_parametrization(f) 450 | learner = learner_type(f, **learner_kwargs) 451 | control = learner_type(f, **learner_kwargs) 452 | if learner_type is Learner1D: 453 | learner._recompute_losses_factor = 1 454 | control._recompute_losses_factor = 1 455 | simple(learner, lambda l: l.npoints > 100) 456 | fd, path = tempfile.mkstemp() 457 | try: 458 | learner.save(path) 459 | control.load(path) 460 | 461 | np.testing.assert_almost_equal(learner.loss(), control.loss()) 462 | 463 | # Try if the control is runnable 464 | simple(control, lambda l: l.npoints > 200) 465 | finally: 466 | os.remove(path) 467 | 468 | 469 | @run_with(Learner1D, Learner2D, LearnerND, AverageLearner, 470 | maybe_skip(SKOptLearner), IntegratorLearner, 471 | with_all_loss_functions=False) 472 | def test_saving_of_balancing_learner(learner_type, f, learner_kwargs): 473 | f = generate_random_parametrization(f) 474 | learner = 
BalancingLearner([learner_type(f, **learner_kwargs)]) 475 | control = BalancingLearner([learner_type(f, **learner_kwargs)]) 476 | 477 | if learner_type is Learner1D: 478 | for l, c in zip(learner.learners, control.learners): 479 | l._recompute_losses_factor = 1 480 | c._recompute_losses_factor = 1 481 | 482 | simple(learner, lambda l: l.learners[0].npoints > 100) 483 | folder = tempfile.mkdtemp() 484 | 485 | def fname(learner): 486 | return folder + 'test' 487 | 488 | try: 489 | learner.save(fname=fname) 490 | control.load(fname=fname) 491 | 492 | np.testing.assert_almost_equal(learner.loss(), control.loss()) 493 | 494 | # Try if the control is runnable 495 | simple(control, lambda l: l.learners[0].npoints > 200) 496 | finally: 497 | shutil.rmtree(folder) 498 | 499 | 500 | @run_with(Learner1D, Learner2D, LearnerND, AverageLearner, 501 | maybe_skip(SKOptLearner), IntegratorLearner, 502 | with_all_loss_functions=False) 503 | def test_saving_with_datasaver(learner_type, f, learner_kwargs): 504 | f = generate_random_parametrization(f) 505 | g = lambda x: {'y': f(x), 't': random.random()} 506 | arg_picker = operator.itemgetter('y') 507 | learner = DataSaver(learner_type(g, **learner_kwargs), arg_picker) 508 | control = DataSaver(learner_type(g, **learner_kwargs), arg_picker) 509 | 510 | if learner_type is Learner1D: 511 | learner.learner._recompute_losses_factor = 1 512 | control.learner._recompute_losses_factor = 1 513 | 514 | simple(learner, lambda l: l.npoints > 100) 515 | fd, path = tempfile.mkstemp() 516 | try: 517 | learner.save(path) 518 | control.load(path) 519 | 520 | np.testing.assert_almost_equal(learner.loss(), control.loss()) 521 | 522 | assert learner.extra_data == control.extra_data 523 | 524 | # Try if the control is runnable 525 | simple(control, lambda l: l.npoints > 200) 526 | finally: 527 | os.remove(path) 528 | 529 | 530 | @pytest.mark.xfail 531 | @run_with(Learner1D, Learner2D, LearnerND) 532 | def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs): 533 | """Learners that are learning the same function should converge 534 | to the same result "eventually" if given the same data, regardless 535 | of the order in which that data is given. 536 | """ 537 | # XXX: not sure how to implement this. Can we say anything at all about 538 | # the scaling of the loss with the number of points? 539 | raise NotImplementedError() 540 | 541 | 542 | @pytest.mark.xfail 543 | @run_with(Learner1D, Learner2D, LearnerND) 544 | def test_learner_subdomain(learner_type, f, learner_kwargs): 545 | """Learners that never receive data outside of a subdomain should 546 | perform 'similarly' to learners defined on that subdomain only.""" 547 | # XXX: not sure how to implement this. How do we measure "performance"? 548 | raise NotImplementedError() 549 | --------------------------------------------------------------------------------