├── .gitignore
├── .idea
└── .gitignore
├── LICENSE
├── README.md
├── environment.yml
├── evaluate
├── Evaluate-BJAQ.ipynb
├── Evaluate-Power.ipynb
└── oracle
│ ├── BJAQ_rng-45.csv
│ └── power_rng-45.csv
├── framework.png
├── torchquadMy
├── LICENSE
├── README.md
├── docs
│ ├── Makefile
│ ├── make.bat
│ └── source
│ │ ├── Torchquad_RtD_background.png
│ │ ├── Torchquad_logo_white_background.png
│ │ ├── autodoc.rst
│ │ ├── conf.py
│ │ ├── contact.rst
│ │ ├── contributing.rst
│ │ ├── index.rst
│ │ ├── install.rst
│ │ ├── integration_methods.rst
│ │ └── tutorial.rst
├── environment.yml
├── environment_all_backends.yml
├── logos
│ ├── torchquad_black_background_PNG.png
│ ├── torchquad_logo_black_background.svg
│ ├── torchquad_logo_white_background.svg
│ └── torchquad_white_background_PNG.png
├── paper
│ ├── paper.bib
│ └── paper.md
├── requirements.txt
├── resources
│ ├── torchquad_convergence.png
│ └── torchquad_runtime.png
├── rtd_environment.yml
├── setup.py
└── torchquad
│ ├── __init__.py
│ ├── integration
│ ├── BatchMulVegas.py
│ ├── BatchVegas.py
│ ├── base_integrator.py
│ ├── boole.py
│ ├── integration_grid.py
│ ├── monte_carlo.py
│ ├── newton_cotes.py
│ ├── rng.py
│ ├── simpson.py
│ ├── trapezoid.py
│ ├── utils.py
│ ├── vegas.py
│ ├── vegas_map.py
│ ├── vegas_mul_map.py
│ ├── vegas_mul_stratification.py
│ └── vegas_stratification.py
│ ├── plots
│ ├── plot_convergence.py
│ └── plot_runtime.py
│ ├── tests
│ ├── BatchMulVegas_test.py
│ ├── BatchVegas_test.py
│ ├── boole_test.py
│ ├── gradient_test.py
│ ├── helper_functions.py
│ ├── integration_grid_test.py
│ ├── integration_test_functions.py
│ ├── integrator_types_test.py
│ ├── monte_carlo_test.py
│ ├── rng_test.py
│ ├── simpson_test.py
│ ├── trapezoid_test.py
│ ├── utils_integration_test.py
│ ├── vegas_map_test.py
│ ├── vegas_mul_map_test.py
│ ├── vegas_mul_stratification_test.py
│ ├── vegas_stratification_test.py
│ └── vegas_test.py
│ └── utils
│ ├── deployment_test.py
│ ├── enable_cuda.py
│ ├── set_log_level.py
│ ├── set_precision.py
│ └── set_up_backend.py
├── train
├── BJAQ.pickle
├── Train-BJAQ.ipynb
├── Train-Power.ipynb
├── models
│ ├── BJAQ
│ │ └── BJAQ-id1-best-val.t
│ └── power
│ │ └── power-id1-best-val.t
├── power.pickle
└── prefetcher.py
└── utils
└── dataUtils.py
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | *.xml
3 | *.iml
4 | data/.DS_Store
5 | .DS_Store
6 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Datasource local storage ignored files
5 | /dataSources/
6 | /dataSources.local.xml
7 | # Editor-based HTTP Client requests
8 | /httpRequests/
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 NotExist
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FACE
2 | This is a PyTorch implementation for the VLDB 2022 paper [FACE: A Normalizing Flow based Cardinality Estimator](http://www.vldb.org/pvldb/vol15/p72-li.pdf) [[**Citation**]](#citation).
3 | Our code is built on top of [nflows](https://pypi.org/project/nflows/) and [torchquad](https://github.com/esa/torchquad).
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | # Folder Structure
14 |
15 | .
16 | ├── torchquadMy # A modified pytorch implementation of adaptive importance sampling.
17 | ├── utils # A wrapper for datasets, used to generate queries, define error metrics, etc.
18 | ├── data # A directory for storing data. Downloaded data can be stored here.
19 | ├── train # Codes for training normalizing flow models.
20 | ├── evaluate # Evaluate trained models on real-world datasets for cardinality estimation.
21 | ├── environment.yml # Configuration file used to build conda environment.
22 | └── README.md
23 |
24 |
25 |
26 |
27 | # Quick Start
28 | The real-world datasets can be downloaded from [dataset link](https://drive.google.com/drive/folders/16b-yaSYK6l6ZclLkP6rIxxUyye5Iockm?usp=sharing).
29 | We use power and BJAQ as concrete examples to illustrate how to use FACE for cardinality estimation.
30 | - **Step 1:** Build conda environment with `conda env create -f environment.yml`.
31 | - **Step 2:** Switch to the installed environment by `conda activate testenv`.
32 | - **Step 3:** Install modified torchquad by `cd ./torchquadMy`, and then `pip install .` .
33 | - **Step 4:** Download the datasets from [dataset link](https://drive.google.com/drive/folders/16b-yaSYK6l6ZclLkP6rIxxUyye5Iockm?usp=sharing), and then place the data into `data` directory.
34 | - **Step 5:** After properly setting the paths of datasets, models, etc.,
35 | you can use the notebook files under the `train` and `evaluate` directories to conduct experiments.
36 |
37 |
38 |
39 |
40 |
41 | **Notes:**
42 | - Before running the code, make sure the path variable `PROJECT_PATH` is set properly.
43 | This variable should be set to the path of the project root directory.
44 | - The current code may be incompatible with machines that do not have GPUs.
45 | - For GPUs with less memory than a 2080Ti (11 GB),
46 | some parameters need to be set to smaller values, which may cause some performance loss.
47 |
48 | ## License
49 |
50 | The project is available under the [MIT](LICENSE) license.
51 |
52 |
53 |
54 | ## Citation
55 | If our work is helpful to you, please cite our paper:
56 | ```bibtex
57 | @article{DBLP:journals/pvldb/WangCLL21,
58 | author = {Jiayi Wang and
59 | Chengliang Chai and
60 | Jiabin Liu and
61 | Guoliang Li},
62 | title = {{FACE:} {A} Normalizing Flow based Cardinality Estimator},
63 | journal = {Proc. {VLDB} Endow.},
64 | volume = {15},
65 | number = {1},
66 | pages = {72--84},
67 | year = {2021},
68 | url = {http://www.vldb.org/pvldb/vol15/p72-li.pdf},
69 | doi = {10.14778/3485450.3485458},
70 | timestamp = {Thu, 21 Apr 2022 17:09:21 +0200},
71 | biburl = {https://dblp.org/rec/journals/pvldb/WangCLL21.bib},
72 | bibsource = {dblp computer science bibliography, https://dblp.org}
73 | }
74 | ```
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: testenv
2 | channels:
3 | - conda-forge
4 | - defaults
5 | dependencies:
6 |
7 | - numpy-base=1.19.2=py38hfa32c7d_0
8 |
9 | - python=3.8.5=h7579374_1
10 | - pytorch-gpu=1.7.1=cuda102py38hf05f184_1
11 |
12 | - pip:
13 | - jupyter
14 | - pandas==1.0.3
15 | - nflows==0.14
16 | - torch==1.11.0
17 |
18 | prefix: /home/jiayi/.conda/envs/testenv
19 |
--------------------------------------------------------------------------------
/framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/framework.png
--------------------------------------------------------------------------------
/torchquadMy/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/torchquadMy/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/torchquadMy/docs/source/Torchquad_RtD_background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/torchquadMy/docs/source/Torchquad_RtD_background.png
--------------------------------------------------------------------------------
/torchquadMy/docs/source/Torchquad_logo_white_background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/torchquadMy/docs/source/Torchquad_logo_white_background.png
--------------------------------------------------------------------------------
/torchquadMy/docs/source/autodoc.rst:
--------------------------------------------------------------------------------
1 | .. _autodoc:
2 |
3 | All content
4 | ======================================
5 |
6 | This is the list of all content in *torchquad*.
7 | The type *backend tensor* in the documentation is a placeholder for the tensor
8 | type of the current numerical backend,
9 | for example ``numpy.array`` or ``torch.Tensor``.
10 |
11 | We are continuously implementing new content in our library.
12 | For the code, please see the `code page `_
13 | or check out our full code and latest news at https://github.com/esa/torchquad.
14 |
15 | .. automodule:: torchquad
16 | :members:
17 | :undoc-members:
18 | :show-inheritance:
19 |
20 |
21 | .. Abstract classes which are not part of __all__
22 |
23 | .. autoclass:: torchquad.integration.newton_cotes.NewtonCotes
24 | :members:
25 | :undoc-members:
26 | :show-inheritance:
27 |
28 | .. autoclass:: torchquad.integration.base_integrator.BaseIntegrator
29 | :members:
30 | :undoc-members:
31 | :show-inheritance:
32 |
--------------------------------------------------------------------------------
/torchquadMy/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import sys
14 | import os
15 |
16 | sys.path.insert(0, os.path.abspath("../../"))
17 |
18 | # -- Project information -----------------------------------------------------
19 |
20 | project = "torchquad"
21 | copyright = "2021, Gabriele Meoni, Håvard Hem Toftevaag, Pablo Gómez ."
22 | author = "ESA Advanced Concepts Team"
23 |
24 | # The full version, including alpha/beta/rc tags
25 | release = "0.3.0"
26 |
27 |
28 | # -- General configuration ---------------------------------------------------
29 |
30 | # Add any Sphinx extension module names here, as strings. They can be
31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 | # ones.
33 | extensions = [
34 | "sphinx.ext.autodoc",
35 | "sphinx.ext.viewcode",
36 | "sphinx.ext.napoleon",
37 | "sphinx.ext.imgmath",
38 | ]
39 |
40 | # Add any paths that contain templates here, relative to this directory.
41 | templates_path = ["_templates"]
42 |
43 | # List of patterns, relative to source directory, that match files and
44 | # directories to ignore when looking for source files.
45 | # This pattern also affects html_static_path and html_extra_path.
46 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
47 |
48 |
49 | # -- Options for HTML output -------------------------------------------------
50 |
51 | # The theme to use for HTML and HTML Help pages. See the documentation for
52 | # a list of builtin themes.
53 | #
54 | html_theme = "sphinx_rtd_theme"
55 |
56 | html_logo = "Torchquad_logo_white_background.png"
57 |
58 | html_theme_options = {
59 | "logo_only": True,
60 | "display_version": True,
61 | "prev_next_buttons_location": "bottom",
62 | "style_nav_header_background": "white",
63 | }
64 |
65 | # Add any paths that contain custom static files (such as style sheets) here,
66 | # relative to this directory. They are copied after the builtin static files,
67 | # so a file named "default.css" will overwrite the builtin "default.css".
68 | # html_static_path = ["_static"]
69 |
70 | # -- Extension configuration -------------------------------------------------
71 |
--------------------------------------------------------------------------------
/torchquadMy/docs/source/contact.rst:
--------------------------------------------------------------------------------
1 | .. _contact:
2 |
3 | Contact information
4 | ===================
5 |
6 | Created by ESA's `Advanced Concepts Team `_:
7 |
8 | - Pablo Gómez - *pablo.gomez (at) esa.int*
9 | - Gabriele Meoni - *gabriele.meoni (at) esa.int*
10 | - Håvard Hem Toftevaag - *havard.hem.toftevaag (at) esa.int*
11 |
12 | Project Link: `https://github.com/esa/torchquad `_.
13 |
14 | Feedback
15 | --------
16 |
17 | If you want to get in touch with the creators of torchquad,
18 | please send an email to *pablo.gomez (at) esa.int*.
19 |
--------------------------------------------------------------------------------
/torchquadMy/docs/source/contributing.rst:
--------------------------------------------------------------------------------
1 | .. _contributing:
2 |
3 | Contributing
4 | ================
5 |
6 | The project is open to community contributions. Feel free to open an `issue `_
7 | or write us an `email `_ if you would like to discuss a problem or idea first.
8 |
9 | If you want to contribute, please
10 |
11 | 1. Fork the project on `GitHub `_.
12 | 2. Get the most up-to-date code by following this quick guide for installing *torchquad* from source:
13 |
14 | a. Get `miniconda `_ or similar
15 | b. Clone the repo
16 |
17 | .. code-block:: bash
18 |
19 | git clone https://github.com/esa/torchquad.git
20 |
21 | c. With the default configuration, all numerical backends with CUDA support are installed. If this should not happen, comment out unwanted packages in ``environment.yml``.
22 |
23 | d. Set up the environment. This creates a conda environment called ``torchquad`` and installs the required dependencies.
24 |
25 | .. code-block:: bash
26 |
27 | conda env create -f environment.yml
28 | conda activate torchquad
29 |
30 | Once the installation is done, you are ready to contribute.
31 | Please note that PRs should be created from and into the ``develop`` branch. For each release the develop branch is merged into main.
32 |
33 | 3. Create your Feature Branch: ``git checkout -b feature/AmazingFeature``
34 | 4. Commit your Changes: ``git commit -m 'Add some AmazingFeature'``
35 | 5. Push to the Branch: ``git push origin feature/AmazingFeature``
36 | 6. Open a Pull Request on the ``develop`` branch, *not* ``main`` (NB: We autoformat every PR with black. Our GitHub actions may create additional commits on your PR for that reason.)
37 |
38 | and we will have a look at your contribution as soon as we can.
39 |
40 | Furthermore, please make sure that your PR passes all automated tests. Review will only happen after that.
41 | Only PRs created on the ``develop`` branch with all tests passing will be considered. The only exception to this rule is if you want to update the documentation in relation to the current release on ``conda`` / ``pip``.
42 | In that case you may ask to merge directly into ``main``.
43 |
--------------------------------------------------------------------------------
/torchquadMy/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. torchquad documentation master file, created by
2 | sphinx-quickstart on Fri Nov 20 14:20:34 2020.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | .. image:: Torchquad_RtD_background.png
7 | :width: 530px
8 | :align: center
9 | :height: 227px
10 | :alt: torchquad_logo
11 |
12 |
13 | .. image:: https://readthedocs.org/projects/torchquad/badge/?version=main
14 | :target: https://torchquad.readthedocs.io/en/main/?badge=main
15 | :alt: Documentation Status
16 |
17 | .. image:: https://img.shields.io/github/workflow/status/esa/torchquad/Running%20tests/develop
18 | :target: https://img.shields.io/github/workflow/status/esa/torchquad/Running%20tests/develop
19 | :alt: GitHub Workflow Status (branch)
20 |
21 | .. image:: https://img.shields.io/github/last-commit/esa/torchquad
22 | :target: https://img.shields.io/github/last-commit/esa/torchquad
23 | :alt: GitHub last commit
24 |
25 | .. image:: https://img.shields.io/github/license/esa/torchquad
26 | :target: https://img.shields.io/github/license/esa/torchquad
27 | :alt: GitHub license
28 |
29 | .. image:: https://img.shields.io/conda/vn/conda-forge/torchquad
30 | :target: https://img.shields.io/conda/vn/conda-forge/torchquad
31 | :alt: Conda (channel only)
32 |
33 | .. image:: https://img.shields.io/pypi/v/torchquad
34 | :target: https://img.shields.io/pypi/v/torchquad
35 | :alt: PyPI Version
36 |
37 | .. image:: https://img.shields.io/pypi/pyversions/torchquad
38 | :target: https://img.shields.io/pypi/pyversions/torchquad
39 | :alt: PyPI - Python Version
40 |
41 | .. image:: https://img.shields.io/github/contributors/esa/torchquad
42 | :target: https://img.shields.io/github/contributors/esa/torchquad
43 | :alt: GitHub contributors
44 |
45 | .. image:: https://img.shields.io/github/issues/esa/torchquad
46 | :target: https://img.shields.io/github/issues/esa/torchquad
47 | :alt: GitHub issues
48 |
49 | .. image:: https://img.shields.io/github/issues-pr/esa/torchquad
50 | :target: https://img.shields.io/github/issues-pr/esa/torchquad
51 | :alt: GitHub pull requests
52 |
53 | .. image:: https://img.shields.io/conda/dn/conda-forge/torchquad
54 | :target: https://img.shields.io/conda/dn/conda-forge/torchquad
55 | :alt: Conda
56 |
57 | .. image:: https://img.shields.io/pypi/dm/torchquad
58 | :target: https://img.shields.io/pypi/dm/torchquad
59 | :alt: PyPI - Downloads
60 |
61 | .. image:: https://joss.theoj.org/papers/d6f22f83f1a889ddf83b3c2e0cd0919c/status.svg
62 | :target: https://joss.theoj.org/papers/d6f22f83f1a889ddf83b3c2e0cd0919c
63 | :alt: JOSS Status
64 |
65 |
66 | Welcome to torchquad's documentation!
67 | =====================================================
68 |
69 | *torchquad* is a Python3 module for multidimensional numerical integration on the GPU.
70 | It uses `autoray `_ to support
71 | `PyTorch `_ and
72 | :ref:`other machine learning modules `.
73 |
74 | You can see the latest code at https://github.com/esa/torchquad.
75 |
76 | .. toctree::
77 | :maxdepth: 2
78 | :caption: Overview:
79 |
80 | install
81 | tutorial
82 | integration_methods
83 | autodoc
84 |
85 |
86 | .. toctree::
87 | :maxdepth: 1
88 | :caption: Contact:
89 |
90 | contact
91 | contributing
92 |
93 | Roadmap
94 | ---------
95 |
96 | See the `open issues `_ for a list of proposed features (and known issues).
97 |
98 |
99 | License
100 | --------
101 |
102 | Distributed under the GPL-3.0 License. See `LICENSE `_ for more information.
103 |
104 |
105 | Indices and tables
106 | ==================
107 |
108 | * :ref:`genindex`
109 | * :ref:`modindex`
110 | * :ref:`search`
111 |
--------------------------------------------------------------------------------
/torchquadMy/docs/source/install.rst:
--------------------------------------------------------------------------------
1 | .. _installation:
2 |
3 | Getting started
4 | ===============
5 |
6 | This is a brief introduction on how to set up *torchquad*.
7 |
8 | Prerequisites
9 | --------------
10 |
11 | *torchquad* is built with
12 |
13 | - `autoray `_, which means the implemented quadrature supports `NumPy `_ and can be used for machine learning with modules such as `PyTorch `_, `JAX `_ and `Tensorflow `_, where it is fully differentiable
14 | - `conda `_, which will take care of all requirements for you
15 |
16 | We recommend using `conda `_, especially if you want to utilize the GPU.
17 | With PyTorch it will automatically set up CUDA and the cudatoolkit for you, for example.
18 | Note that *torchquad* also works on the CPU; however, it is optimized for GPU usage.
19 | torchquad's GPU support is tested only on NVIDIA cards with CUDA. We are investigating future support for AMD cards through `ROCm `_.
20 |
21 | For a detailed list of required packages and packages for numerical backends,
22 | please refer to the conda environment files `environment.yml `_ and
23 | `environment_all_backends.yml `_.
24 | torchquad has been tested with JAX 0.2.25, NumPy 1.19.5, PyTorch 1.10.0 and Tensorflow 2.7.0; other versions of the backends should work as well.
25 |
26 |
27 | Installation
28 | -------------
29 |
30 | First, we must make sure we have `torchquad `_ installed.
31 | The easiest way to do this is simply to
32 |
33 | .. code-block:: bash
34 |
35 | conda install torchquad -c conda-forge
36 |
37 | Alternatively, it is also possible to use
38 |
39 | .. code-block:: bash
40 |
41 | pip install torchquad
42 |
43 | The PyTorch backend with CUDA support can be installed with
44 |
45 | .. code-block:: bash
46 |
47 | conda install "cudatoolkit>=11.1" "pytorch>=1.9=*cuda*" -c conda-forge -c pytorch
48 |
49 | Note that since PyTorch is not yet on *conda-forge* for Windows, we have
50 | explicitly included it here using ``-c pytorch``.
51 | Note also that installing PyTorch with *pip* may **not** set it up with CUDA
52 | support.
53 | Therefore, we recommend to use *conda*.
54 |
55 | Here are installation instructions for other numerical backends:
56 |
57 | .. code-block:: bash
58 |
59 | conda install "tensorflow>=2.6.0=cuda*" -c conda-forge
60 | pip install "jax[cuda]>=0.2.22" --find-links https://storage.googleapis.com/jax-releases/jax_releases.html # linux only
61 | conda install "numpy>=1.19.5" -c conda-forge
62 |
63 | More installation instructions for numerical backends can be found in
64 | `environment_all_backends.yml `__
65 | and at the backend documentations, for example
66 | https://pytorch.org/get-started/locally/,
67 | https://github.com/google/jax/#installation and
68 | https://www.tensorflow.org/install/gpu, and often there are multiple
69 | ways to install them.
70 |
71 |
72 | Usage
73 | -----
74 |
75 | Now you are ready to use *torchquad*.
76 | A brief example of how *torchquad* can be used to compute a simple integral can be found on our `GitHub `_.
77 | For a more thorough introduction, please refer to the `tutorial `_.
78 |
--------------------------------------------------------------------------------
/torchquadMy/docs/source/integration_methods.rst:
--------------------------------------------------------------------------------
1 | Integration methods
2 | ======================================
3 |
4 | This is the list of all available integration methods in *torchquad*.
5 |
6 | We are continuously implementing new methods in our library.
7 | For the code behind the integration methods, please see the `code page `_
8 | or check out our full code and latest news at https://github.com/esa/torchquad.
9 |
10 | .. contents::
11 |
12 | Stochastic Methods
13 | ----------------------
14 |
15 | Monte Carlo Integrator
16 | ^^^^^^^^^^^^^^^^^^^^^^
17 |
18 | .. autoclass:: torchquad.MonteCarlo
19 | :members: integrate
20 | :noindex:
21 |
22 | VEGAS Enhanced
23 | ^^^^^^^^^^^^^^^^^^^^^^
24 |
25 | .. autoclass:: torchquad.VEGAS
26 | :members: integrate
27 | :noindex:
28 |
29 | Deterministic Methods
30 | ----------------------
31 |
32 | Boole's Rule
33 | ^^^^^^^^^^^^^^^^^^^^^^
34 |
35 | .. autoclass:: torchquad.Boole
36 | :members: integrate
37 | :noindex:
38 |
39 |
40 | Simpson's Rule
41 | ^^^^^^^^^^^^^^^^^^^^^^
42 |
43 | .. autoclass:: torchquad.Simpson
44 | :members: integrate
45 | :noindex:
46 |
47 |
48 | Trapezoid Rule
49 | ^^^^^^^^^^^^^^^^^^^^^^
50 |
51 | .. autoclass:: torchquad.Trapezoid
52 | :members: integrate
53 | :noindex:
54 |
--------------------------------------------------------------------------------
/torchquadMy/environment.yml:
--------------------------------------------------------------------------------
1 | name: torchquad
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | - autoray>=0.2.5
6 | - loguru>=0.5.3
7 | - matplotlib>=3.3.3
8 | - pytest>=6.2.1
9 | - python>=3.8
10 | - scipy>=1.6.0
11 | - sphinx>=3.4.3
12 | - sphinx_rtd_theme>=0.5.1
13 | - tqdm>=4.56.0
14 |
--------------------------------------------------------------------------------
/torchquadMy/environment_all_backends.yml:
--------------------------------------------------------------------------------
1 | # A conda environment file to install all supported numerical backends
2 | name: torchquad
3 | channels:
4 | - conda-forge
5 | - pytorch
6 | dependencies:
7 | - autoray>=0.2.5
8 | - loguru>=0.5.3
9 | - matplotlib>=3.3.3
10 | - pytest>=6.2.1
11 | - python>=3.8
12 | - scipy>=1.6.0
13 | - sphinx>=3.4.3
14 | - sphinx_rtd_theme>=0.5.1
15 | - tqdm>=4.56.0
16 | # Numerical backend installations with CUDA support where possible:
17 | - numpy>=1.19.5
18 | - cudatoolkit>=11.1
19 | - pytorch>=1.9=*cuda*
20 | - tensorflow-gpu>=2.6.0
21 | # jaxlib with CUDA support is not available for conda
22 | - pip:
23 | - --find-links https://storage.googleapis.com/jax-releases/jax_releases.html
24 | - jax[cuda]>=0.2.22 # this will only work on linux. for win see e.g. https://github.com/cloudhan/jax-windows-builder
25 |
--------------------------------------------------------------------------------
/torchquadMy/logos/torchquad_black_background_PNG.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/torchquadMy/logos/torchquad_black_background_PNG.png
--------------------------------------------------------------------------------
/torchquadMy/logos/torchquad_logo_white_background.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
223 |
--------------------------------------------------------------------------------
/torchquadMy/logos/torchquad_white_background_PNG.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/torchquadMy/logos/torchquad_white_background_PNG.png
--------------------------------------------------------------------------------
/torchquadMy/paper/paper.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'torchquad: Numerical Integration in Arbitrary Dimensions with PyTorch'
3 | tags:
4 | - Python
5 | - n-dimensional
6 | - numerical integration
7 | - GPU
8 | - automatic differentiation
9 | - PyTorch
10 | - high-performance computing
11 | - machine learning
12 | authors:
13 | - name: Pablo Gómez^[corresponding author]
14 | orcid: 0000-0002-5631-8240
15 | affiliation: 1
16 | - name: Håvard Hem Toftevaag
17 | orcid: 0000-0003-4692-5722
18 | affiliation: 1
19 | - name: Gabriele Meoni
20 | orcid: 0000-0001-9311-6392
21 | affiliation: 1
22 | affiliations:
23 | - name: Advanced Concepts Team, European Space Agency, Noordwijk, The Netherlands
24 | index: 1
25 | date: 15 June 2021
26 | bibliography: paper.bib
27 |
28 | ---
29 |
30 | # Summary
31 |
32 | \texttt{torchquad} is a \texttt{Python} module for $n$-dimensional numerical integration optimized for graphics processing units (GPUs).
33 | Various deterministic and stochastic integration methods, such as \texttt{Newton\textendash Cotes} formulas and \texttt{Monte} \texttt{Carlo} integration methods like \texttt{VEGAS} \texttt{Enhanced} [@VegasEnhanced-paper], are available for computationally efficient integration for arbitrary dimensionality $n_{\mathrm{d}}$.
34 | As it is implemented using \texttt{PyTorch} [@PyTorch2019], one of the most popular machine learning frameworks, \texttt{torchquad} provides fully automatic differentiation throughout the integration, which is essential for many machine learning applications.
35 |
36 | # Statement of Need
37 |
38 | Multidimensional integration is needed in many fields, such as physics (ranging from particle physics [@ParticlePhysics-Paper] to astrophysics [@izzo2021geodesy]), applied finance [@AppliedFinance-Paper], medical statistics [@MedicalStatistics-Paper], and machine learning [@VEGASinMachineLearning-Paper].
39 | Most of the conventional \texttt{Python} packages for multidimensional integration, such as \texttt{quadpy} [@quadpy] and \texttt{nquad} [@scipy], only target and are optimized for central processing units (CPUs).
40 | However, as many numerical integration methods are embarrassingly parallel, GPUs can offer superior computational performance in their computation.
41 | Furthermore, numerical integration methods typically suffer from the so-called \textit{curse of dimensionality} [@ZMCintegral].
42 | This phenomenon refers to the fact that the computational complexity of the integration grows exponentially with the number of dimensions [@CurseOfDim-Book]. Reducing the error of the integration value requires increasing the number of function evaluation points $N$ exponentially, which significantly increases the runtime of the computation, especially for higher dimensions.
43 | Previous work has demonstrated that this problem can be mitigated by leveraging the \textit{single instruction, multiple data} parallelization of GPUs [@ZMCintegral].
44 |
45 | Although GPU-based implementations for multidimensional numerical integration in \mbox{\texttt{Python}} exist, some of these packages do not allow fully automatic differentiation [@borowka2019gpu], which is crucial for many machine learning applications [@Baydin2018autodiffinML]. Recently, to fill this gap, the packages \texttt{VegasFlow} [@VegasFlow-Paper] and \mbox{\texttt{ZMCintegral}} [@ZMCintegral] were developed. Both of these implementations are, however, based on \texttt{TensorFlow} [@Tensorflow], and there are currently no packages available that enable more than one-dimensional integration in \texttt{PyTorch}.
46 | Additionally, the available GPU-based \texttt{Python} packages that allow fully automatic differentiation rely solely on \texttt{Monte} \texttt{Carlo} methods [@ZMCintegral; @VegasFlow-Paper].
47 | Even though such methods offer good speed\textendash accuracy trade-offs for problems of high dimensionality $n_{\mathrm{d}}$, the efficiency of deterministic methods, such as the \texttt{Newton\textendash Cotes} formulas, is often superior for lower dimensionality [@Vegas-paper].
48 |
49 | In summary, to the authors' knowledge, \texttt{torchquad} is the first \texttt{PyTorch}-based module for $n$-dimensional numerical integration.
50 | Furthermore, it incorporates several deterministic and stochastic methods, including \texttt{Newton\textendash Cotes} formulas and \texttt{VEGAS} \texttt{Enhanced}, which allow obtaining high-accuracy estimates for varying dimensionality at configurable computational cost as controlled by the maximum number of function evaluations $N$. It is, to the authors' knowledge, also the first GPU-capable implementation of \texttt{VEGAS} \texttt{Enhanced} [@VegasEnhanced-paper], which improves on its predecessor \texttt{VEGAS} by introducing an adaptive stratified sampling strategy.
51 |
52 | Finally, being \texttt{PyTorch}-based, \texttt{torchquad} is fully differentiable, extending its applicability to use cases such as those in machine learning. In these applications, it is typically necessary to compute the gradient of some parameters with regard to input variables to perform updates of the trainable parameters in the machine learning model. With \texttt{torchquad}, e.g., the employed loss function can contain integrals without breaking the automatic differentiation required for training.
53 |
54 |
55 | # Implemented Integration Methods
56 |
57 | \texttt{torchquad} features fully vectorized implementations of various deterministic and stochastic methods to perform $n$-dimensional integration over cubical domains.
58 | In particular, the following deterministic integration methods are available in \texttt{torchquad} (version 0.2.1):
59 |
60 | * \texttt{Trapezoid} \texttt{Rule} [@sag1964numerical]
61 | * \texttt{Simpson's} \texttt{Rule} [@sag1964numerical]
62 | * \texttt{Boole's} \texttt{Rule} [@ubale2012numerical]
63 |
64 | The stochastic integration methods implemented in \texttt{torchquad} so far are:
65 |
66 | * \texttt{Classic} \texttt{Monte} \texttt{Carlo} \texttt{Integrator} [@caflisch1998monte]
67 | * \texttt{VEGAS} \texttt{Enhanced} (\mbox{\texttt{VEGAS+}}) integration method [@VegasEnhanced-paper]
68 |
69 | The functionality and the convergence of all the methods are ensured through automatic unit testing, which relies on an extensible set of different test functions.
Both single and double precision are supported to allow different trade-offs between accuracy and memory utilization. Even though it is optimized for GPUs, \texttt{torchquad} can also be employed without a GPU, with no functional limitations.
71 |
72 | # Installation \& Contribution
73 |
74 | The \texttt{torchquad} package is implemented in \texttt{Python} \texttt{3.8} and is openly available under a GPL-3 license. Installation with either \texttt{pip} (\texttt{PyPi})\footnote{\texttt{torchquad} package on \texttt{PyPi}, \url{https://pypi.org/project/torchquad/}} or \texttt{conda}\footnote{\texttt{torchquad} package on \texttt{conda}, \url{https://anaconda.org/conda-forge/torchquad}} is available. Our public \texttt{GitHub} repository\footnote{\texttt{torchquad} \texttt{GitHub} repository, \url{https://github.com/esa/torchquad}} provides users with direct access to the main development branch. Users wishing to contribute to \texttt{torchquad} can submit issues or pull requests to our \texttt{GitHub} repository following the contribution guidelines outlined there.
75 |
76 | # Tutorials
77 |
78 | The \texttt{torchquad} documentation, hosted on \texttt{Read} \texttt{the} \texttt{Docs},\footnote{\texttt{torchquad} documentation on \texttt{Read} \texttt{the} \texttt{Docs}, \url{https://torchquad.readthedocs.io/}} provides some examples of the use of \texttt{torchquad} for one-dimensional and multidimensional integration utilizing a variety of the implemented methods.
79 |
80 | # References
81 |
--------------------------------------------------------------------------------
/torchquadMy/requirements.txt:
--------------------------------------------------------------------------------
1 | loguru>=0.5.3
2 | matplotlib>=3.3.3
3 | autoray>=0.2.5
4 | scipy>=1.6.0
5 | tqdm>=4.56.0
6 |
--------------------------------------------------------------------------------
/torchquadMy/resources/torchquad_convergence.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/torchquadMy/resources/torchquad_convergence.png
--------------------------------------------------------------------------------
/torchquadMy/resources/torchquad_runtime.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/torchquadMy/resources/torchquad_runtime.png
--------------------------------------------------------------------------------
/torchquadMy/rtd_environment.yml:
--------------------------------------------------------------------------------
1 | name: rtd_torchquad
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | - ipython
6 | - loguru>=0.5.3
7 | - matplotlib>=3.3.3
8 | - pytest>=6.2.1
9 | - python>=3.8
10 | - autoray>=0.2.5
11 | - scipy>=1.6.0
12 | - sphinx>=3.4.3
13 | - sphinx_rtd_theme>=0.5.1
14 | - tqdm>=4.56.0
15 |
--------------------------------------------------------------------------------
/torchquadMy/setup.py:
--------------------------------------------------------------------------------
1 | """A setuptools based setup module.
2 | See:
3 | https://packaging.python.org/guides/distributing-packages-using-setuptools/
4 | https://github.com/pypa/sampleproject
5 | """
6 |
7 | # Always prefer setuptools over distutils
8 | from setuptools import setup
9 |
# Read the long description once via a context manager so the file handle is
# closed promptly, and decode explicitly as UTF-8 (the default encoding is
# platform-dependent and can break the build e.g. on Windows).
with open("README.md", encoding="utf-8") as readme_file:
    long_description = readme_file.read()

setup(
    name="torchquad",
    version="0.3.x",
    description="Package providing torch-based numerical integration methods.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/esa/torchquad",
    author="ESA Advanced Concepts Team",
    author_email="pablo.gomez@esa.int",
    # NOTE(review): torch itself is not listed as a dependency here —
    # presumably so users install the CUDA/CPU build matching their system;
    # confirm against the project's install documentation.
    install_requires=[
        "loguru>=0.5.3",
        "matplotlib>=3.3.3",
        "scipy>=1.6.0",
        "tqdm>=4.56.1",
        "autoray>=0.2.5",
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Mathematics",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Programming Language :: Python :: 3.8",
    ],
    packages=[
        "torchquad",
        "torchquad.integration",
        "torchquad.plots",
        "torchquad.utils",
    ],
    python_requires=">=3.7, <4",
    project_urls={
        "Source": "https://github.com/esa/torchquad/",
    },
)
45 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | from loguru import logger
3 |
4 | # TODO: Currently this is the way to expose to the docs
5 | # hopefully changes with setup.py
6 | from .integration.integration_grid import IntegrationGrid
7 | from .integration.monte_carlo import MonteCarlo
8 | from .integration.trapezoid import Trapezoid
9 | from .integration.simpson import Simpson
10 | from .integration.boole import Boole
11 | from .integration.vegas import VEGAS
12 | from .integration.BatchVegas import BatchVEGAS
13 | from .integration.BatchMulVegas import BatchMulVEGAS
14 |
15 | from .integration.rng import RNG
16 |
17 | from .plots.plot_convergence import plot_convergence
18 | from .plots.plot_runtime import plot_runtime
19 |
20 | from .utils.set_log_level import set_log_level
21 | from .utils.enable_cuda import enable_cuda
22 | from .utils.set_precision import set_precision
23 | from .utils.set_up_backend import set_up_backend
24 | from .utils.deployment_test import _deployment_test
25 |
# Public API of the torchquad package.
__all__ = [
    "IntegrationGrid",
    "MonteCarlo",
    "Trapezoid",
    "Simpson",
    "Boole",
    "VEGAS",
    "RNG",
    "BatchVEGAS",
    # BatchMulVEGAS is imported above; it was missing from the public API list.
    "BatchMulVEGAS",
    "plot_convergence",
    "plot_runtime",
    "enable_cuda",
    "set_precision",
    "set_log_level",
    "set_up_backend",
    "_deployment_test",
]

# The default log level can be overridden through the TORCHQUAD_LOG_LEVEL
# environment variable; fall back to WARNING to keep imports quiet.
set_log_level(os.environ.get("TORCHQUAD_LOG_LEVEL", "WARNING"))
logger.info("Initializing torchquad.")
46 |
47 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/base_integrator.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | from autoray import numpy as anp
3 | from autoray import infer_backend
4 | from loguru import logger
5 |
6 | from .utils import _check_integration_domain
7 |
8 |
class BaseIntegrator:
    """The (abstract) integrator that all other integrators inherit from.

    Provides shared integrand-evaluation and input-validation helpers;
    sub-classes must override integrate.
    """

    # Function to evaluate
    _fn = None

    # Dimensionality of function to evaluate
    _dim = None

    # Integration domain
    _integration_domain = None

    # Number of function evaluations
    _nr_of_fevals = None

    def __init__(self):
        self._nr_of_fevals = 0

    def integrate(self):
        raise NotImplementedError(
            "This is an abstract base class. Should not be called."
        )

    def _eval(self, points):
        """Call evaluate_integrand to evaluate self._fn function at the passed points and update self._nr_of_fevals

        Args:
            points (backend tensor): Integration points

        Returns:
            backend tensor: Integrand function output
        """
        result, num_points = self.evaluate_integrand(self._fn, points)
        self._nr_of_fevals += num_points
        return result

    @staticmethod
    def evaluate_integrand(fn, points):
        """Evaluate the integrand function at the passed points

        Args:
            fn (function): Integrand function
            points (backend tensor): Integration points

        Returns:
            backend tensor: Integrand function output
            int: Number of evaluated points

        Raises:
            ValueError: If the integrand does not return one value per input point.
        """
        num_points = points.shape[0]
        result = fn(points)
        # If the integrand returned values from a different numerical backend
        # than the input points, convert them (possibly slow: may involve
        # CPU<->GPU memory transfers).
        if infer_backend(result) != infer_backend(points):
            warnings.warn(
                "The passed function's return value has a different numerical backend than the passed points. Will try to convert. Note that this may be slow as it results in memory transfers between CPU and GPU, if torchquad uses the GPU."
            )
            result = anp.array(result, like=points)

        num_results = result.shape[0]
        if num_results != num_points:
            raise ValueError(
                f"The passed function was given {num_points} points but only returned {num_results} value(s). "
                f"Please ensure that your function is vectorized, i.e. can be called with multiple evaluation points at once. It should return a tensor "
                f"where first dimension matches length of passed elements. "
            )

        return result, num_points

    @staticmethod
    def _check_inputs(dim=None, N=None, integration_domain=None):
        """Used to check input validity

        Args:
            dim (int, optional): Dimensionality of function to integrate. Defaults to None.
            N (int, optional): Total number of integration points. Defaults to None.
            integration_domain (list or backend tensor, optional): Integration domain, e.g. [[0,1],[1,2]]. Defaults to None.

        Raises:
            ValueError: if inputs are not compatible with each other.
        """
        logger.debug("Checking inputs to Integrator.")
        if dim is not None:
            if dim < 1:
                raise ValueError("Dimension needs to be 1 or larger.")

        if N is not None:
            # Check the type before the comparison so that a non-numeric N
            # raises the documented ValueError instead of a TypeError
            # from evaluating "N < 1".
            if type(N) is not int or N < 1:
                raise ValueError("N has to be a positive integer.")

        if integration_domain is not None:
            dim_domain = _check_integration_domain(integration_domain)
            if dim is not None and dim != dim_domain:
                raise ValueError(
                    "The dimension of the integration domain must match the passed function dimensionality dim."
                )
99 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/boole.py:
--------------------------------------------------------------------------------
1 | from autoray import numpy as anp
2 | import warnings
3 | from loguru import logger
4 |
5 | from .newton_cotes import NewtonCotes
6 |
7 |
class Boole(NewtonCotes):

    """Boole's rule. See https://en.wikipedia.org/wiki/Newton%E2%80%93Cotes_formulas#Closed_Newton%E2%80%93Cotes_formulas ."""

    def __init__(self):
        super().__init__()

    def integrate(self, fn, dim, N=None, integration_domain=None, backend=None):
        """Integrate fn over the given domain with the composite Boole rule.

        Args:
            fn (func): The function to integrate over.
            dim (int): Dimensionality of the integration domain.
            N (int, optional): Total number of sample points to use for the integration. N has to be such that N^(1/dim) - 1 % 4 == 0. Defaults to 5 points per dimension if None is given.
            integration_domain (list or backend tensor, optional): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. It also determines the numerical backend if possible.
            backend (string, optional): Numerical backend. This argument is ignored if the backend can be inferred from integration_domain. Defaults to the backend from the latest call to set_up_backend or "torch" for backwards compatibility.

        Returns:
            backend-specific number: Integral value
        """
        return super().integrate(fn, dim, N, integration_domain, backend)

    @staticmethod
    def _apply_composite_rule(cur_dim_areas, dim, hs):
        """Collapse the sampled values one dimension at a time using the composite Boole weights."""
        for axis in range(dim):
            # Weighted sum over every group of five consecutive slices along
            # the last axis; stride 4 so neighbouring groups share a slice.
            weighted = (
                7 * cur_dim_areas[..., 0:-4][..., ::4]
                + 32 * cur_dim_areas[..., 1:-3][..., ::4]
                + 12 * cur_dim_areas[..., 2:-2][..., ::4]
                + 32 * cur_dim_areas[..., 3:-1][..., ::4]
                + 7 * cur_dim_areas[..., 4:][..., ::4]
            )
            # Scale by 2h/45 (= h/22.5) and reduce the processed dimension.
            cur_dim_areas = anp.sum(hs[axis] / 22.5 * weighted, axis=dim - axis - 1)
        return cur_dim_areas

    @staticmethod
    def _get_minimal_N(dim):
        """Get the minimal number of points N for the integrator rule"""
        return 5**dim

    @staticmethod
    def _adjust_N(dim, N):
        """Round N down to a point count that is valid for Boole's rule.

        The rule needs at least 5 points per dimension, and the per-dimension
        count must have the form 1 + 4n for a positive integer n.

        Args:
            dim (int): Dimensionality of the integration domain.
            N (int): Total number of sample points to use for the integration.

        Returns:
            int: An N satisfying N^(1/dim) - 1 % 4 == 0.
        """
        # The epsilon compensates for float rounding, e.g. (5**2)**(1/2) = 4.999...
        n_per_dim = int(N ** (1.0 / dim) + 1e-8)
        logger.debug(
            "Checking if N per dim is >=5 and N = 1 + 4n, where n is a positive integer."
        )

        if n_per_dim < 5:
            warnings.warn(
                "N per dimension cannot be lower than 5. "
                "N per dim will now be changed to 5."
            )
            return 5**dim

        excess = (n_per_dim - 1) % 4
        if excess != 0:
            new_n_per_dim = n_per_dim - excess
            warnings.warn(
                "N per dimension must be N = 1 + 4n with n a positive integer due to necessary subdivisions. "
                "N per dim will now be changed to the next lower N satisfying this, i.e. "
                f"{n_per_dim} -> {new_n_per_dim}."
            )
            return new_n_per_dim**dim

        return N
89 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/integration_grid.py:
--------------------------------------------------------------------------------
1 | from autoray import numpy as anp
2 | from autoray import infer_backend
3 | from time import perf_counter
4 | from loguru import logger
5 |
6 | from .utils import (
7 | _linspace_with_grads,
8 | _check_integration_domain,
9 | _setup_integration_domain,
10 | )
11 |
12 |
class IntegrationGrid:
    """This class is used to store the integration grid for methods like Trapezoid or Simpsons, which require a grid."""

    points = None  # integration points
    h = None  # mesh width
    _N = None  # number of mesh points
    _dim = None  # dimensionality of the grid
    _runtime = None  # runtime for the creation of the integration grid

    def __init__(self, N, integration_domain):
        """Creates an integration grid of N points in the passed domain. Dimension will be len(integration_domain)

        Args:
            N (int): Total desired number of points in the grid (will take next lower root depending on dim)
            integration_domain (list or backend tensor): Domain to choose points in, e.g. [[-1,1],[0,1]]. It also determines the numerical backend (if it is a list, the backend is "torch").
        """
        start = perf_counter()
        self._check_inputs(N, integration_domain)
        # Plain Python lists are converted to a torch tensor for
        # backwards compatibility.
        if infer_backend(integration_domain) == "builtins":
            integration_domain = _setup_integration_domain(
                len(integration_domain), integration_domain, backend="torch"
            )
        self._dim = integration_domain.shape[0]

        # TODO Add that N can be different for each dimension
        # A rounding error occurs for certain numbers with certain powers,
        # e.g. (4**3)**(1/3) = 3.99999... Because int() floors the number,
        # i.e. int(3.99999...) -> 3, a little error term is useful
        self._N = int(N ** (1.0 / self._dim) + 1e-8)  # convert to points per dim

        logger.opt(lazy=True).debug(
            "Creating {dim}-dimensional integration grid with {N} points over {dom}",
            dim=lambda: str(self._dim),
            N=lambda: str(N),
            dom=lambda: str(integration_domain),
        )

        # Check if domain requires gradient
        if hasattr(integration_domain, "requires_grad"):
            requires_grad = integration_domain.requires_grad
        else:
            requires_grad = False

        grid_1d = []
        # Determine for each dimension grid points and mesh width
        for dim in range(self._dim):
            grid_1d.append(
                _linspace_with_grads(
                    integration_domain[dim][0],
                    integration_domain[dim][1],
                    self._N,
                    requires_grad=requires_grad,
                )
            )
        # Mesh width per dimension: spacing between the first two 1D points
        self.h = anp.stack(
            [grid_1d[dim][1] - grid_1d[dim][0] for dim in range(self._dim)],
            like=integration_domain,
        )

        logger.opt(lazy=True).debug("Grid mesh width is {h}", h=lambda: str(self.h))

        # Get grid points: cartesian product of the 1D grids, flattened
        # into a [number_of_points, dim] tensor
        points = anp.meshgrid(*grid_1d)
        self.points = anp.stack(
            [mg.ravel() for mg in points], axis=1, like=integration_domain
        )

        logger.info("Integration grid created.")

        self._runtime = perf_counter() - start

    def _check_inputs(self, N, integration_domain):
        """Used to check input validity

        Args:
            N (int): Total desired number of grid points.
            integration_domain (list or backend tensor): Integration domain.

        Raises:
            ValueError: If N is too small or the domain is invalid.
        """
        logger.debug("Checking inputs to IntegrationGrid.")
        dim = _check_integration_domain(integration_domain)

        if N < 2:
            raise ValueError("N has to be > 1.")

        if N ** (1.0 / dim) < 2:
            # The original passed the message fragments as separate ValueError
            # arguments, which produced a tuple-like message; use one string.
            raise ValueError(
                f"Cannot create a {dim}-dimensional grid with {N} points. "
                "Too few points per dimension."
            )
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/newton_cotes.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | from autoray import infer_backend
3 |
4 | from .base_integrator import BaseIntegrator
5 | from .integration_grid import IntegrationGrid
6 | from .utils import _setup_integration_domain
7 |
8 |
class NewtonCotes(BaseIntegrator):
    """The abstract integrator that Composite Newton Cotes integrators inherit from"""

    def __init__(self):
        super().__init__()

    def integrate(self, fn, dim, N, integration_domain, backend):
        """Integrate the passed function on the passed domain using a Composite Newton Cotes rule.
        The argument meanings are explained in the sub-classes.

        Returns:
            float: integral value
        """
        # If N is None, use the minimal required number of points per dimension
        if N is None:
            N = self._get_minimal_N(dim)

        integration_domain = _setup_integration_domain(dim, integration_domain, backend)
        self._check_inputs(dim=dim, N=N, integration_domain=integration_domain)

        # Build the evaluation grid; N may be adjusted to a valid count inside
        grid_points, hs, n_per_dim = self.calculate_grid(N, integration_domain)

        logger.debug("Evaluating integrand on the grid.")
        function_values, num_points = self.evaluate_integrand(fn, grid_points)
        self._nr_of_fevals = num_points

        return self.calculate_result(function_values, dim, n_per_dim, hs)

    def calculate_result(self, function_values, dim, n_per_dim, hs):
        """Apply the Composite Newton Cotes rule to calculate a result from the evaluated integrand.

        Args:
            function_values (backend tensor): Output of the integrand
            dim (int): Dimensionality
            n_per_dim (int): Number of grid slices per dimension
            hs (backend tensor): Distances between grid slices for each dimension

        Returns:
            backend tensor: Quadrature result
        """
        # Reshape the output to be [N,N,...] points instead of [dim*N] points
        function_values = function_values.reshape([n_per_dim] * dim)

        logger.debug("Computing areas.")

        # _apply_composite_rule is defined by the concrete sub-class
        # (e.g. Trapezoid, Simpson, Boole)
        result = self._apply_composite_rule(function_values, dim, hs)

        logger.opt(lazy=True).info(
            "Computed integral: {result}", result=lambda: str(result)
        )
        return result

    def calculate_grid(self, N, integration_domain):
        """Calculate grid points, widths and N per dim

        Args:
            N (int): Number of points
            integration_domain (backend tensor): Integration domain

        Returns:
            backend tensor: Grid points
            backend tensor: Grid widths
            int: Number of grid slices per dimension
        """
        # Round N down to a count that is valid for the concrete rule
        N = self._adjust_N(dim=integration_domain.shape[0], N=N)

        # Log with lazy to avoid redundant synchronisations with certain
        # backends
        logger.opt(lazy=True).debug(
            "Creating a grid for {name} to integrate a function with {N} points over {d}.",
            name=lambda: type(self).__name__,
            N=lambda: str(N),
            d=lambda: str(integration_domain),
        )

        # Create grid and assemble evaluation points
        grid = IntegrationGrid(N, integration_domain)

        return grid.points, grid.h, grid._N

    def get_jit_compiled_integrate(
        self, dim, N=None, integration_domain=None, backend=None
    ):
        """Create an integrate function where the performance-relevant steps except the integrand evaluation are JIT compiled.
        Use this method only if the integrand cannot be compiled.
        The compilation happens when the function is executed the first time.
        With PyTorch, return values of different integrands passed to the compiled function must all have the same format, e.g. precision.

        Args:
            dim (int): Dimensionality of the integration domain.
            N (int, optional): Total number of sample points to use for the integration. See the integrate method documentation for more details.
            integration_domain (list or backend tensor, optional): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. It also determines the numerical backend if possible.
            backend (string, optional): Numerical backend. This argument is ignored if the backend can be inferred from integration_domain. Defaults to the backend from the latest call to set_up_backend or "torch" for backwards compatibility.

        Returns:
            function(fn, integration_domain): JIT compiled integrate function where all parameters except the integrand and domain are fixed
        """
        # If N is None, use the minimal required number of points per dimension
        if N is None:
            N = self._get_minimal_N(dim)

        integration_domain = _setup_integration_domain(dim, integration_domain, backend)
        self._check_inputs(dim=dim, N=N, integration_domain=integration_domain)
        backend = infer_backend(integration_domain)
        if backend in ["tensorflow", "jax"]:
            # Tensorflow and JAX automatically recompile functions if
            # the parameters change
            if backend == "tensorflow":
                # Compiled helpers are cached on self so repeated calls
                # reuse them
                if not hasattr(self, "_tf_jit_calculate_grid"):
                    import tensorflow as tf

                    self._tf_jit_calculate_grid = tf.function(
                        self.calculate_grid, jit_compile=True
                    )
                    self._tf_jit_calculate_result = tf.function(
                        self.calculate_result, jit_compile=True
                    )
                jit_calculate_grid = self._tf_jit_calculate_grid
                jit_calculate_result = self._tf_jit_calculate_result
            elif backend == "jax":
                if not hasattr(self, "_jax_jit_calculate_grid"):
                    import jax

                    self._jax_jit_calculate_grid = jax.jit(
                        self.calculate_grid, static_argnames=["N"]
                    )
                    self._jax_jit_calculate_result = jax.jit(
                        self.calculate_result, static_argnames=["dim", "n_per_dim"]
                    )
                jit_calculate_grid = self._jax_jit_calculate_grid
                jit_calculate_result = self._jax_jit_calculate_result

            def compiled_integrate(fn, integration_domain):
                grid_points, hs, n_per_dim = jit_calculate_grid(N, integration_domain)
                function_values, _ = self.evaluate_integrand(fn, grid_points)
                # int() makes n_per_dim a static Python integer for the
                # reshape in calculate_result
                return jit_calculate_result(function_values, dim, int(n_per_dim), hs)

            return compiled_integrate

        elif backend == "torch":
            # Torch requires explicit tracing with example inputs.
            def do_compile(example_integrand):
                import torch

                # NOTE: N and dim are captured from the enclosing scope, so
                # the traced functions are specialized to them

                # Define traceable first and third steps
                def step1(integration_domain):
                    grid_points, hs, n_per_dim = self.calculate_grid(
                        N, integration_domain
                    )
                    return (
                        grid_points,
                        hs,
                        torch.Tensor([n_per_dim]),
                    )  # n_per_dim is constant

                dim = int(integration_domain.shape[0])

                def step3(function_values, hs):
                    return self.calculate_result(function_values, dim, n_per_dim, hs)

                # Trace the first step
                step1 = torch.jit.trace(step1, (integration_domain,))

                # Get example input for the third step
                grid_points, hs, n_per_dim = step1(integration_domain)
                n_per_dim = int(n_per_dim)
                function_values, _ = self.evaluate_integrand(
                    example_integrand, grid_points
                )

                # Trace the third step
                # Avoid the warnings about a .grad attribute access of a
                # non-leaf Tensor
                if hs.requires_grad:
                    hs = hs.detach()
                    hs.requires_grad = True
                if function_values.requires_grad:
                    function_values = function_values.detach()
                    function_values.requires_grad = True
                step3 = torch.jit.trace(step3, (function_values, hs))

                # Define a compiled integrate function
                def compiled_integrate(fn, integration_domain):
                    grid_points, hs, _ = step1(integration_domain)
                    function_values, _ = self.evaluate_integrand(fn, grid_points)
                    result = step3(function_values, hs)
                    return result

                return compiled_integrate

            # Do the compilation when the returned function is executed the
            # first time
            compiled_func = [None]

            def lazy_compiled_integrate(fn, integration_domain):
                if compiled_func[0] is None:
                    compiled_func[0] = do_compile(fn)
                return compiled_func[0](fn, integration_domain)

            return lazy_compiled_integrate

        # Only the tensorflow, jax and torch branches return above
        raise ValueError(f"Compilation not implemented for backend {backend}")
211 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/rng.py:
--------------------------------------------------------------------------------
1 | from autoray import numpy as anp
2 | from autoray import get_dtype_name
3 |
4 |
5 | class RNG:
6 | """
7 | A random number generator helper class for multiple numerical backends
8 |
9 | Notes:
10 | - The seed argument may behave differently in different versions of a
11 | numerical backend and when using GPU instead of CPU
12 |
13 | - https://pytorch.org/docs/stable/notes/randomness.html
14 | - https://numpy.org/doc/stable/reference/random/generator.html#numpy.random.Generator
15 | - https://www.tensorflow.org/api_docs/python/tf/random/Generator
16 | Only the Philox RNG guarantees consistent behaviour in Tensorflow.
17 | - Often uniform random numbers are generated in [0, 1) instead of [0, 1].
18 |
19 | - numpy: random() is in [0, 1) and uniform() in [0, 1]
20 | - JAX: uniform() is in [0, 1)
21 | - torch: rand() is in [0, 1)
22 | - tensorflow: uniform() is in [0, 1)
23 | """
24 |
    def __init__(self, backend, seed=None, torch_save_state=False):
        """Initialize a RNG which can be seeded.

        An initialized RNG maintains a local PRNG state with JAX, Tensorflow and NumPy, and PyTorch if torch_save_state is True.

        Args:
            backend (string): Numerical backend, e.g. "torch".
            seed (int or None, optional): Random number generation seed. If set to None, the RNG is seeded randomly. Defaults to None.
            torch_save_state (Bool, optional): If True, maintain a separate RNG state for PyTorch. This argument can be helpful to avoid problems with integrand functions which set PyTorch's RNG seed. Unused unless backend is "torch". Defaults to False.

        Returns:
            An object whose "uniform" method generates uniform random numbers for the given backend
        """
        if backend == "numpy":
            import numpy as np

            # Generator.random samples uniformly from [0, 1) (see class notes)
            self._rng = np.random.default_rng(seed)
            self.uniform = lambda size, dtype: self._rng.random(size=size, dtype=dtype)
        elif backend == "torch":
            # Torch setup is more involved (optional separate RNG state),
            # so it lives in a dedicated helper method
            self._set_torch_uniform(seed, torch_save_state)
        elif backend == "jax":
            from jax.random import PRNGKey, split, uniform

            if seed is None:
                # Generate a random seed; copied from autoray:
                # https://github.com/jcmgray/autoray/blob/35677037863d7d0d25ff025998d9fda75dce3b44/autoray/autoray.py#L737
                from random import SystemRandom

                seed = SystemRandom().randint(-(2**63), 2**63 - 1)
            self._jax_key = PRNGKey(seed)

            def uniform_func(size, dtype):
                # JAX PRNG keys are stateless: split off a fresh subkey for
                # this draw and keep the new parent key for the next call
                self._jax_key, subkey = split(self._jax_key)
                return uniform(subkey, shape=size, dtype=dtype)

            self.uniform = uniform_func
        elif backend == "tensorflow":
            import tensorflow as tf

            if seed is None:
                self._rng = tf.random.Generator.from_non_deterministic_state()
            else:
                self._rng = tf.random.Generator.from_seed(seed)
            self.uniform = lambda size, dtype: self._rng.uniform(
                shape=size, dtype=dtype
            )
        else:
            # Fallback for other backends: seed the backend's global RNG
            # (if a seed is given) and draw via autoray; no local state kept
            if seed is not None:
                anp.random.seed(seed, like=backend)
            self._backend = backend
            self.uniform = lambda size, dtype: anp.random.uniform(
                size=size, dtype=get_dtype_name(dtype), like=self._backend
            )
78 |
    def _set_torch_uniform(self, seed, save_state):
        """Set self.uniform to generate random numbers with PyTorch

        Args:
            seed (int or None): Random number generation seed. If set to None, the RNG is seeded randomly.
            save_state (Bool): If True, maintain a separate RNG state.
        """
        import torch

        if save_state:
            # Set and restore the global RNG state before and after
            # generating random numbers

            if torch.cuda.is_initialized():
                # RNG state functions for the current CUDA device
                get_state = torch.cuda.get_rng_state
                set_state = torch.cuda.set_rng_state
            else:
                # RNG state functions for the Host
                get_state = torch.get_rng_state
                set_state = torch.set_rng_state

            # Seed the global RNG, remember the resulting state as this
            # object's private state, then restore the previous global state
            # so that callers do not observe the seeding.
            previous_rng_state = get_state()
            if seed is None:
                torch.random.seed()
            else:
                torch.random.manual_seed(seed)
            self._rng_state = get_state()
            set_state(previous_rng_state)

            def uniform_func(size, dtype):
                # Swap the state
                previous_rng_state = get_state()
                set_state(self._rng_state)
                # Generate numbers
                random_values = torch.rand(size=size, dtype=dtype)
                # Swap the state back
                self._rng_state = get_state()
                set_state(previous_rng_state)
                return random_values

            self.uniform = uniform_func
        else:
            # Use the global RNG state for random number generation
            if seed is None:
                torch.random.seed()
            else:
                torch.random.manual_seed(seed)
            self.uniform = lambda size, dtype: torch.rand(size=size, dtype=dtype)
128 |
    def uniform(self, size, dtype):
        """Generate uniform random numbers in [0, 1) for the given numerical backend.
        This function is backend-specific; its definitions are in the constructor.

        Args:
            size (list): The shape of the generated numbers tensor
            dtype (backend dtype): The dtype for the numbers, e.g. torch.float32

        Returns:
            backend tensor: A tensor with random values for the given numerical backend
        """
        # Placeholder for documentation only: the constructor overwrites this
        # method with a backend-specific instance attribute of the same name.
        pass
141 |
142 | def jax_get_key(self):
143 | """
144 | Get the current PRNGKey.
145 | This function is needed for non-determinism when JIT-compiling with JAX.
146 | """
147 | return self._jax_key
148 |
149 | def jax_set_key(self, key):
150 | """
151 | Set the PRNGKey.
152 | This function is needed for non-determinism when JIT-compiling with JAX.
153 | """
154 | self._jax_key = key
155 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/simpson.py:
--------------------------------------------------------------------------------
1 | from autoray import numpy as anp
2 | from loguru import logger
3 | import warnings
4 |
5 | from .newton_cotes import NewtonCotes
6 |
7 |
class Simpson(NewtonCotes):

    """Simpson's rule. See https://en.wikipedia.org/wiki/Newton%E2%80%93Cotes_formulas#Closed_Newton%E2%80%93Cotes_formulas ."""

    def __init__(self):
        super().__init__()

    def integrate(self, fn, dim, N=None, integration_domain=None, backend=None):
        """Integrates the passed function on the passed domain using Simpson's rule.

        Args:
            fn (func): The function to integrate over.
            dim (int): Dimensionality of the integration domain.
            N (int, optional): Total number of sample points to use for the integration. Should be odd. Defaults to 3 points per dimension if None is given.
            integration_domain (list or backend tensor, optional): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. It also determines the numerical backend if possible.
            backend (string, optional): Numerical backend. This argument is ignored if the backend can be inferred from integration_domain. Defaults to the backend from the latest call to set_up_backend or "torch" for backwards compatibility.

        Returns:
            backend-specific number: Integral value
        """
        return super().integrate(fn, dim, N, integration_domain, backend)

    @staticmethod
    def _apply_composite_rule(cur_dim_areas, dim, hs):
        """Apply composite Simpson quadrature.
        cur_dim_areas will contain the areas per dimension
        """
        # We collapse dimension by dimension
        for cur_dim in range(dim):
            # Composite Simpson weights each triple of adjacent points with
            # (1, 4, 1) * h/3; the [::2] strides pick every second point as
            # the start of a subinterval pair, so subintervals don't overlap.
            cur_dim_areas = (
                hs[cur_dim]
                / 3.0
                * (
                    cur_dim_areas[..., 0:-2][..., ::2]
                    + 4 * cur_dim_areas[..., 1:-1][..., ::2]
                    + cur_dim_areas[..., 2:][..., ::2]
                )
            )
            cur_dim_areas = anp.sum(cur_dim_areas, axis=dim - cur_dim - 1)
        return cur_dim_areas

    @staticmethod
    def _get_minimal_N(dim):
        """Get the minimal number of points N for the integrator rule"""
        # Simpson's rule needs at least 3 points per dimension
        return 3**dim

    @staticmethod
    def _adjust_N(dim, N):
        """Adjusts the current N to an odd integer >=3, if N is not that already.

        Args:
            dim (int): Dimensionality of the integration domain.
            N (int): Total number of sample points to use for the integration.

        Returns:
            int: N with an odd number >=3 of points per dimension.
        """
        # Points per dimension; the 1e-8 compensates floating point rounding
        # errors in the dim-th root
        n_per_dim = int(N ** (1.0 / dim) + 1e-8)
        logger.debug("Checking if N per dim is >=3 and odd.")

        # Simpson's rule requires odd N per dim >=3 for correctness. There is a more
        # complex rule that works for even N as well but it is not implemented here.
        if n_per_dim < 3:
            warnings.warn(
                "N per dimension cannot be lower than 3. "
                "N per dim will now be changed to 3."
            )
            N = 3**dim
        elif n_per_dim % 2 != 1:
            warnings.warn(
                "N per dimension cannot be even due to necessary subdivisions. "
                "N per dim will now be changed to the next lower integer, i.e. "
                f"{n_per_dim} -> {n_per_dim - 1}."
            )
            N = (n_per_dim - 1) ** (dim)
        return N
84 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/trapezoid.py:
--------------------------------------------------------------------------------
1 | from autoray import numpy as anp
2 |
3 | from .newton_cotes import NewtonCotes
4 |
5 |
class Trapezoid(NewtonCotes):
    """Trapezoidal rule. See https://en.wikipedia.org/wiki/Newton%E2%80%93Cotes_formulas#Closed_Newton%E2%80%93Cotes_formulas ."""

    def __init__(self):
        super().__init__()

    def integrate(self, fn, dim, N=1000, integration_domain=None, backend=None):
        """Integrates the passed function on the passed domain using the trapezoid rule.

        Args:
            fn (func): The function to integrate over.
            dim (int): Dimensionality of the function to integrate.
            N (int, optional): Total number of sample points to use for the integration. Defaults to 1000.
            integration_domain (list or backend tensor, optional): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. It also determines the numerical backend if possible.
            backend (string, optional): Numerical backend. This argument is ignored if the backend can be inferred from integration_domain. Defaults to the backend from the latest call to set_up_backend or "torch" for backwards compatibility.

        Returns:
            backend-specific number: Integral value
        """
        return super().integrate(fn, dim, N, integration_domain, backend)

    @staticmethod
    def _apply_composite_rule(cur_dim_areas, dim, hs):
        """Apply composite Trapezoid quadrature.

        cur_dim_areas will contain the areas per dimension
        """
        # Collapse one dimension per iteration: average each pair of
        # adjacent samples (trapezoid weights 1/2, 1/2), scale by the step
        # size, then sum over the collapsed axis.
        for d in range(dim):
            lower_edges = cur_dim_areas[..., 0:-1]
            upper_edges = cur_dim_areas[..., 1:]
            cur_dim_areas = hs[d] / 2.0 * (lower_edges + upper_edges)
            cur_dim_areas = anp.sum(cur_dim_areas, axis=dim - d - 1)
        return cur_dim_areas

    @staticmethod
    def _adjust_N(dim, N):
        # The trapezoid rule poses no constraints on N, so keep it unchanged.
        return N
45 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility functions for the integrator implementations including extensions for
3 | autoray, which are registered when importing this file
4 | """
5 | import sys, time
6 | from pathlib import Path
7 | # Change the path to import from the parent folder.
8 | # A relative import currently does not work when executing the tests.
9 | sys.path.append(str(Path(__file__).absolute().parent.parent))
10 |
11 | from autoray import numpy as anp
12 | from autoray import infer_backend, register_function
13 | from functools import partial
14 | from loguru import logger
15 |
16 | # from ..utils.set_precision import _get_precision
17 | from utils.set_precision import _get_precision
18 | from utils.set_up_backend import _get_default_backend
19 |
20 |
def _linspace_with_grads(start, stop, N, requires_grad):
    """Creates an equally spaced 1D grid while keeping gradients
    in regard to inputs.
    Args:
        start (backend tensor): Start point (inclusive).
        stop (backend tensor): End point (inclusive).
        N (int): Number of points.
        requires_grad (bool): Indicates if output should be recorded for backpropagation in Torch.
    Returns:
        backend tensor: Equally spaced 1D grid
    """
    if not requires_grad:
        if infer_backend(start) == "tensorflow":
            # Tensorflow determines the dtype automatically and doesn't support
            # the dtype argument here
            return anp.linspace(start, stop, N)
        return anp.linspace(start, stop, N, dtype=start.dtype)

    # The requires_grad case is only needed for Torch: build a grid on [0, 1]
    # and rescale it affinely so the result stays connected to start and stop
    # in the autograd graph.
    unit_grid = anp.linspace(
        anp.array(0.0, like=start), anp.array(1.0, like=start), N, dtype=start.dtype
    )
    unit_grid *= stop - start
    unit_grid += start
    return unit_grid
50 |
# Module-level accumulator of the total wall-clock time spent in
# _add_at_indices. NOTE(review): this looks like profiling scaffolding left
# over from debugging; verify whether it is still read anywhere.
add_time = 0
def _add_at_indices(target, indices, source, is_sorted=False):
    """
    Add source[i] to target at target[indices[i]] for each index i in-place.
    For example, with targets=[0,0,0] indices=[2,1,1,2] and source=[a,b,c,d],
    targets will be changed to [0,b+c,a+d].
    This function supports only numpy and torch.

    Args:
        target (backend tensor): Tensor to which the source values are added
        indices (int backend tensor): Indices into target for each value in source
        source (backend tensor): Values which are added to target
        is_sorted (bool, optional): Set this to True if indices is monotonically increasing to skip a redundant sorting step with the numpy backend. Defaults to False.
    """
    global add_time
    st_time = time.time()
    backend = infer_backend(target)
    if backend == "torch":
        # One in-place call does the whole accumulation for torch
        target.scatter_add_(dim=0, index=indices, src=source)
    elif backend == "numpy":
        # Use indicator matrices to reduce the Python interpreter overhead
        # Based on VegasFlow's consume_array_into_indices function
        # https://github.com/N3PDF/vegasflow/blob/21209c928d07c00ae4f789d03b83e518621f174a/src/vegasflow/utils.py#L16
        if not is_sorted:
            # Sort the indices and corresponding source array
            sort_permutation = anp.argsort(indices)
            indices = indices[sort_permutation]
            source = source[sort_permutation]
        # Maximum number of columns for the indicator matrices.
        # A higher number leads to more redundant comparisons and higher memory
        # usage but reduces the Python interpreter overhead.
        max_indicator_width = 500
        zero = anp.array(0.0, dtype=target.dtype, like=backend)
        num_indices = indices.shape[0]
        for i1 in range(0, num_indices, max_indicator_width):
            # Create an indicator matrix for source indices in {i1, i1+1, …, i2-1}
            # and corresponding target array indices in {t1, t1+1, …, t2-1}.
            # All other target array indices are irrelevant: because the indices
            # array is sorted, all values in indices[i1:i2] are bound by t1 and t2.
            i2 = min(i1 + max_indicator_width, num_indices)
            t1, t2 = indices[i1], indices[i2 - 1] + 1
            target_indices = anp.arange(t1, t2, dtype=indices.dtype, like=backend)
            indicator = anp.equal(indices[i1:i2], target_indices.reshape([t2 - t1, 1]))
            # Create a matrix which is zero everywhere except at entries where
            # the corresponding value from source should be added to the
            # corresponding entry in target, sum these source values, and add
            # the resulting vector to target
            target[t1:t2] += anp.sum(anp.where(indicator, source[i1:i2], zero), axis=1)
    else:
        raise NotImplementedError(f"Unsupported numerical backend: {backend}")

    en_time = time.time()
    # Accumulate elapsed time into the module-level profiling counter
    add_time += en_time - st_time
    # add_time += 0.1
105 |
106 |
def _setup_integration_domain(dim, integration_domain, backend):
    """Sets up the integration domain if unspecified by the user.
    Args:
        dim (int): Dimensionality of the integration domain.
        integration_domain (list or backend tensor, optional): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. It also determines the numerical backend if possible.
        backend (string or None): Numerical backend. This argument is ignored if the backend can be inferred from integration_domain. If set to None, use the backend from the latest call to set_up_backend or "torch" for backwards compatibility.
    Returns:
        backend tensor: Integration domain.
    """
    logger.debug("Setting up integration domain.")

    # Default to [-1, 1] bounds in every dimension when nothing is specified
    if integration_domain is None:
        integration_domain = [[-1.0, 1.0]] * dim

    if infer_backend(integration_domain) == "builtins":
        # Cast all integration domain values to Python3 float because
        # some numerical backends create a tensor based on the Python3 types
        domain_as_floats = [
            [float(bound) for bound in bounds] for bounds in integration_domain
        ]
        if backend is None:
            # Get a globally default backend
            backend = _get_default_backend()
        dtype_arg = _get_precision(backend)
        array_kwargs = {"like": backend}
        if dtype_arg is not None:
            # For NumPy and Tensorflow there is no global dtype, so set the
            # configured default dtype here
            array_kwargs["dtype"] = dtype_arg
        integration_domain = anp.array(domain_as_floats, **array_kwargs)

    expected_shape = (dim, 2)
    if integration_domain.shape != expected_shape:
        raise ValueError(
            "The integration domain has an unexpected shape. "
            f"Expected {expected_shape}, got {integration_domain.shape}"
        )
    return integration_domain
148 |
149 |
def _check_integration_domain(integration_domain):
    """
    Check if the integration domain has a valid shape and determine the dimension.

    Args:
        integration_domain (list or backend tensor): Integration domain, e.g. [[-1,1],[0,1]].
    Returns:
        int: Dimension represented by the domain
    Raises:
        ValueError: If the domain's shape is invalid or a lower bound exceeds
            its upper bound.
    """
    if infer_backend(integration_domain) == "builtins":
        dim = len(integration_domain)
        if dim < 1:
            raise ValueError("len(integration_domain) needs to be 1 or larger.")

        for bounds in integration_domain:
            # Each entry must be a [lower, upper] pair with lower <= upper.
            # Bug fix: the original passed multiple arguments to ValueError,
            # which makes the exception carry a tuple instead of a readable
            # message; build a single formatted message instead.
            if len(bounds) != 2 or bounds[0] > bounds[1]:
                raise ValueError(
                    f"{bounds} in {integration_domain} does not specify "
                    "a valid integration bound."
                )
        return dim

    if len(integration_domain.shape) != 2:
        raise ValueError("The integration_domain tensor has an invalid shape")
    dim, num_bounds = integration_domain.shape
    if dim < 1:
        raise ValueError("integration_domain.shape[0] needs to be 1 or larger.")
    if num_bounds != 2:
        raise ValueError("integration_domain must have 2 values per boundary")
    # Skip the values check if an integrator.integrate method is JIT
    # compiled with JAX
    if any(nam in type(integration_domain).__name__ for nam in ["Jaxpr", "JVPTracer"]):
        return dim
    boundaries_are_invalid = (
        anp.min(integration_domain[:, 1] - integration_domain[:, 0]) < 0.0
    )
    # Skip the values check if an integrator.integrate method is
    # compiled with tensorflow.function
    if type(boundaries_are_invalid).__name__ == "Tensor":
        return dim
    if boundaries_are_invalid:
        raise ValueError("integration_domain has invalid boundary values")
    return dim
204 |
205 |
# Register anp.repeat for torch
@partial(register_function, "torch", "repeat")
def _torch_repeat(a, repeats, axis=None):
    """NumPy-compatible repeat for the torch backend.

    Args:
        a (torch.Tensor): Input tensor.
        repeats (int or torch.Tensor): Number of repetitions per element.
        axis (int, optional): Dimension along which to repeat; flattens the input if None.

    Returns:
        torch.Tensor: Tensor with repeated elements.
    """
    import torch

    # torch.repeat_interleave corresponds to np.repeat and should not be
    # confused with torch.Tensor.repeat.
    return torch.repeat_interleave(a, repeats, dim=axis)
214 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/vegas_mul_stratification.py:
--------------------------------------------------------------------------------
1 | from autoray import numpy as anp
2 | from autoray import astype
3 |
4 | from .utils import _add_at_indices
5 | import torch
6 | import time
class VEGASMultiStratification:
    """Stratification for running n VEGAS Enhanced integrators in parallel.

    Batched variant of VEGASStratification: every state tensor carries a
    leading dimension of size n, one slice per integrator. Refer to
    https://arxiv.org/abs/2009.05112 ; EQ refers to equations in that paper.
    """

    def __init__(self, n, N_increment, dim, rng, backend, dtype, beta=0.75):
        """Initialize the VEGAS stratification.

        Args:
            n (int): Number of integrators
            N_increment (int): Number of evaluations per iteration.
            dim (int): Dimensionality
            rng (RNG): Random number generator
            backend (string): Numerical backend
            dtype (backend dtype): dtype used for the calculations
            beta (float, optional): Beta parameter from VEGAS Enhanced. Defaults to 0.75.
        """
        self.n = n
        self.rng = rng
        self.dim = dim
        # stratification steps per dim, EQ 41
        self.N_strat = int((N_increment / 4.0) ** (1.0 / dim))
        self.N_strat = 1000 if self.N_strat > 1000 else self.N_strat
        self.beta = beta  # variable controlling adaptiveness in stratification 0 to 1
        self.N_cubes = self.N_strat**self.dim  # total number of subdomains
        self.V_cubes = (1.0 / self.N_strat) ** self.dim  # volume of hypercubes

        self.dtype = dtype
        self.backend = backend

        # jacobian times f eval and jacobian^2 times f; shape (n, N_cubes)
        self.JF = anp.zeros([self.n, self.N_cubes], dtype=self.dtype, like=backend)
        self.JF2 = anp.zeros([self.n, self.N_cubes], dtype=self.dtype, like=backend)

        # dampened counts, initialized uniformly; shape (n, N_cubes)
        self.dh = (
            anp.ones([self.n, self.N_cubes], dtype=self.dtype, like=backend)
            * 1.0
            / self.N_cubes
        )

        # current index counts as floating point numbers; shape (n, N_cubes)
        self.strat_counts = anp.zeros(
            [self.n, self.N_cubes], dtype=self.dtype, like=backend
        )

        # Integer grid position of every hypercube; shape (N_cubes, dim).
        # Identical for all integrators, so it is computed only once here.
        nevals_arange = torch.arange(self.N_cubes, dtype=self.dtype)
        self.positions = self._get_indices(nevals_arange)

    def accumulate_weight(self, nevals, weight_all_cubes):
        """Accumulate weights for the cubes.

        Args:
            nevals (backend tensor): Number of evals belonging to each cube (sorted), shape (n, N_cubes).
            weight_all_cubes (backend tensor): Function values, shape (n, total evals).
                Must be sorted according to the cube indices.

        Returns:
            backend tensor, backend tensor: Computed JF and JF2, each of shape (n, N_cubes)
        """
        # indices maps each index of weight_all_cubes to the corresponding
        # hypercube index.
        N_cubes_arange = anp.arange(self.N_cubes, dtype=nevals.dtype, like=self.backend)
        N_cubes_arange_n = anp.repeat(
            N_cubes_arange.view(1, -1), self.n, axis=0
        ).view(-1)
        indices = anp.repeat(N_cubes_arange_n, nevals.view(-1))
        indices = indices.reshape(self.n, -1)

        # Reset JF and JF2, and accumulate the weights and squared weights
        # into them.
        self.JF = anp.zeros([self.n, self.N_cubes], dtype=self.dtype, like=self.backend)
        self.JF2 = anp.zeros([self.n, self.N_cubes], dtype=self.dtype, like=self.backend)

        # Build a flat index over the (n, N_cubes) grid so that a single
        # scatter_add_ call accumulates for all integrators at once.
        integrator_id = torch.arange(self.n, dtype=int)
        integrator_id = integrator_id.reshape(-1, 1)
        # (n, total evals)
        integrator_id = anp.repeat(integrator_id, indices.shape[1], axis=1)

        JF_shape = self.JF.shape
        weight_all_cubes = weight_all_cubes.view(-1)
        # (0,0,0, …, n-1, n-1)
        integrator_id = integrator_id.view(-1)
        # (0,0,0, …, N_cubes-1, N_cubes-1, …, 0, 0, 0, N_cubes-1)
        indices = indices.view(-1)
        idx = integrator_id * self.N_cubes + indices

        self.JF = self.JF.view(-1)
        self.JF2 = self.JF2.view(-1)
        self.JF.scatter_add_(dim=0, index=idx, src=weight_all_cubes)
        self.JF2.scatter_add_(dim=0, index=idx, src=weight_all_cubes**2)
        self.JF = self.JF.view(JF_shape)
        self.JF2 = self.JF2.view(JF_shape)

        # Store counts
        self.strat_counts = astype(nevals, self.dtype)

        return self.JF, self.JF2

    def update_DH(self):
        """Update the dampened sample counts."""
        # EQ 42
        V2 = self.V_cubes * self.V_cubes
        d_tmp = (
            V2 * self.JF2 / self.strat_counts
            - (self.V_cubes * self.JF / self.strat_counts) ** 2
        )
        # Sometimes rounding errors produce negative values very close to 0
        d_tmp[d_tmp < 0.0] = 0.0

        # (n, N_cubes)
        self.dh = d_tmp**self.beta

        # Normalize dampening per integrator; d_sum has shape (n, 1).
        # NOTE(review): unlike VEGASStratification, a zero row sum is not
        # tolerated here; this assert is stripped under python -O.
        d_sum = anp.sum(self.dh, axis=1).view(-1, 1)
        assert torch.count_nonzero(d_sum) == d_sum.shape[0]
        self.dh = self.dh / d_sum

    def get_NH(self, nevals_exp):
        """Recalculate sample points per hypercube, EQ 44.

        Every integrator is padded up to the same total number of evaluations
        by drawing additional cube indices proportionally to the dampened
        counts, so that all integrators evaluate equally many points.

        Args:
            nevals_exp (int): Expected number of evaluations.

        Returns:
            backend tensor: Stratified sample counts per cube, shape (n, N_cubes).
        """
        # (n, N_cubes); at least 2 samples per cube
        nh = anp.floor(self.dh * nevals_exp)
        nh = anp.clip(nh, 2, None)

        # Per-integrator shortfall compared to the largest total count
        cur_nevals = torch.sum(nh, dim=1)
        max_nevals = cur_nevals.max()
        delta_nevals = max_nevals - cur_nevals
        assert delta_nevals.min() >= 0
        delta_nevals = delta_nevals.int()

        nh = astype(nh, "int64")

        for i in range(self.n):
            if delta_nevals[i] > 0:
                # Distribute the missing evaluations over the cubes of this
                # integrator, sampling cube indices proportionally to the
                # dampened counts.
                weights = self.dh[i, :]
                ids = torch.multinomial(
                    weights, num_samples=delta_nevals[i], replacement=True
                )
                nh[i, :].scatter_add_(0, ids, torch.ones_like(ids, dtype=nh.dtype))

        # All integrators must now have the same total number of evaluations
        cur_nevals = torch.sum(nh, dim=1)
        delta_nevals = max_nevals - cur_nevals
        assert delta_nevals.count_nonzero() == 0
        return astype(nh, "int64")

    def _get_indices(self, idx):
        """Maps point to stratified point.

        Args:
            idx (int backend tensor): Target points indices.

        Returns:
            int backend tensor: Mapped points.
        """
        # A commented-out alternative way for mapped points calculation if
        # idx is anp.arange(len(nevals), like=nevals).
        # torch.meshgrid's indexing argument was added in version 1.10.1,
        # so don't use it yet.
        """
        grid_1d = anp.arange(self.N_strat, like=self.backend)
        points = anp.meshgrid(*([grid_1d] * self.dim), indexing="xy", like=self.backend)
        points = anp.stack(
            [mg.ravel() for mg in points], axis=1, like=self.backend
        )
        return points
        """
        # Repeat idx via broadcasting and divide it by self.N_strat ** d
        # for all dimensions d
        points = anp.reshape(idx, [idx.shape[0], 1])
        strides = self.N_strat ** anp.arange(self.dim, like=points)
        if self.backend == "torch":
            # Torch shows a compatibility warning with //, so use torch.div
            # instead
            points = anp.div(points, strides, rounding_mode="floor")
        else:
            points = points // strides
        # Calculate the component-wise remainder: points mod self.N_strat
        points[:, :-1] = points[:, :-1] - self.N_strat * points[:, 1:]

        return points

    def get_Y(self, nevals):
        """Compute randomly sampled points for every integrator.

        Args:
            nevals (int backend tensor): Number of samples to draw per stratification cube, shape (n, N_cubes).

        Returns:
            backend tensor: Sampled points, shape (n, evals per integrator, dim).
        """
        assert (
            self.N_cubes == nevals.shape[1]
        ), f"Not every cube is sampled! {self.N_cubes} {nevals.shape[1]}"

        # Cube index for each sample point, flattened over all integrators
        nevals_arange = torch.arange(self.N_cubes, dtype=int)
        N_cubes_arange_n = anp.repeat(
            nevals_arange.view(1, -1), self.n, axis=0
        ).view(-1)
        indices = anp.repeat(N_cubes_arange_n, nevals.view(-1))

        # Integer grid position of each sample's cube; (n, evals, dim)
        positions = self.positions[indices]
        positions = positions.reshape(self.n, -1, self.dim)

        # Add a random offset within each cube and scale so that each point
        # lies in [0, 1)^dim
        random_uni = self.rng.uniform(
            size=[positions.shape[0], positions.shape[1], positions.shape[2]],
            dtype=self.dtype,
        )
        positions = (positions + random_uni) / self.N_strat
        # Due to rounding errors points are sometimes 1.0; replace them with
        # a value close to 1
        positions[positions >= 1.0] = 0.999999
        return positions
267 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/integration/vegas_stratification.py:
--------------------------------------------------------------------------------
1 | from autoray import numpy as anp
2 | from autoray import astype
3 |
4 | from .utils import _add_at_indices
5 |
6 |
class VEGASStratification:
    """The stratification used for VEGAS Enhanced. Refer to https://arxiv.org/abs/2009.05112 .
    Implementation inspired by https://github.com/ycwu1030/CIGAR/ .
    EQ refers to equation in the above paper.
    """

    def __init__(self, N_increment, dim, rng, backend, dtype, beta=0.75):
        """Initialize the VEGAS stratification.

        Args:
            N_increment (int): Number of evaluations per iteration.
            dim (int): Dimensionality
            rng (RNG): Random number generator
            backend (string): Numerical backend
            dtype (backend dtype): dtype used for the calculations
            beta (float, optional): Beta parameter from VEGAS Enhanced. Defaults to 0.75.
        """
        self.rng = rng
        self.dim = dim
        # stratification steps per dim, EQ 41
        self.N_strat = int((N_increment / 4.0) ** (1.0 / dim))
        self.N_strat = 1000 if self.N_strat > 1000 else self.N_strat
        self.beta = beta  # variable controlling adaptiveness in stratification 0 to 1
        self.N_cubes = self.N_strat**self.dim  # total number of subdomains
        self.V_cubes = (1.0 / self.N_strat) ** self.dim  # volume of hypercubes

        self.dtype = dtype
        self.backend = backend

        # jacobian times f eval and jacobian^2 times f
        self.JF = anp.zeros([self.N_cubes], dtype=self.dtype, like=backend)
        self.JF2 = anp.zeros([self.N_cubes], dtype=self.dtype, like=backend)

        # dampened counts
        self.dh = (
            anp.ones([self.N_cubes], dtype=self.dtype, like=backend)
            * 1.0
            / self.N_cubes
        )

        # current index counts as floating point numbers
        self.strat_counts = anp.zeros([self.N_cubes], dtype=self.dtype, like=backend)

    def accumulate_weight(self, nevals, weight_all_cubes):
        """Accumulate weights for the cubes.

        Args:
            nevals (backend tensor): Number of evals belonging to each cube (sorted).
            weight_all_cubes (backend tensor): Function values.

        Returns:
            backend tensor, backend tensor: Computed JF and JF2
        """
        # indices maps each index of weight_all_cubes to the corresponding
        # hypercube index.
        N_cubes_arange = anp.arange(self.N_cubes, dtype=nevals.dtype, like=self.backend)
        indices = anp.repeat(N_cubes_arange, nevals)
        # Reset JF and JF2, and accumulate the weights and squared weights
        # into them.
        self.JF = anp.zeros([self.N_cubes], dtype=self.dtype, like=self.backend)
        self.JF2 = anp.zeros([self.N_cubes], dtype=self.dtype, like=self.backend)
        _add_at_indices(self.JF, indices, weight_all_cubes, is_sorted=True)
        _add_at_indices(self.JF2, indices, weight_all_cubes**2.0, is_sorted=True)

        # Store counts
        self.strat_counts = astype(nevals, self.dtype)

        return self.JF, self.JF2

    def update_DH(self):
        """Update the dampened sample counts."""
        # EQ 42
        V2 = self.V_cubes * self.V_cubes
        d_tmp = (
            V2 * self.JF2 / self.strat_counts
            - (self.V_cubes * self.JF / self.strat_counts) ** 2
        )
        # Sometimes rounding errors produce negative values very close to 0
        d_tmp[d_tmp < 0.0] = 0.0

        self.dh = d_tmp**self.beta

        # Normalize dampening; skip the division if all contributions are
        # zero to avoid dividing by zero
        d_sum = anp.sum(self.dh)
        if d_sum != 0:
            self.dh = self.dh / d_sum

    def get_NH(self, nevals_exp):
        """Recalculate sample points per hypercube, EQ 44.

        Args:
            nevals_exp (int): Expected number of evaluations.

        Returns:
            backend tensor: Stratified sample counts per cube.
        """
        # At least 2 samples per cube
        nh = anp.floor(self.dh * nevals_exp)
        nh = anp.clip(nh, 2, None)
        return astype(nh, "int64")

    def _get_indices(self, idx):
        """Maps point to stratified point.

        Args:
            idx (int backend tensor): Target points indices.

        Returns:
            int backend tensor: Mapped points.
        """
        # A commented-out alternative way for mapped points calculation if
        # idx is anp.arange(len(nevals), like=nevals).
        # torch.meshgrid's indexing argument was added in version 1.10.1,
        # so don't use it yet.
        """
        grid_1d = anp.arange(self.N_strat, like=self.backend)
        points = anp.meshgrid(*([grid_1d] * self.dim), indexing="xy", like=self.backend)
        points = anp.stack(
            [mg.ravel() for mg in points], axis=1, like=self.backend
        )
        return points
        """
        # Repeat idx via broadcasting and divide it by self.N_strat ** d
        # for all dimensions d
        points = anp.reshape(idx, [idx.shape[0], 1])
        strides = self.N_strat ** anp.arange(self.dim, like=points)
        if self.backend == "torch":
            # Torch shows a compatibility warning with //, so use torch.div
            # instead
            points = anp.div(points, strides, rounding_mode="floor")
        else:
            points = points // strides
        # Calculate the component-wise remainder: points mod self.N_strat
        points[:, :-1] = points[:, :-1] - self.N_strat * points[:, 1:]
        return points

    def get_Y(self, nevals):
        """Compute randomly sampled points.

        Args:
            nevals (int backend tensor): Number of samples to draw per stratification cube.

        Returns:
            backend tensor: Sampled points.
        """
        # Get integer positions for each hypercube
        nevals_arange = anp.arange(len(nevals), dtype=nevals.dtype, like=nevals)
        positions = self._get_indices(nevals_arange)

        # For each hypercube i, repeat its position nevals[i] times
        position_indices = anp.repeat(nevals_arange, nevals)
        positions = positions[position_indices, :]

        # Convert the positions to float, add random offsets to them and scale
        # the result so that each point is in [0, 1)^dim
        positions = astype(positions, self.dtype)
        random_uni = self.rng.uniform(
            size=[positions.shape[0], self.dim], dtype=self.dtype
        )
        positions = (positions + random_uni) / self.N_strat
        # Due to rounding errors points are sometimes 1.0; replace them with
        # a value close to 1
        positions[positions >= 1.0] = 0.999999
        return positions
173 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/plots/plot_convergence.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 |
def plot_convergence(evals, fvals, ground_truth, labels, dpi=150):
    """Plots errors vs. function evaluations (fevals) and shows the convergence rate.

    Each method gets one curve on a log-scaled error axis; its legend entry is
    augmented with the mean ratio of consecutive errors ("convergence rate").

    Args:
        evals (list of np.array): Number of evaluations, for each method a np.array of ints.
        fvals (list of np.array): Function values for evals.
        ground_truth (np.array): Ground truth values.
        labels (list): Method names.
        dpi (int, optional): Plot dpi. Defaults to 150.
    """
    plt.figure(dpi=dpi)
    for n_evals, values, method_name in zip(evals, fvals, labels):
        errors = np.abs(np.asarray(values) - np.asarray(ground_truth))
        # Mean ratio of consecutive errors; epsilon avoids division by zero
        rate = np.mean(np.abs(errors[:-1] / (errors[1:] + 1e-16)))
        curve_label = method_name + "\nConvergence Rate: " + f"{rate:.2e}"
        plt.semilogy(np.array(n_evals), errors, label=curve_label)

    plt.legend(fontsize=6)
    plt.xlabel("# of function evaluations")
    plt.ylabel("Absolute error")
26 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/plots/plot_runtime.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 |
def plot_runtime(evals, runtime, labels, dpi=150, y_axis_name="Runtime [s]"):
    """Plots the runtime vs. function evaluations (fevals).

    One log-scaled runtime curve is drawn per method.

    Args:
        evals (list of np.array): Number of evaluations, for each method a np.array of fevals.
        runtime (list of np.array): Runtime for evals.
        labels (list): Method names.
        dpi (int, optional): Plot dpi. Defaults to 150.
        y_axis_name (str, optional): Name for y axis. Defaults to "Runtime [s]".
    """
    plt.figure(dpi=dpi)
    # One curve per method; the y axis is logarithmic
    for evals_item, rt, label in zip(evals, runtime, labels):
        plt.semilogy(evals_item, rt, label=label)
    plt.legend(fontsize=6)
    plt.xlabel("Number of evaluations")
    plt.ylabel(y_axis_name)
20 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/BatchMulVegas_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 | import time
5 | from autoray import numpy as anp
6 | from autoray import to_backend_dtype, astype
7 | import timeit
8 | import cProfile
9 | import pstats
10 | import torch
11 | from unittest.mock import patch
12 |
13 | from integration.vegas import VEGAS
14 | from integration.rng import RNG
15 | from integration.BatchMulVegas import BatchMulVEGAS
16 |
17 | import integration.utils as utils
18 |
19 |
20 |
21 | DEVICE= "cuda"
22 |
23 |
24 | from helper_functions import (
25 | compute_integration_test_errors,
26 | setup_test_for_backend,
27 | get_test_functions
28 |
29 | )
30 |
31 |
def f_batch(inp):
    """Toy batch integrand: doubles column 2 of inp IN PLACE, then sums rows.

    NOTE: mutates the caller's tensor; the prints are debug output.
    """
    print(inp.shape)
    inp[:, 2] = inp[:, 2] * 2
    row_sums = torch.sum(inp, axis=1)
    print(row_sums.shape)
    return row_sums
39 |
def _run_simple_funcs_of_BatchMulVegas():
    """Smoke test: batch-integrate f_batch over n slightly varied domains.

    NOTE(review): runs on DEVICE ("cuda") — requires a CUDA-capable machine.
    """
    integrator = BatchMulVEGAS()

    dim = 10
    n = 200
    N = 200000
    # n copies of the unit hypercube; the first two domains are shrunk to
    # exercise differing integration limits
    domains = torch.Tensor(n * [dim * [[0, 1]]]).to(DEVICE)
    domains[0, :] = torch.Tensor([dim * [[0, 0.5]]]).to(DEVICE)
    domains[1, :] = torch.Tensor([dim * [[0.2, 0.5]]]).to(DEVICE)
    print(domains.shape)

    full_integration_domain = torch.Tensor(dim * [[0, 1]])

    integrator.setValues(
        f_batch,
        dim=dim,
        N=N,
        n=n,
        integration_domains=domains,
        rng=None,
        seed=1234,
        reuse_sample_points=False,
    )

    res = integrator.integrate()
    print(res)
74 |
75 |
def _run_example_integrations(backend, dtype_name):
    """Test the integrate method in VEGAS for the given backend and example test functions using compute_integration_test_errors"""
    print(f"Testing VEGAS+ with example functions with {backend}, {dtype_name}")
    integrator = VEGAS()
    N = 10000

    # 1D accuracy checks; the error bounds below are empirical
    errors, _ = compute_integration_test_errors(
        integrator.integrate,
        {"N": N, "dim": 1, "seed": 0},
        dim=1,
        use_complex=False,
        backend=backend,
    )
    print("1D VEGAS Test: Passed N =", N, "\n", "Errors: ", errors)
    assert all(error < 5e-3 for error in errors[:3])
    assert all(error < 9.0 for error in errors)
    assert all(error < 6e-3 for error in errors[6:])

    # 3D accuracy checks
    errors, _ = compute_integration_test_errors(
        integrator.integrate,
        {"N": N, "dim": 3, "seed": 0},
        dim=3,
        use_complex=False,
        backend=backend,
    )
    print("3D VEGAS Test: Passed N =", N, "\n", "Errors: ", errors)
    assert all(error < 0.61 for error in errors)

    # 10D accuracy checks
    errors, _ = compute_integration_test_errors(
        integrator.integrate,
        {"N": N, "dim": 10, "seed": 0},
        dim=10,
        use_complex=False,
        backend=backend,
    )
    print("10D VEGAS Test: Passed N =", N, "\n", "Errors: ", errors)
    assert all(error < 12.5 for error in errors)
125 |
126 |
def _run_BatchVegas_tests(backend, dtype_name):
    """Run the BatchMulVEGAS smoke test and report accumulated/total timing."""
    utils.add_time = 0
    started = time.time()
    # _run_vegas_accuracy_checks(backend, dtype_name)
    _run_simple_funcs_of_BatchMulVegas()
    print("Total add_time is ", utils.add_time)
    print("Total time is ", time.time() - started)
    # _run_example_integrations(backend, dtype_name)
137 |
# Pytest entry point: run the BatchMulVEGAS checks on torch at float64
test_integrate_torch = setup_test_for_backend(_run_BatchVegas_tests, "torch", "float64")


if __name__ == "__main__":
    # Run this test file directly, optionally under cProfile
    profile_torch = False

    if not profile_torch:
        test_integrate_torch()
    else:
        profiler = cProfile.Profile()
        profiler.enable()
        start = timeit.default_timer()
        test_integrate_torch()
        profiler.disable()
        pstats.Stats(profiler).sort_stats("tottime").print_stats()
        print("Test ran for ", timeit.default_timer() - start, " seconds.")
159 |
160 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/BatchVegas_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 | import time
5 | from autoray import numpy as anp
6 | from autoray import to_backend_dtype, astype
7 | import timeit
8 | import cProfile
9 | import pstats
10 | import torch
11 | from unittest.mock import patch
12 |
13 | from integration.vegas import VEGAS
14 | from integration.rng import RNG
15 | from integration.BatchVegas import BatchVEGAS
16 |
17 | import integration.utils as utils
18 |
19 | DEVICE= "cuda"
20 |
21 | from helper_functions import (
22 | compute_integration_test_errors,
23 | setup_test_for_backend,
24 | get_test_functions
25 |
26 | )
27 |
28 |
def f_batch(inp):
    """Toy batch integrand: doubles column 2 of inp IN PLACE, then sums rows.

    NOTE: mutates the caller's tensor; the prints are debug output.
    """
    print(inp.shape)
    inp[:, 2] = inp[:, 2] * 2
    row_sums = torch.sum(inp, axis=1)
    print(row_sums.shape)
    return row_sums
36 |
def _run_simple_funcs_of_BatchVegas():
    """Smoke test for BatchVEGAS: first run plain VEGAS on the full domain,
    then batch-integrate f_batch over n domains while reusing that run's
    adapted map and sample points.

    NOTE(review): uses DEVICE ("cuda") — requires a CUDA-capable machine.
    """
    z = BatchVEGAS()

    dim = 10
    n = 200
    N = 200000
    # n copies of the unit hypercube; the first two are shrunk to exercise
    # differing integration limits
    # legal_tensors = torch.Tensor(n * [dim * [[0,1]] ]).to('cuda')
    legal_tensors = torch.Tensor(n * [dim * [[0,1]] ]).to(DEVICE)
    legal_tensors[0, :] = torch.Tensor([dim * [[0, 0.5]]]).to(DEVICE)
    legal_tensors[1, :] = torch.Tensor([dim * [[0.2, 0.5]]]).to(DEVICE)
    print(legal_tensors.shape)

    # f_batch = get_test_functions(dim, "torch")[0]

    full_integration_domain = torch.Tensor(dim * [[0,1]])


    # Plain VEGAS run on the full domain; its adapted map is reused below
    vegas = VEGAS()
    bigN = 100000 * 40

    domain_starts = full_integration_domain[:, 0]
    domain_sizes = full_integration_domain[:, 1] - domain_starts
    domain_volume = torch.prod(domain_sizes)
    result = vegas.integrate(f_batch, dim=dim,
                             N=bigN,
                             integration_domain=full_integration_domain,
                             # use_warmup=True,
                             use_warmup=True,
                             use_grid_improve=True,
                             max_iterations=20
                             # backend='torch'
                             )
    print('result is ', result)


    # Reuse the adapted map/sample points from the full-domain VEGAS run
    z.setValues(f_batch,
                dim=dim,
                N=N,
                n=n,
                integration_domains=legal_tensors,
                rng=None,
                seed=1234,
                reuse_sample_points=True,
                target_map=vegas.map,
                target_domain_starts = domain_starts,
                target_domain_sizes = domain_sizes
                )


    # z.setValues(f_batch,
    #             dim=dim,
    #             N=N,
    #             n=n,
    #             integration_domains=legal_tensors,
    #             rng=None,
    #             seed=1234
    #             )
    # z.integrate()
    ret = z.integrate1()

    print("see ret")
    print(ret)
    # BVegas.
def _run_example_integrations(backend, dtype_name):
    """Test the integrate method in VEGAS for the given backend and example test functions using compute_integration_test_errors"""
    print(f"Testing VEGAS+ with example functions with {backend}, {dtype_name}")
    integrator = VEGAS()
    N = 10000

    # 1D accuracy checks; the error bounds below are empirical
    errors, _ = compute_integration_test_errors(
        integrator.integrate,
        {"N": N, "dim": 1, "seed": 0},
        dim=1,
        use_complex=False,
        backend=backend,
    )
    print("1D VEGAS Test: Passed N =", N, "\n", "Errors: ", errors)
    assert all(error < 5e-3 for error in errors[:3])
    assert all(error < 9.0 for error in errors)
    assert all(error < 6e-3 for error in errors[6:])

    # 3D accuracy checks
    errors, _ = compute_integration_test_errors(
        integrator.integrate,
        {"N": N, "dim": 3, "seed": 0},
        dim=3,
        use_complex=False,
        backend=backend,
    )
    print("3D VEGAS Test: Passed N =", N, "\n", "Errors: ", errors)
    assert all(error < 0.61 for error in errors)

    # 10D accuracy checks
    errors, _ = compute_integration_test_errors(
        integrator.integrate,
        {"N": N, "dim": 10, "seed": 0},
        dim=10,
        use_complex=False,
        backend=backend,
    )
    print("10D VEGAS Test: Passed N =", N, "\n", "Errors: ", errors)
    assert all(error < 12.5 for error in errors)
150 |
151 |
def _run_BatchVegas_tests(backend, dtype_name):
    """Run the BatchVEGAS smoke test and report accumulated/total timing."""
    utils.add_time = 0
    started = time.time()
    # _run_vegas_accuracy_checks(backend, dtype_name)
    _run_simple_funcs_of_BatchVegas()
    print("Total add_time is ", utils.add_time)
    print("Total time is ", time.time() - started)
    # _run_example_integrations(backend, dtype_name)
162 |
# Pytest entry point: run the BatchVEGAS checks on torch at float64
test_integrate_torch = setup_test_for_backend(_run_BatchVegas_tests, "torch", "float64")


if __name__ == "__main__":
    # Run this test file directly, optionally under cProfile
    profile_torch = False

    if not profile_torch:
        test_integrate_torch()
    else:
        profiler = cProfile.Profile()
        profiler.enable()
        start = timeit.default_timer()
        test_integrate_torch()
        profiler.disable()
        pstats.Stats(profiler).sort_stats("tottime").print_stats()
        print("Test ran for ", timeit.default_timer() - start, " seconds.")
184 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/boole_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | import warnings
6 |
7 | from integration.boole import Boole
8 | from helper_functions import (
9 | compute_integration_test_errors,
10 | setup_test_for_backend,
11 | )
12 |
13 |
def _run_boole_tests(backend, _precision):
    """Test the integrate function in integration.Boole for the given backend.
    Note: for now the 10-D test is disabled due to lack of GPU memory on some computers."""

    bl = Boole()

    # 1D tests: with N = 401 points, polynomials up to degree 5 are
    # integrated almost exactly by Boole's rule
    N = 401
    errors, funcs = compute_integration_test_errors(
        bl.integrate, {"N": N, "dim": 1}, dim=1, use_complex=True, backend=backend
    )
    print(f"1D Boole Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    for err, test_function in zip(errors, funcs):
        assert test_function.get_order() > 5 or err < 6.33e-11
    assert all(error < 6.33e-11 for error in errors)

    # 3D tests
    N = 1076890  # N = 102.5 per dim (will change to 101 if all works)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        errors, funcs = compute_integration_test_errors(
            bl.integrate, {"N": N, "dim": 3}, dim=3, use_complex=True, backend=backend
        )
    print(f"3D Boole Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    for err, test_function in zip(errors, funcs):
        assert test_function.get_order() > 5 or err < 2e-13
    assert all(error < 5e-6 for error in errors)

    # 10D tests are disabled for now because they are too GPU-heavy:
    # N = 5 ** 10
    # errors = compute_test_errors(bl.integrate, {"N": N, "dim": 10}, dim=10, use_complex=True)
    # print("10D Boole Test: Passed N =", N, "\n", "Errors: ", errors)
    # for error in errors:
    #     assert error < 5e-9
52 |
53 |
# Pytest entry points: one per numerical backend, all at float64 precision
test_integrate_numpy = setup_test_for_backend(_run_boole_tests, "numpy", "float64")
test_integrate_torch = setup_test_for_backend(_run_boole_tests, "torch", "float64")
test_integrate_jax = setup_test_for_backend(_run_boole_tests, "jax", "float64")
test_integrate_tensorflow = setup_test_for_backend(
    _run_boole_tests, "tensorflow", "float64"
)


if __name__ == "__main__":
    # used to run this test individually
    test_integrate_numpy()
    test_integrate_torch()
    test_integrate_jax()
    test_integrate_tensorflow()
68 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/gradient_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import to_numpy, to_backend_dtype, get_dtype_name
7 | import numpy as np
8 |
9 | from integration.vegas import VEGAS
10 | from integration.monte_carlo import MonteCarlo
11 | from integration.trapezoid import Trapezoid
12 | from integration.simpson import Simpson
13 | from integration.boole import Boole
14 |
15 | from helper_functions import setup_test_for_backend
16 |
17 |
def _v_function(x):
    """V-shaped test function 2 |x|.

    Over the domain [-1, 1] the gradient is -2 for negative x and
    +2 for positive x.
    """
    return anp.abs(x) * 2
26 |
27 |
28 | def _polynomial_function(x):
29 | """
30 | 2D test function 3 x_1 ^ 2 + 2 x_0 + 1.
31 | The four gradient components are integrals of the function over the
32 | integration domain rectangle's four sides multiplied by the factors
33 | -1, -1, 1 and 1 for the sides -X_1, -X_2, X_1 and X_2 respectively.
34 | For example, with integration_domain [[0.0, 1.0], [0.0, 2.0]],
35 | the gradient of the integral with respect to this domain is
36 | [[-10.0, 14.0], [-2.0, 14.0]].
37 | """
38 | return 1.0 + 2.0 * x[:, 0] + 3.0 * x[:, 1] ** 2
39 |
40 |
41 | def _polynomial_function_parameterized(x, coeffs):
42 | """
43 | 2D test function coeffs_2 x_1 ^ 2 + coeffs_1 x_0 + coeffs_0.
44 | """
45 | return coeffs[0] + coeffs[1] * x[:, 0] + coeffs[2] * x[:, 1] ** 2
46 |
47 |
def _v_function_parameterized(x, c):
    """V shaped test function 2 |x + c|."""
    shifted = x + c
    return 2 * anp.abs(shifted)
53 |
54 |
def _calculate_gradient(backend, param, func, dtype_name):
    """Backend-specific gradient calculation

    Args:
        backend (string): Numerical backend, e.g. "torch"
        param (list or float): Parameter value(s) for func. The gradient of func is calculated over param.
        func (function): A function which receives param and should be derived
        dtype_name (string): Floating point precision

    Returns:
        backend tensor: Gradient of func over param
        backend tensor: Value of func at param
    """
    if backend == "torch":
        import torch

        # Leaf tensor that records operations for autograd
        param = torch.tensor(param)
        param.requires_grad = True

        result = func(param)
        # The result must be attached to the autograd graph
        assert hasattr(result, "grad_fn")
        # Backpropagate to obtain the gradient of func over param
        result.backward()
        gradient = param.grad
    elif backend == "jax":
        import jax

        # JAX computes value and gradient in a single transformed call
        param = anp.array(param, like="jax")
        result, gradient = jax.value_and_grad(func)(param)
    elif backend == "tensorflow":
        import tensorflow as tf

        # A tf.Variable is required for GradientTape to track param
        dtype = to_backend_dtype(dtype_name, like=backend)
        param = tf.Variable(param, dtype=dtype)
        with tf.GradientTape() as tape:
            result = func(param)
        gradient = tape.gradient(result, param)
    else:
        raise ValueError(f"No gradient calculation for the backend {backend}")

    # The result and gradient must keep the requested precision and shape
    assert get_dtype_name(result) == dtype_name
    assert get_dtype_name(gradient) == dtype_name
    assert gradient.shape == param.shape
    return to_numpy(gradient), to_numpy(result)
114 |
115 |
def _calculate_gradient_over_domain(
    backend, integration_domain, integrate, integrate_kwargs, dtype_name
):
    """Backend-specific calculation of the gradient of integrate over integration_domain

    Args:
        backend (string): Numerical backend, e.g. "torch"
        integration_domain (list): Integration domain
        integrate (function): A integrator's integrate method
        integrate_kwargs (dict): Arguments for integrate except integration_domain
        dtype_name (string): Floating point precision

    Returns:
        backend tensor: Gradient with respect to integration_domain
        backend tensor: Integral result
    """

    def integral_of_domain(dom):
        # Differentiate the integral with respect to the domain boundaries
        return integrate(integration_domain=dom, **integrate_kwargs)

    return _calculate_gradient(backend, integration_domain, integral_of_domain, dtype_name)
138 |
139 |
def _calculate_gradient_over_param(
    backend, param, integrand_with_param, integrate, integrate_kwargs, dtype_name
):
    """Backend-specific calculation of the gradient of integrate over an integrand parameter

    Args:
        backend (string): Numerical backend, e.g. "torch"
        param (list or float): Parameter value(s) for the integrand. The gradient of integrate is calculated over param.
        integrand_with_param (function): An integrand function which receives sample points and param
        integrate (function): A integrator's integrate method
        integrate_kwargs (dict): Arguments for integrate except fn (the integrand)
        dtype_name (string): Floating point precision

    Returns:
        backend tensor: Gradient with respect to param
        backend tensor: Integral result
    """

    def integral_of_param(par):
        # Bind the parameter into the integrand and integrate
        return integrate(lambda x: integrand_with_param(x, par), **integrate_kwargs)

    return _calculate_gradient(backend, param, integral_of_param, dtype_name)
165 |
166 |
def _run_gradient_tests(backend, dtype_name):
    """
    Test if the implemented integrators
    maintain gradients and if the gradients are consistent and correct
    """
    # Define integrators and numbers of evaluation points
    integrators = [Trapezoid(), Simpson(), Boole(), MonteCarlo(), VEGAS()]
    Ns_1d = [149, 149, 149, 99997, 99997]
    Ns_2d = [549, 121, 81, 99997, 99997]
    for integrator, N_1d, N_2d in zip(integrators, Ns_1d, Ns_2d):
        integrator_name = type(integrator).__name__
        # The randomized integrators need a fixed seed for reproducibility
        requires_seed = integrator_name in ["MonteCarlo", "VEGAS"]
        if backend != "torch" and integrator_name == "VEGAS":
            # Currently VEGAS supports only Torch.
            continue

        print(
            f"Calculating gradients; backend: {backend}, integrator: {integrator_name}"
        )

        # Expected values: integral of 2|x| over [-1, 1] is 2; the domain
        # gradient is [-2, 2] (see _v_function's docstring)
        print("Calculating gradients of the one-dimensional V-shaped function")
        integrate_kwargs = {"fn": _v_function, "dim": 1, "N": N_1d}
        if requires_seed:
            integrate_kwargs["seed"] = 0
        gradient, integral = _calculate_gradient_over_domain(
            backend,
            [[-1.0, 1.0]],
            integrator.integrate,
            integrate_kwargs,
            dtype_name,
        )
        # Check if the integral and gradient are accurate enough
        assert np.abs(integral - 2.0) < 1e-2
        assert np.all(np.abs(gradient - np.array([-2.0, 2.0])) < 2e-2)

        # Expected values per _polynomial_function's docstring: integral 12,
        # domain gradient [[-10, 14], [-2, 14]]
        print("Calculating gradients of a 2D polynomial over the integration domain")
        integrate_kwargs = {"fn": _polynomial_function, "dim": 2, "N": N_2d}
        if requires_seed:
            integrate_kwargs["seed"] = 0
        gradient, integral = _calculate_gradient_over_domain(
            backend,
            [[0.0, 1.0], [0.0, 2.0]],
            integrator.integrate,
            integrate_kwargs,
            dtype_name,
        )
        # Check if the integral and gradient are accurate enough
        assert np.abs(integral - 12.0) < 8e-2
        assert np.all(np.abs(gradient - np.array([[-10.0, 14.0], [-2.0, 14.0]])) < 0.1)

        # Gradient over the coefficients instead of the domain
        print("Calculating gradients of a 2D polynomial over polynomial coefficients")
        param = [1.0, 2.0, 3.0]
        integrate_kwargs = {
            "integration_domain": [[0.0, 1.0], [0.0, 2.0]],
            "dim": 2,
            "N": N_2d,
            "backend": backend,
        }
        if requires_seed:
            integrate_kwargs["seed"] = 0
        gradient, integral = _calculate_gradient_over_param(
            backend,
            param,
            _polynomial_function_parameterized,
            integrator.integrate,
            integrate_kwargs,
            dtype_name,
        )
        # Check if the integral and gradient are accurate enough
        assert np.abs(integral - 12.0) < 8e-2
        assert np.all(np.abs(gradient - np.array([2.0, 1.0, 8.0 / 3.0])) < 5e-2)

        # Gradient over the offset parameter of the shifted V function
        print("Calculating gradients of a V-shaped function over an offset")
        param = 2.0
        integrate_kwargs = {
            "integration_domain": [[-5.0, 3.0]],
            "dim": 1,
            "N": N_1d,
            "backend": backend,
        }
        if requires_seed:
            integrate_kwargs["seed"] = 0
        gradient, integral = _calculate_gradient_over_param(
            backend,
            param,
            _v_function_parameterized,
            integrator.integrate,
            integrate_kwargs,
            dtype_name,
        )
        # Check if the integral and gradient are accurate enough
        assert np.abs(integral - 34.0) < 0.2
        assert np.abs(gradient - 4.0) < 0.1
260 |
261 |
# Pytest entry points: one per differentiable backend, all at float64
test_gradients_torch = setup_test_for_backend(_run_gradient_tests, "torch", "float64")
test_gradients_jax = setup_test_for_backend(_run_gradient_tests, "jax", "float64")
test_gradients_tensorflow = setup_test_for_backend(
    _run_gradient_tests, "tensorflow", "float64"
)

if __name__ == "__main__":
    # used to run this test individually
    test_gradients_torch()
    test_gradients_jax()
    test_gradients_tensorflow()
273 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/helper_functions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from integration_test_functions import Polynomial, Exponential, Sinusoid
5 | from utils.set_up_backend import set_up_backend
6 | from utils.set_log_level import set_log_level
7 |
8 |
def get_test_functions(dim, backend):
    """Here we define a bunch of functions that will be used for testing.

    Args:
        dim (int): Dimensionality of test functions to use.
        backend (string): Numerical backend used for the integration

    Returns:
        list: Example integrand objects (Polynomial, Exponential, Sinusoid)
            with known expected integrals over their domains.

    Raises:
        ValueError: If no test functions are implemented for dim.
    """
    if dim == 1:
        return [
            # Real numbers
            Polynomial(4.0, [2.0], is_complex=False, backend=backend),  # y = 2
            Polynomial(0, [0, 1], is_complex=False, backend=backend),  # y = x
            Polynomial(
                2 / 3, [0, 0, 2], domain=[[0, 1]], is_complex=False, backend=backend
            ),  # y = 2x^2
            # y = -3x^3+2x^2-x+3
            Polynomial(
                27.75,
                [3, -1, 2, -3],
                domain=[[-2, 1]],
                is_complex=False,
                backend=backend,
            ),
            # y = 7x^4-3x^3+2x^2-x+3
            Polynomial(
                44648.0 / 15.0,
                [3, -1, 2, -3, 7],
                domain=[[-4, 4]],
                is_complex=False,
                backend=backend,
            ),
            # # y = -x^5+7x^4-3x^3+2x^2-x+3
            Polynomial(
                8939.0 / 60.0,
                [3, -1, 2, -3, 7, -1],
                domain=[[2, 3]],
                is_complex=False,
                backend=backend,
            ),
            Exponential(
                np.exp(1) - np.exp(-2),
                domain=[[-2, 1]],
                is_complex=False,
                backend=backend,
            ),
            Exponential(
                (np.exp(2) - 1.0) / np.exp(3),
                domain=[[-3, -1]],
                is_complex=False,
                backend=backend,
            ),
            Sinusoid(
                2 * np.sin(1) * np.sin(1),
                domain=[[0, 2]],
                is_complex=False,
                backend=backend,
            ),
            #
            # Complex numbers
            Polynomial(4.0j, [2.0j], is_complex=True, backend=backend),  # y = 2j
            Polynomial(0, [0, 1j], is_complex=True, backend=backend),  # y = xj
            # y=7x^4-3jx^3+2x^2-jx+3
            Polynomial(
                44648.0 / 15.0,
                [3, -1j, 2, -3j, 7],
                domain=[[-4, 4]],
                is_complex=True,
                backend=backend,
            ),
        ]
    elif dim == 3:
        return [
            # Real numbers
            Polynomial(
                48.0, [2.0], dim=3, is_complex=False, backend=backend
            ),  # f(x,y,z) = 2
            Polynomial(
                0, [0, 1], dim=3, is_complex=False, backend=backend
            ),  # f(x,y,z) = x + y + z
            # f(x,y,z) = x^2+y^2+z^2
            Polynomial(8.0, coeffs=[0, 0, 1], dim=3, is_complex=False, backend=backend),
            # e^x+e^y+e^z
            Exponential(
                27 * (np.exp(3) - 1) / np.exp(2),
                dim=3,
                domain=[[-2, 1], [-2, 1], [-2, 1]],
                is_complex=False,
                backend=backend,
            ),
            Sinusoid(
                24 * np.sin(1) ** 2,
                dim=3,
                domain=[[0, 2], [0, 2], [0, 2]],
                is_complex=False,
                backend=backend,
            ),
            # e^x+e^y+e^z
            Exponential(
                1.756,
                dim=3,
                domain=[[-0.05, 0.1], [-0.25, 0.2], [-np.exp(1), np.exp(1)]],
                is_complex=False,
                backend=backend,
            ),
            #
            # Complex numbers
            Polynomial(
                48.0j, [2.0j], dim=3, is_complex=True, backend=backend
            ),  # f(x,y,z) = 2j
            Polynomial(
                0, [0, 1.0j], dim=3, is_complex=True, backend=backend
            ),  # f(x,y,z) = xj
            Polynomial(
                8.0j, coeffs=[0, 0, 1.0j], dim=3, is_complex=True, backend=backend
            ),  # j*x^2+j*y^2+j*z^2
        ]
    elif dim == 10:
        return [
            # Real numbers
            # f(x_1, ..., x_10) = x_1^2+x_2^2+...
            Polynomial(
                3413.33333333,
                coeffs=[0, 0, 1],
                dim=10,
                is_complex=False,
                backend=backend,
            ),
            # Complex numbers
            # f(x_1, ..., x_10) = j*x_1^2+j*x_2^2+...
            Polynomial(
                3413.33333333j,
                coeffs=[0, 0, 1.0j],
                dim=10,
                is_complex=True,
                backend=backend,
            ),
        ]
    else:
        raise ValueError("Not testing functions implemented for dim " + str(dim))
148 |
149 |
def compute_integration_test_errors(
    integrator,
    integrator_args,
    dim,
    use_complex,
    backend,
):
    """Computes errors on all test functions for given dimension and integrator.

    Args:
        integrator (torchquad.base_integrator): Integrator to use.
        integrator_args (dict): Arguments for the integrator.
        dim (int): Dimensionality of the example functions to choose.
        use_complex (Boolean): If True, include complex example functions;
            if False, skip them.
        backend (string): Numerical backend for the example functions.

    Returns:
        (list, list): Absolute errors on all example functions and the chosen
            example functions
    """
    errors = []
    chosen_functions = []

    # Compute integration errors on the chosen functions and remember those
    # functions
    for test_function in get_test_functions(dim, backend):
        # Complex-valued examples are evaluated only when requested
        if not use_complex and test_function.is_complex:
            continue
        result = test_function.evaluate(integrator, integrator_args)
        if backend == "torch":
            # Torch results may live on the GPU and carry autograd state;
            # move to CPU and detach before converting to NumPy
            result = result.cpu().detach().numpy()
        errors.append(np.abs(result - test_function.expected_result))
        chosen_functions.append(test_function)

    return errors, chosen_functions
198 |
199 |
def setup_test_for_backend(test_func, backend, dtype_name):
    """
    Create a function to execute a test function with the given numerical backend.
    If the backend is not installed, skip the test.

    Args:
        test_func (function(backend, dtype_name)): The function which runs tests
        backend (string): The numerical backend
        dtype_name ("float32", "float64" or None): Floating point precision. If None, the global precision is not changed.

    Returns:
        function: A test function for Pytest
    """

    def wrapped_test():
        # Skip instead of fail when the backend is not installed
        pytest.importorskip(backend)
        set_log_level("INFO")
        set_up_backend(backend, dtype_name)
        if dtype_name is None:
            return test_func(backend)
        return test_func(backend, dtype_name)

    return wrapped_test
223 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/integration_grid_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import to_backend_dtype
7 |
8 | from integration.integration_grid import IntegrationGrid
9 | from helper_functions import setup_test_for_backend
10 |
11 |
def _check_grid_validity(grid, integration_domain, N, eps):
    """Check if a specific grid object contains illegal values"""
    dim = len(integration_domain)
    # Points per dimension is the dim-th root of N; the small constant guards
    # against floating point truncation in the root
    expected_points_per_dim = int(N ** (1 / dim) + 1e-8)
    assert grid._N == expected_points_per_dim, "Incorrect number of points per dimension"
    assert grid.points.shape == (
        int(N),
        integration_domain.shape[0],
    ), "Incorrect number of calculated points"
    assert (
        grid.points.dtype == integration_domain.dtype
    ), "Grid points have an incorrect dtype"
    assert (
        grid.h.dtype == integration_domain.dtype
    ), "Mesh widths have an incorrect dtype"
    for d in range(dim):
        lower = integration_domain[d][0]
        upper = integration_domain[d][1]
        # The mesh width must match the domain width divided into N-1 steps
        assert (
            anp.abs(grid.h[d] - (upper - lower) / (grid._N - 1)) < eps
        ), "Incorrect mesh width"
        assert (
            anp.min(grid.points[:, d]) >= lower
        ), "Points are outside of the integration domain"
        assert (
            anp.max(grid.points[:, d]) <= upper
        ), "Points are outside of the integration domain"
38 |
39 |
def _run_integration_grid_tests(backend, dtype_name):
    """
    Test IntegrationGrid in integration.integration_grid for illegal values with various input arguments
    """
    if backend == "torch":
        import torch

        torch.set_printoptions(10)

    eps = 2e-8  # error bound
    dtype = to_backend_dtype(dtype_name, like=backend)

    # Generate a grid in different dimensions with different N on different
    # domains; cases: (N as float, 1-D), (N as int, 3-D), (N as float, 3-D)
    cube_3d = [[0.0, 2.0], [-2.0, 1.0], [0.5, 1.0]]
    cases = [
        (10.0, [[0.0, 1.0]]),
        (4**3, cube_3d),
        (4.0**3, cube_3d),
    ]
    for N, dom in cases:
        integration_domain = anp.array(dom, dtype=dtype, like=backend)
        grid = IntegrationGrid(N, integration_domain)
        _check_grid_validity(grid, integration_domain, N, eps)
66 |
67 |
# Pytest entry points: run the integration grid checks once per numerical
# backend, always at float64 precision.
test_integration_grid_numpy = setup_test_for_backend(
    _run_integration_grid_tests, "numpy", "float64"
)
test_integration_grid_torch = setup_test_for_backend(
    _run_integration_grid_tests, "torch", "float64"
)
test_integration_grid_jax = setup_test_for_backend(
    _run_integration_grid_tests, "jax", "float64"
)
test_integration_grid_tensorflow = setup_test_for_backend(
    _run_integration_grid_tests, "tensorflow", "float64"
)
80 |
81 |
if __name__ == "__main__":
    # used to run this test individually
    for backend_test in (
        test_integration_grid_numpy,
        test_integration_grid_torch,
        test_integration_grid_jax,
        test_integration_grid_tensorflow,
    ):
        backend_test()
87 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/integration_test_functions.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import infer_backend
7 | from numpy import inf
8 | from loguru import logger
9 |
10 | from integration.utils import _setup_integration_domain
11 |
12 |
class IntegrationTestFunction:
    """Wrapper class for test functions."""

    domain = None  # Domain that is integrated over
    dim = None  # Expected input dimension of the function
    expected_result = None  # What the true integral solution is
    # Order of the function if applicable, can be used to infer expected convergence order
    order = None
    f = None  # Function to evaluate
    is_complex = False  # If the test function contains complex numbers

    def __init__(
        self, expected_result, dim=1, domain=None, is_complex=False, backend="torch"
    ):
        """Initializes domain and stores variables.

        Args:
            expected_result (float): Expected integration result.
            dim (int, optional): Dimensionality of investigated function. Defaults to 1.
            domain (list, optional): Integration domain, e.g. [[0,1],[1,2]]. Defaults to None.
            is_complex (Boolean): If the test function contains complex numbers. Defaults to False.
            backend (string, optional): Numerical backend. This argument is ignored if the backend can be inferred from domain. Defaults to "torch".
        """
        self.dim = dim
        self.expected_result = expected_result
        self.is_complex = is_complex
        self.domain = _setup_integration_domain(dim, domain, backend)
        logger.debug("Initialized Test function with ")
        logger.debug(
            "dim="
            + str(self.dim)
            + "| domain="
            + str(self.domain)
            + "| expected_result="
            + str(expected_result)
        )

    def evaluate(self, integrator, integration_args):
        """Evaluates the passed integration functions with arguments.

        Args:
            integrator (func): Integration function to call.
            integration_args (list): Arguments to pass to integrator.

        Returns:
            float: Integration result
        """

        def integrand(x):
            # Sanity-check that the sampled points match the domain's
            # backend and dtype before evaluating the wrapped function
            domain_backend = infer_backend(self.domain)
            points_backend = infer_backend(x)
            assert domain_backend == points_backend, (
                "Integration domain and points have a different backend:"
                f" {domain_backend} and {points_backend}"
            )
            assert self.domain.dtype == x.dtype, (
                "Integration domain and points have a different dtype:"
                f" {self.domain.dtype} and {x.dtype}"
            )
            return self.f(x)

        return integrator(
            fn=integrand, integration_domain=self.domain, **integration_args
        )

    def get_order(self):
        """Get the order (polynomial degree) of the function

        Returns:
            float: Order of the function or infinity if it is not a finite polynomial
        """
        if self.order is None:
            return inf
        return self.order
84 |
85 |
class Polynomial(IntegrationTestFunction):
    def __init__(
        self,
        expected_result=None,
        coeffs=[2],
        dim=1,
        domain=None,
        is_complex=False,
        backend="torch",
    ):
        """Creates an n-dimensional, degree-K polynomial test function.

        Args:
            expected_result (backend tensor): Expected result. Required to compute errors.
            coeffs (list, optional): Polynomial coefficients. Are the same for each dim. Defaults to [2].
            dim (int, optional): Polynomial dimensionality. Defaults to 1.
            domain (list, optional): Integration domain. Defaults to [-1.0, 1.0]^dim.
            is_complex (Boolean): If the test function contains complex numbers. Defaults to False.
            backend (string, optional): Numerical backend. This argument is ignored if the backend can be inferred from domain. Defaults to "torch".
        """
        super().__init__(expected_result, dim, domain, is_complex, backend)
        if backend == "tensorflow":
            # Tensorflow requires plain Python float or complex coefficients.
            converter = complex if is_complex else float
            coeffs = [converter(c) for c in coeffs]
        if is_complex:
            self.coeffs = anp.array(coeffs, like=self.domain)
        else:
            self.coeffs = anp.array(coeffs, like=self.domain, dtype=self.domain.dtype)
        # The polynomial degree follows from the number of coefficients
        self.order = len(coeffs) - 1
        self.f = self._poly

    def _poly(self, x):
        # Compute x^k for k = 0..order; powers has shape (dim, N, order+1)
        if infer_backend(x) == "tensorflow":
            # Tensorflow's exponentiation gives float64 values if x are
            # float32 and the exponent are integer
            ks = anp.array(range(self.order + 1), dtype=x.dtype, like=x)
            powers = x.reshape(x.shape + (1,)) ** ks
            assert powers.dtype == x.dtype
            if powers.dtype != self.coeffs.dtype:
                # Tensorflow does not automatically cast float32 to
                # complex128, so we do it here explicitly.
                assert self.is_complex
                powers = anp.cast(powers, self.coeffs.dtype)
        else:
            exponents = anp.linspace(
                0, self.order, self.order + 1, like=x, dtype=x.dtype
            )
            powers = x.reshape(x.shape + (1,)) ** exponents
            assert powers.dtype == x.dtype

        # Weight by the coefficients, collapse the exponent axis, then sum
        # the per-dimension values
        weighted = anp.multiply(powers, self.coeffs)
        return anp.sum(anp.sum(weighted, axis=2), axis=1)
149 |
150 |
class Exponential(IntegrationTestFunction):
    def __init__(
        self,
        expected_result=None,
        dim=1,
        domain=None,
        is_complex=False,
        backend="torch",
    ):
        """Creates an n-dimensional exponential test function.

        Args:
            expected_result (backend tensor): Expected result. Required to compute errors.
            dim (int, optional): Input dimension. Defaults to 1.
            domain (list, optional): Integration domain. Defaults to [-1.0, 1.0]^dim.
            is_complex (Boolean): If the test function contains complex numbers. Defaults to False.
            backend (string, optional): Numerical backend. This argument is ignored if the backend can be inferred from domain. Defaults to "torch".
        """
        super().__init__(expected_result, dim, domain, is_complex, backend)
        self.f = self._exp

    def _exp(self, x):
        # Apply e^x pointwise, then reduce along axis 1
        values = anp.exp(x)
        return anp.sum(values, axis=1)
175 |
176 |
class Sinusoid(IntegrationTestFunction):
    def __init__(
        self,
        expected_result=None,
        dim=1,
        domain=None,
        is_complex=False,
        backend="torch",
    ):
        """Creates an n-dimensional sinusoidal test function.

        Args:
            expected_result (backend tensor): Expected result. Required to compute errors.
            dim (int, optional): Input dimension. Defaults to 1.
            domain (list, optional): Integration domain. Defaults to [-1.0, 1.0]^dim.
            is_complex (Boolean): If the test function contains complex numbers. Defaults to False.
            backend (string, optional): Numerical backend. This argument is ignored if the backend can be inferred from domain. Defaults to "torch".
        """
        super().__init__(expected_result, dim, domain, is_complex, backend)
        self.f = self._sinusoid

    def _sinusoid(self, x):
        # Apply sin pointwise, then reduce along axis 1
        values = anp.sin(x)
        return anp.sum(values, axis=1)
200 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/integrator_types_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Additional integration tests to check if dtypes, shapes and similar
4 | backend-specific properties
5 | """
6 | import sys
7 |
8 | sys.path.append("../")
9 |
10 | from autoray import numpy as anp
11 | from autoray import infer_backend, get_dtype_name, to_backend_dtype
12 | from itertools import product
13 |
14 | from integration.trapezoid import Trapezoid
15 | from integration.simpson import Simpson
16 | from integration.boole import Boole
17 | from integration.monte_carlo import MonteCarlo
18 | from integration.vegas import VEGAS
19 | from utils.set_precision import set_precision
20 | from helper_functions import setup_test_for_backend
21 |
22 |
def _run_simple_integrations(backend):
    """
    Integrate a simple 2D constant function to check the following:
    * The integrators do not crash with the numerical backend
    * The evaluation points have the correct backend, dtype and shape
    * The integration_domain argument dtype takes precedence over a globally
      configured dtype
    * The globally configured dtype or the backend's default dtype is used if
      the integration_domain argument is a list
    * MonteCarlo and the Newton Cotes composite integrators integrate a
      constant function (almost) exactly.
    """
    integrators_all = [Trapezoid(), Simpson(), Boole(), MonteCarlo(), VEGAS()]
    Ns_all = [13**2, 13**2, 13**2, 20, 1000]

    # Set in the loop below and read by fn_const to check the points' dtype
    expected_dtype_name = None

    # Test only integrand output dtypes which are the same as the input dtype
    def fn_const(x):
        assert infer_backend(x) == backend
        assert get_dtype_name(x) == expected_dtype_name
        # Points must come as a 2D batch of 2-dimensional samples
        assert len(x.shape) == 2 and x.shape[1] == 2
        return 0.0 * x[:, 0] - 2.0

    # All combinations of global precision, integration_domain dtype and
    # integrator
    for dtype_global, dtype_arg, (integrator, N) in product(
        ["float32", "float64"],
        [None, "float32", "float64"],
        zip(integrators_all, Ns_all),
    ):
        # JAX ignores the dtype argument when an array is created and always
        # uses the global precision.
        if (backend, dtype_global, dtype_arg) in [
            ("jax", "float32", "float64"),
            ("jax", "float64", "float32"),
        ]:
            continue
        integrator_name = type(integrator).__name__
        # VEGAS supports only numpy and torch
        if integrator_name == "VEGAS" and backend in ["jax", "tensorflow"]:
            continue

        # Set the global precision
        set_precision(dtype_global, backend=backend)

        integration_domain = [[0.0, 1.0], [-2.0, 0.0]]
        if dtype_arg is not None:
            # Set the integration_domain dtype which should have higher priority
            # than the global dtype
            integration_domain = anp.array(
                integration_domain,
                dtype=to_backend_dtype(dtype_arg, like=backend),
                like=backend,
            )
            assert infer_backend(integration_domain) == backend
            assert get_dtype_name(integration_domain) == dtype_arg
            expected_dtype_name = dtype_arg
        else:
            expected_dtype_name = dtype_global

        print(
            f"[2mTesting {integrator_name} with {backend}, argument dtype"
            f" {dtype_arg}, global/default dtype {dtype_global}[m"
        )
        # Seed the randomized integrators so results are reproducible
        if integrator_name in ["MonteCarlo", "VEGAS"]:
            extra_kwargs = {"seed": 0}
        else:
            extra_kwargs = {}
        result = integrator.integrate(
            fn=fn_const,
            dim=2,
            N=N,
            integration_domain=integration_domain,
            backend=backend,
            **extra_kwargs,
        )
        assert infer_backend(result) == backend
        assert get_dtype_name(result) == expected_dtype_name
        # The constant -2.0 integrated over [[0,1],[-2,0]] (area 2) is -4.0.
        # VEGAS seems to be bad at integrating constant functions currently
        max_error = 0.03 if integrator_name == "VEGAS" else 1e-5
        assert anp.abs(result - (-4.0)) < max_error
103 |
104 |
# Pytest entry points for each numerical backend; the dtype argument None
# keeps the backend's default/global floating point precision.
test_integrate_numpy = setup_test_for_backend(_run_simple_integrations, "numpy", None)
test_integrate_torch = setup_test_for_backend(_run_simple_integrations, "torch", None)
test_integrate_jax = setup_test_for_backend(_run_simple_integrations, "jax", None)
test_integrate_tensorflow = setup_test_for_backend(
    _run_simple_integrations, "tensorflow", None
)
111 |
112 |
if __name__ == "__main__":
    # Allows running this test individually; Ctrl-C stops the remaining
    # backends without a traceback
    try:
        for backend_test in (
            test_integrate_numpy,
            test_integrate_torch,
            test_integrate_jax,
            test_integrate_tensorflow,
        ):
            backend_test()
    except KeyboardInterrupt:
        pass
121 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/monte_carlo_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from integration.monte_carlo import MonteCarlo
6 | from helper_functions import (
7 | compute_integration_test_errors,
8 | setup_test_for_backend,
9 | )
10 |
11 |
def _run_monte_carlo_tests(backend, _precision):
    """Test the integrate function in integration.MonteCarlo for the given backend."""

    mc = MonteCarlo()

    # 1D Tests
    N = 100000  # integration points to use

    errors, funcs = compute_integration_test_errors(
        mc.integrate,
        {"N": N, "dim": 1, "seed": 0},
        dim=1,
        use_complex=True,
        backend=backend,
    )
    print(
        f"1D Monte Carlo Test passed. N: {N}, backend: {backend}, Errors: {str(errors)}"
    )
    # Constant functions can be integrated exactly with MonteCarlo.
    # (at least our example functions)
    assert all(fn.get_order() > 0 or err == 0.0 for err, fn in zip(errors, funcs))

    # Error bounds per region of the test function list.
    # If this breaks check if test functions in helper_functions changed.
    for region, bound in (
        (slice(0, 3), 7e-3),
        (slice(3, 4), 0.5),
        (slice(4, 5), 32.0),
        (slice(6, 10), 1e-2),
        (slice(10, None), 28.03),
    ):
        for error in errors[region]:
            assert error < bound

    # 3D Tests
    N = 1000000
    errors, funcs = compute_integration_test_errors(
        mc.integrate,
        {"N": N, "dim": 3, "seed": 0},
        dim=3,
        use_complex=True,
        backend=backend,
    )
    print(
        f"3D Monte Carlo Test passed. N: {N}, backend: {backend}, Errors: {str(errors)}"
    )
    assert all(fn.get_order() > 0 or err == 0.0 for err, fn in zip(errors, funcs))
    assert all(error < 1e-1 for error in errors)

    # 10D Tests
    N = 10000
    errors, funcs = compute_integration_test_errors(
        mc.integrate,
        {"N": N, "dim": 10, "seed": 0},
        dim=10,
        use_complex=True,
        backend=backend,
    )
    print(
        f"10D Monte Carlo Test passed. N: {N}, backend: {backend}, Errors:"
        f" {str(errors)}"
    )
    assert all(fn.get_order() > 0 or err == 0.0 for err, fn in zip(errors, funcs))
    assert all(error < 26 for error in errors)
82 |
83 |
# Pytest entry points: run the Monte Carlo tests once per numerical backend,
# always at float32 precision.
test_integrate_numpy = setup_test_for_backend(
    _run_monte_carlo_tests, "numpy", "float32"
)
test_integrate_torch = setup_test_for_backend(
    _run_monte_carlo_tests, "torch", "float32"
)
test_integrate_jax = setup_test_for_backend(_run_monte_carlo_tests, "jax", "float32")
test_integrate_tensorflow = setup_test_for_backend(
    _run_monte_carlo_tests, "tensorflow", "float32"
)
94 |
95 |
if __name__ == "__main__":
    # used to run this test individually
    for backend_test in (
        test_integrate_numpy,
        test_integrate_torch,
        test_integrate_jax,
        test_integrate_tensorflow,
    ):
        backend_test()
102 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/rng_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import infer_backend, get_dtype_name, to_backend_dtype, to_numpy
7 |
8 | from integration.rng import RNG
9 |
10 | from helper_functions import setup_test_for_backend
11 |
12 |
def _run_RNG_tests(backend, dtype_name):
    """
    Test the random number generator with the given numerical backend
    * With the same seed, the same numbers should be generated
    * With different seeds, different numbers should be generated
    * If seed is None / omitted, the RNG should be randomly seeded
    """
    backend_dtype = to_backend_dtype(dtype_name, like=backend)
    size = [3, 9]
    # Samples: seed 547, explicit None seed, seed 547 again, omitted seed,
    # and a different seed 42
    samples = [
        RNG(backend, seed).uniform(size=size, dtype=backend_dtype)
        for seed in (547, None, 547)
    ]
    samples.append(RNG(backend).uniform(size=size, dtype=backend_dtype))
    samples.append(RNG(backend, 42).uniform(size=size, dtype=backend_dtype))
    as_numpy = [to_numpy(sample) for sample in samples]

    # Validity of the backend, dtype, shape and values range
    for sample in samples:
        assert infer_backend(sample) == backend
        assert get_dtype_name(sample) == dtype_name
        assert sample.shape == (3, 9)
    for arr in as_numpy:
        assert all(0.0 <= x <= 1.0 for x in arr.ravel())

    # Test if the seed argument leads to consistent results and
    # if omitting a seed leads to random numbers
    assert anp.array_equal(as_numpy[0], as_numpy[2])
    num_samples = len(samples)
    for i1 in range(num_samples):
        for i2 in range(i1 + 1, num_samples):
            if (i1, i2) == (0, 2):
                continue
            # With a very low probability this may fail
            assert not anp.array_equal(as_numpy[i1], as_numpy[i2])
46 |
47 |
# Pytest entry points: test the RNG for each backend with both float32 and
# float64 sample dtypes.
test_rng_jax_f32 = setup_test_for_backend(_run_RNG_tests, "jax", "float32")
test_rng_jax_f64 = setup_test_for_backend(_run_RNG_tests, "jax", "float64")
test_rng_numpy_f32 = setup_test_for_backend(_run_RNG_tests, "numpy", "float32")
test_rng_numpy_f64 = setup_test_for_backend(_run_RNG_tests, "numpy", "float64")
test_rng_torch_f32 = setup_test_for_backend(_run_RNG_tests, "torch", "float32")
test_rng_torch_f64 = setup_test_for_backend(_run_RNG_tests, "torch", "float64")
test_rng_tensorflow_f32 = setup_test_for_backend(
    _run_RNG_tests, "tensorflow", "float32"
)
test_rng_tensorflow_f64 = setup_test_for_backend(
    _run_RNG_tests, "tensorflow", "float64"
)
60 |
61 |
if __name__ == "__main__":
    # used to run this test individually
    for backend_test in (
        test_rng_numpy_f32,
        test_rng_numpy_f64,
        test_rng_torch_f32,
        test_rng_torch_f64,
        test_rng_jax_f32,
        test_rng_jax_f64,
        test_rng_tensorflow_f32,
        test_rng_tensorflow_f64,
    ):
        backend_test()
72 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/simpson_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | import warnings
6 |
7 | from integration.simpson import Simpson
8 | from helper_functions import (
9 | compute_integration_test_errors,
10 | setup_test_for_backend,
11 | )
12 |
13 |
def _run_simpson_tests(backend, _precision):
    """Test the integrate function in integration.Simpson for the given backend."""

    simp = Simpson()

    # 1D Tests
    N = 100001

    errors, funcs = compute_integration_test_errors(
        simp.integrate, {"N": N, "dim": 1}, dim=1, use_complex=True, backend=backend
    )
    print(f"1D Simpson Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    # Polynomials up to degree 3 can be integrated almost exactly with Simpson.
    for err, fn in zip(errors, funcs):
        assert fn.get_order() > 3 or err < 3e-11
    assert all(err < 1e-7 for err in errors)

    # 3 integration points for the order check (3 points should lead to
    # almost 0 err for low order polynomials)
    N = 3
    errors, funcs = compute_integration_test_errors(
        simp.integrate, {"N": N, "dim": 1}, dim=1, use_complex=True, backend=backend
    )
    print(f"1D Simpson Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    # All polynomials up to degree = 3 should be 0
    # If this breaks, check if test functions in helper_functions changed.
    for err, fn in zip(errors, funcs):
        assert fn.get_order() > 3 or err < 1e-15

    # 3D Tests
    N = 1076890  # N = 102.5 per dim (will change to 101 if all works)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        errors, funcs = compute_integration_test_errors(
            simp.integrate, {"N": N, "dim": 3}, dim=3, use_complex=True, backend=backend
        )
    print(f"3D Simpson Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    for err, fn in zip(errors, funcs):
        assert fn.get_order() > 3 or err < 1e-12
    assert all(err < 5e-6 for err in errors)

    # Tensorflow crashes with an Op:StridedSlice UnimplementedError with 10
    # dimensions
    if backend == "tensorflow":
        print("Skipping tensorflow 10D tests")
        return

    # 10D Tests
    N = 3**10
    errors, funcs = compute_integration_test_errors(
        simp.integrate, {"N": N, "dim": 10}, dim=10, use_complex=True, backend=backend
    )
    print(f"10D Simpson Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    assert all(err < 5e-9 for err in errors)
69 |
70 |
# Pytest entry points: run the Simpson tests once per numerical backend,
# always at float64 precision.
test_integrate_numpy = setup_test_for_backend(_run_simpson_tests, "numpy", "float64")
test_integrate_torch = setup_test_for_backend(_run_simpson_tests, "torch", "float64")
test_integrate_jax = setup_test_for_backend(_run_simpson_tests, "jax", "float64")
test_integrate_tensorflow = setup_test_for_backend(
    _run_simpson_tests, "tensorflow", "float64"
)
77 |
78 |
if __name__ == "__main__":
    # used to run this test individually
    for backend_test in (
        test_integrate_numpy,
        test_integrate_torch,
        test_integrate_jax,
        test_integrate_tensorflow,
    ):
        backend_test()
85 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/trapezoid_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from integration.trapezoid import Trapezoid
6 | from helper_functions import (
7 | compute_integration_test_errors,
8 | setup_test_for_backend,
9 | )
10 |
11 |
def _run_trapezoid_tests(backend, _precision):
    """Test the integrate function in integration.Trapezoid for the given backend."""

    tp = Trapezoid()

    # 1D Tests
    N = 100000
    errors, funcs = compute_integration_test_errors(
        tp.integrate, {"N": N, "dim": 1}, dim=1, use_complex=True, backend=backend
    )
    print(f"1D Trapezoid Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    # Polynomials up to degree 1 can be integrated almost exactly with Trapezoid.
    for err, fn in zip(errors, funcs):
        assert fn.get_order() > 1 or err < 2e-11
    assert all(err < 1e-5 for err in errors)

    # 2 integration points for the order check (2 points should lead to
    # almost 0 err for low order polynomials)
    N = 2
    errors, funcs = compute_integration_test_errors(
        tp.integrate, {"N": N, "dim": 1}, dim=1, use_complex=True, backend=backend
    )
    print(f"1D Trapezoid Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    # All polynomials up to degree = 1 should be 0
    # If this breaks check if test functions in helper_functions changed.
    for err, fn in zip(errors, funcs):
        assert fn.get_order() > 1 or err < 1e-15
    assert all(err < 1e-15 for err in errors[:2])

    # 3D Tests
    N = 1000000
    errors, funcs = compute_integration_test_errors(
        tp.integrate, {"N": N, "dim": 3}, dim=3, use_complex=True, backend=backend
    )
    print(f"3D Trapezoid Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    for err, fn in zip(errors, funcs):
        assert fn.get_order() > 1 or err < 1e-12
    assert all(err < 6e-3 for err in errors)

    # Tensorflow crashes with an Op:StridedSlice UnimplementedError with 10
    # dimensions
    if backend == "tensorflow":
        print("Skipping tensorflow 10D tests")
        return

    # 10D Tests
    N = 10000
    errors, funcs = compute_integration_test_errors(
        tp.integrate, {"N": N, "dim": 10}, dim=10, use_complex=True, backend=backend
    )
    print(f"10D Trapezoid Test passed. N: {N}, backend: {backend}, Errors: {errors}")
    for err, fn in zip(errors, funcs):
        assert fn.get_order() > 1 or err < 1e-11
    assert all(err < 7000 for err in errors)
68 |
69 |
# Pytest entry points: run the Trapezoid tests once per numerical backend,
# always at float64 precision.
test_integrate_numpy = setup_test_for_backend(_run_trapezoid_tests, "numpy", "float64")
test_integrate_torch = setup_test_for_backend(_run_trapezoid_tests, "torch", "float64")
test_integrate_jax = setup_test_for_backend(_run_trapezoid_tests, "jax", "float64")
test_integrate_tensorflow = setup_test_for_backend(
    _run_trapezoid_tests, "tensorflow", "float64"
)
76 |
77 |
if __name__ == "__main__":
    # used to run this test individually
    for backend_test in (
        test_integrate_numpy,
        test_integrate_torch,
        test_integrate_jax,
        test_integrate_tensorflow,
    ):
        backend_test()
84 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/utils_integration_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import infer_backend, get_dtype_name, to_backend_dtype
7 | import importlib
8 | import pytest
9 | import warnings
10 |
11 | from integration.utils import (
12 | _linspace_with_grads,
13 | _add_at_indices,
14 | _setup_integration_domain,
15 | )
16 | from utils.set_precision import set_precision
17 | from utils.enable_cuda import enable_cuda
18 |
19 |
def _run_tests_with_all_backends(func, func_extra_args=None):
    """Run a test function with all backends and supported precisions

    Args:
        func (function(dtype_name, backend, ...)): Function which runs tests
        func_extra_args (list of dicts, optional): List of extra arguments which are passed to func. Defaults to one execution with no extra arguments.
    """
    # Use a None sentinel instead of a mutable default argument ([{}]);
    # the effective default is unchanged.
    if func_extra_args is None:
        func_extra_args = [{}]
    # If JAX is tested before Tensorflow here, an out of memory error can
    # happen because both allocate all memory on the GPU by default.
    # The XLA_PYTHON_CLIENT_PREALLOCATE=false environment variable
    # avoids the crash of JAX.
    # For some reason it also does not crash if Tensorflow is tested
    # before JAX, which is done here.
    # Calling block_until_ready on all arrays created with JAX instead
    # of changing the tests order did not avoid the crash for some
    # reason.
    for backend in ["numpy", "torch", "tensorflow", "jax"]:
        # Skip backends which are not installed
        if importlib.util.find_spec(backend) is None:
            warnings.warn(f"Backend is not installed: {backend}")
            continue
        if backend == "torch":
            enable_cuda()
        for dtype_name in ["float32", "float64"]:
            set_precision(dtype_name, backend=backend)
            # Iterate over arguments in an inner loop here instead of an outer
            # loop so that there are less switches between backends
            for kwargs in func_extra_args:
                func(dtype_name=dtype_name, backend=backend, **kwargs)
48 |
49 |
def _run_linspace_with_grads_tests(dtype_name, backend, requires_grad):
    """
    Test _linspace_with_grads with the given dtype, numerical backend and
    requires_grad argument
    """
    # Currently only torch needs the requires_grad case distinction
    if requires_grad and backend != "torch":
        return
    print(
        f"Testing _linspace_with_grads; backend: {backend}, requires_grad:"
        f" {requires_grad}, precision: {dtype_name}"
    )
    dtype_backend = to_backend_dtype(dtype_name, like=backend)
    start = anp.array(-2.0, like=backend, dtype=dtype_backend)
    stop = anp.array(-1.0, like=backend, dtype=dtype_backend)
    assert (
        get_dtype_name(start) == dtype_name
    ), "Unexpected dtype for the configured precision"
    grid1d = _linspace_with_grads(start, stop, 10, requires_grad)
    # Backend, dtype and shape must match the inputs
    assert infer_backend(grid1d) == backend
    assert grid1d.dtype == start.dtype
    assert grid1d.shape == (10,)
    # The array has to begin at start and end at stop, the elements should
    # be inside [start, stop] and they should be monotonically increasing
    assert grid1d[0] == start
    assert grid1d[9] == stop
    for i in range(9):
        assert start <= grid1d[i] <= stop
        assert grid1d[i] < grid1d[i + 1]
    assert start <= grid1d[9] <= stop
78 |
79 |
def test_linspace_with_grads():
    """Test _linspace_with_grads with all possible configurations"""
    # Run once with gradients required and once without
    _run_tests_with_all_backends(
        _run_linspace_with_grads_tests,
        [{"requires_grad": rg} for rg in (True, False)],
    )
86 |
87 |
def _run_add_at_indices_tests(dtype_name, backend):
    """
    Test _add_at_indices with the given dtype and numerical backend
    """
    # JAX and Tensorflow are not yet supported
    if backend in ["jax", "tensorflow"]:
        return
    dtype_backend = to_backend_dtype(dtype_name, like=backend)

    print("Testing _add_at_indices for a simple identity case")
    idx = anp.array(list(range(500)), like=backend)
    acc = anp.array([0.0] * 500, dtype=dtype_backend, like=backend)
    src = anp.array([1.0] * 500, dtype=dtype_backend, like=backend)
    _add_at_indices(acc, idx, src, is_sorted=True)
    assert acc.dtype == dtype_backend
    assert acc.shape == (500,)
    assert anp.max(anp.abs(acc - src)) == 0.0

    print("Testing _add_at_indices when all indices refer to the same target index")
    acc = acc * 0.0
    idx = idx * 0 + 203
    _add_at_indices(acc, idx, src, is_sorted=True)
    assert acc[203] == 500.0
    acc[203] = 0.0
    assert anp.max(anp.abs(acc)) == 0.0

    print("Testing _add_at_indices with unsorted indices and integer dtype")
    acc = anp.array([0, 0, 0], like=backend)
    idx = anp.array([2, 1, 1, 2], like=backend)
    src = anp.array([1, 10, 100, 1000], like=backend)
    _add_at_indices(acc, idx, src)
    assert acc.dtype == idx.dtype
    assert anp.max(anp.abs(acc - anp.array([0, 110, 1001], like=backend))) == 0
121 |
122 |
def test_add_at_indices():
    """Test _add_at_indices with all possible configurations"""
    checks = _run_add_at_indices_tests
    _run_tests_with_all_backends(checks)
126 |
127 |
def _run_setup_integration_domain_tests(dtype_name, backend):
    """
    Test _setup_integration_domain with the given dtype and numerical backend
    """
    # Domains given as nested lists of Python floats, ints, or a mix of both
    # should all be converted to a backend tensor with the configured dtype
    list_domains = (
        [[0.0, 1.0], [1.0, 2.0]],
        [[0, 1], [1, 2]],
        [[0, 1.0], [1, 2.0]],
    )
    for domain_spec in list_domains:
        domain = _setup_integration_domain(2, domain_spec, backend)
        assert infer_backend(domain) == backend
        assert get_dtype_name(domain) == dtype_name

    # Default [-1,1]^4 domain when None is passed
    domain = _setup_integration_domain(4, None, backend)
    assert infer_backend(domain) == backend
    assert get_dtype_name(domain) == dtype_name
    assert domain.shape == (4, 2)

    # A user-specified backend tensor is passed through with shape and dtype
    # preserved; the backend argument is ignored in this case
    dtype_backend = to_backend_dtype(dtype_name, like=backend)
    custom_domain = anp.array(
        [[0.0, 1.0], [1.0, 2.0]], like=backend, dtype=dtype_backend
    )
    domain = _setup_integration_domain(2, custom_domain, "unused")
    assert domain.shape == custom_domain.shape
    assert domain.dtype == custom_domain.dtype

    # A dim argument which contradicts the domain must raise a ValueError
    with pytest.raises(ValueError, match=r".*domain.*"):
        _setup_integration_domain(3, [[0, 1.0], [1, 2.0]], backend)
    with pytest.raises(ValueError, match=r".*domain.*"):
        _setup_integration_domain(3, custom_domain, "unused")
167 |
168 |
def test_setup_integration_domain():
    """Test _setup_integration_domain with all possible configurations"""
    checks = _run_setup_integration_domain_tests
    _run_tests_with_all_backends(checks)
172 |
173 |
if __name__ == "__main__":
    try:
        # used to run this test individually
        for run_test in (
            test_linspace_with_grads,
            test_add_at_indices,
            test_setup_integration_domain,
        ):
            run_test()
    except KeyboardInterrupt:
        pass
182 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/vegas_map_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import to_backend_dtype
7 |
8 | from integration.vegas_map import VEGASMap
9 |
10 | from helper_functions import setup_test_for_backend
11 |
12 |
def _check_tensor_similarity(a, b, err_abs_max=0.0, expected_dtype=None):
    """Assert that two tensors share dtype and shape and differ elementwise by at most err_abs_max.

    If expected_dtype is given, both tensors must additionally have exactly that dtype.
    """
    assert a.dtype == b.dtype
    if expected_dtype:
        assert a.dtype == expected_dtype
    assert a.shape == b.shape
    max_abs_diff = anp.max(anp.abs(a - b))
    assert max_abs_diff <= err_abs_max
21 |
22 |
def _run_vegas_map_checks(backend, dtype_name):
    """Test if the VEGASMap methods work correctly while running a map update for example integrand output

    Args:
        backend (string): Numerical backend, e.g. "torch"
        dtype_name (string): Floating point precision name, e.g. "float32"
    """
    print(f"Testing VEGASMap with {backend}, {dtype_name}")
    dtype_float = to_backend_dtype(dtype_name, like=backend)
    dtype_int = to_backend_dtype("int64", like=backend)
    dim = 3
    N_intervals = 20
    vegasmap = VEGASMap(N_intervals, dim, backend, dtype_float)

    # Example points in the unit hypercube [0,1)^3
    y = anp.array(
        [[0.8121, 0.4319, 0.1612], [0.4746, 0.6501, 0.9241], [0.6143, 0.0724, 0.5818]],
        dtype=dtype_float,
        like=backend,
    )

    # Test _get_interval_ID and _get_interval_offset for the fresh VEGAS map
    # A fresh map is uniform, so e.g. y = 0.8121 corresponds to
    # 0.8121 * 20 = 16.242, i.e. interval ID 16 with offset 0.242
    ID_expected = anp.array(
        [[16, 8, 3], [9, 13, 18], [12, 1, 11]],
        dtype=dtype_int,
        like=backend,
    )
    off_expected = anp.array(
        [[0.2420, 0.6380, 0.2240], [0.4920, 0.0020, 0.4820], [0.2860, 0.4480, 0.6360]],
        dtype=dtype_float,
        like=backend,
    )
    ID = vegasmap._get_interval_ID(y)
    _check_tensor_similarity(ID, ID_expected, 0, dtype_int)
    off = vegasmap._get_interval_offset(y)
    _check_tensor_similarity(off, off_expected, 6e-5, dtype_float)

    # Test get_X for the fresh VEGAS map
    # Initially it should not change the points
    _check_tensor_similarity(vegasmap.get_X(y), y, 3e-7, dtype_float)

    # Get example point and function values
    # A regular grid of N_per_dim^dim points over [0, 1)^dim
    N_per_dim = 100
    y = anp.linspace(0.0, 0.99999, N_per_dim, dtype=dtype_float, like=backend)
    y = anp.meshgrid(*([y] * dim))
    y = anp.stack([mg.ravel() for mg in y], axis=1, like=backend)
    # Use exp to get a peak in a corner
    f_eval = anp.prod(anp.exp(y), axis=1)

    # Test get_Jac for a fresh VEGAS map
    # For the uniform map the Jacobian should be 1 everywhere
    jac = vegasmap.get_Jac(y)
    assert jac.shape == (N_per_dim**dim,)
    assert jac.dtype == dtype_float
    assert anp.max(anp.abs(jac - 1.0)) < 1e-14

    # Test vegasmap.accumulate_weight for a fresh VEGAS map
    jf_vec = f_eval * jac
    jf_vec2 = jf_vec**2
    vegasmap.accumulate_weight(y, jf_vec2)
    assert vegasmap.weights.dtype == dtype_float
    assert vegasmap.weights.shape == (dim, N_intervals)
    # The weights should be monotonically increasing for the given f_evals and
    # vegasmap
    assert anp.min(vegasmap.weights[:, 1:] - vegasmap.weights[:, :-1]) > 0.0
    assert vegasmap.counts.dtype == dtype_int
    assert vegasmap.counts.shape == (dim, N_intervals)
    # The counts are all 50000 here since y are grid points and the VEGAS map
    # does not yet warp points: 100/20 = 5 grid values per interval in one
    # dimension times 100^2 combinations in the other two dimensions
    assert anp.max(anp.abs(vegasmap.counts - 50000)) == 0

    # Test vegasmap._smooth_map
    weights = anp.array(
        [[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0]],
        dtype=dtype_float,
        like=backend,
    )
    counts = anp.ones(weights.shape, dtype=dtype_int, like=backend)
    alpha = 0.5
    # Expected values were pre-computed for this weights/counts/alpha input
    smoothed_weights_expected = anp.array(
        [
            [0.0, 0.0, 0.54913316, 0.75820765, 0.77899047, 0.77899047],
            [-0.0, -0.0, 0.64868024, 0.93220967, 0.64868024, -0.0],
        ],
        dtype=dtype_float,
        like=backend,
    )
    smoothed_weights = VEGASMap._smooth_map(weights, counts, alpha)
    _check_tensor_similarity(
        smoothed_weights, smoothed_weights_expected, 3e-7, dtype_float
    )

    # Test if vegasmap.update_map changes the edge locations and distances
    # correctly
    vegasmap.update_map()
    # The outermost edge locations must match the domain [0,1]^dim
    unit_domain = anp.array([[0.0, 1.0]] * dim, dtype=dtype_float, like=backend)
    _check_tensor_similarity(
        vegasmap.x_edges[:, [0, -1]], unit_domain, 0.0, dtype_float
    )
    assert vegasmap.x_edges.shape == (dim, N_intervals + 1), "Invalid number of edges"
    assert vegasmap.dx_edges.shape == (
        dim,
        N_intervals,
    ), "Invalid number of edge distances"
    assert vegasmap.dx_edges.dtype == dtype_float
    assert (
        anp.max(anp.abs(anp.sum(vegasmap.dx_edges, axis=1) - 1.0)) < 3e-7
    ), "In each dimension the edge distances should sum up to one."
    assert anp.min(vegasmap.dx_edges) > 0.0, "Non-positive edge distance"
    # The absolute value of the given integrand is monotonically increasing in
    # each dimension, so calculated interval sizes should monotonically decrease
    assert (
        anp.max(vegasmap.dx_edges[:, 1:] - vegasmap.dx_edges[:, :-1]) < 0.0
    ), "Edge distances should shrink towards the peak"

    # Test if the new mapping of points works correctly
    x = vegasmap.get_X(y)
    assert x.dtype == dtype_float
    assert x.shape == y.shape
    assert anp.max(anp.abs(x[0])) == 0.0, "Boundary point was remapped"
137 |
138 |
# Create one pytest-discoverable test function per backend/precision pair:
# test_vegas_map_numpy_f32, test_vegas_map_numpy_f64,
# test_vegas_map_torch_f32 and test_vegas_map_torch_f64
for _map_backend, _map_dtype in (
    ("numpy", "float32"),
    ("numpy", "float64"),
    ("torch", "float32"),
    ("torch", "float64"),
):
    globals()[
        f"test_vegas_map_{_map_backend}_f{_map_dtype[-2:]}"
    ] = setup_test_for_backend(_run_vegas_map_checks, _map_backend, _map_dtype)
151 |
152 |
if __name__ == "__main__":
    # used to run this test individually
    for _run in (
        test_vegas_map_numpy_f32,
        test_vegas_map_numpy_f64,
        test_vegas_map_torch_f32,
        test_vegas_map_torch_f64,
    ):
        _run()
159 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/vegas_mul_map_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import to_backend_dtype
7 |
8 | from integration.vegas_mul_map import VEGASMultiMap
9 |
10 | from helper_functions import setup_test_for_backend
11 | import torch
12 |
def test_vegas_mul_map():
    """Smoke test for VEGASMultiMap: run one full map update cycle on random input.

    The test passes if every step (get_X, accumulate_weight, _smooth_map,
    update_map) executes without raising an exception; no numerical results
    are asserted.
    """
    # Fix the seed so the random input is reproducible between runs
    torch.manual_seed(0)
    n, dim, n_samples = 200, 6, 2000
    # VEGASMultiMap(n, N_intervals, dim, backend, dtype, alpha=0.5)
    vmap = VEGASMultiMap(
        n=n, N_intervals=1000, dim=dim, backend="torch", dtype=torch.float
    )

    # Uniform sample points in [0, 1); the layout used here is (dim, n, samples)
    rng_y = torch.rand((dim, n, n_samples))
    vmap.get_X(rng_y)

    # Accumulate random squared (Jacobian-weighted) function values
    jf2 = torch.rand_like(rng_y)
    vmap.accumulate_weight(rng_y, jf2)

    # Smooth the accumulated weights and update the map itself
    VEGASMultiMap._smooth_map(vmap.weights, vmap.counts, 0.5)
    vmap.update_map()


if __name__ == "__main__":
    # used to run this test individually
    test_vegas_mul_map()
38 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/vegas_mul_stratification_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import to_backend_dtype
7 |
8 | from integration.vegas_mul_stratification import VEGASMultiStratification
9 |
10 | from helper_functions import setup_test_for_backend
11 | import torch
12 | from integration.rng import RNG
13 |
def test_vegas_mul_stratification():
    """Run one VEGASMultiStratification cycle and check basic invariants.

    Covers get_NH, get_Y, accumulate_weight and update_DH for a batch of
    n = 200 parallel stratifications in 6 dimensions.
    """
    n = 200
    dim = 6
    # Seeded RNG makes the test reproducible
    rng = RNG(backend="torch", seed=123)

    vstrat = VEGASMultiStratification(
        n=n,
        N_increment=20000,
        dim=dim,
        rng=rng,
        backend="torch",
        dtype=torch.float,
        beta=0.75,
    )

    # Test if get_NH works correctly for a fresh VEGASStratification
    neval = vstrat.get_NH(40000)
    assert neval.shape == (n, vstrat.N_cubes)

    # Test if sample point calculation works correctly for a
    # fresh VEGASStratification
    y = vstrat.get_Y(neval)
    assert y.shape == (n, anp.sum(neval[0, :]), dim)

    # Test accumulate_weight
    # Use exp to get a peak in a corner
    f_eval = anp.prod(anp.exp(y), axis=2)
    jf, jf2 = vstrat.accumulate_weight(neval, f_eval)
    assert anp.min(jf2) >= 0.0, "Sums of squared values should be non-negative"
    assert (
        anp.min(jf ** 2 - jf2) >= 0.0
    ), "Squared sums should be bigger than summed squares"

    # Test the dampened sample counts update
    vstrat.update_DH()

    # Test if get_NH still works correctly
    neval = vstrat.get_NH(40000)
    assert neval[0, -1] > neval[0, 0], "The hypercube at the peak should have more points"

    # Test if sample point calculation still works correctly
    y = vstrat.get_Y(neval)
    assert anp.all(y >= 0.0) and anp.all(y <= 1.0), "Sample points are out of bounds"


if __name__ == "__main__":
    # used to run this test individually
    test_vegas_mul_stratification()
67 |
68 |
69 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/vegas_stratification_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import to_backend_dtype
7 |
8 | from integration.rng import RNG
9 | from integration.vegas_stratification import VEGASStratification
10 |
11 | from helper_functions import setup_test_for_backend
12 |
13 |
def _run_vegas_stratification_checks(backend, dtype_name):
    """Test if the VEGASStratification methods work correctly

    Args:
        backend (string): Numerical backend, e.g. "torch"
        dtype_name (string): Floating point precision name, e.g. "float32"
    """
    print(f"Testing VEGASStratification with {backend}, {dtype_name}")
    dtype_float = to_backend_dtype(dtype_name, like=backend)
    dtype_int = to_backend_dtype("int64", like=backend)
    dim = 3
    # 1000 is the first positional argument of VEGASStratification
    # (presumably the number of evaluations per iteration — verify against
    # the VEGASStratification signature)
    strat = VEGASStratification(
        1000,
        dim=dim,
        rng=RNG(backend=backend, seed=0),
        backend=backend,
        dtype=dtype_float,
    )

    # Test if get_NH works correctly for a fresh VEGASStratification
    neval = strat.get_NH(4000)
    assert neval.dtype == dtype_int
    assert neval.shape == (strat.N_cubes,)
    assert (
        anp.max(anp.abs(neval - neval[0])) == 0
    ), "Varying number of evaluations per hypercube for a fresh VEGASStratification"

    # Test if sample point calculation works correctly for a
    # fresh VEGASStratification
    y = strat.get_Y(neval)
    assert y.dtype == dtype_float
    assert y.shape == (anp.sum(neval), dim)
    assert anp.all(y >= 0.0) and anp.all(y <= 1.0), "Sample points are out of bounds"

    # Test accumulate_weight
    # Use exp to get a peak in a corner
    f_eval = anp.prod(anp.exp(y), axis=1)
    jf, jf2 = strat.accumulate_weight(neval, f_eval)
    assert jf.dtype == jf2.dtype == dtype_float
    assert jf.shape == jf2.shape == (strat.N_cubes,)
    assert anp.min(jf2) >= 0.0, "Sums of squared values should be non-negative"
    assert (
        anp.min(jf**2 - jf2) >= 0.0
    ), "Squared sums should be bigger than summed squares"

    # Test the dampened sample counts update
    # After the update dh should be a probability distribution over hypercubes
    strat.update_DH()
    assert strat.dh.shape == (strat.N_cubes,)
    assert strat.dh.dtype == dtype_float
    assert anp.min(strat.dh) >= 0.0, "Invalid probabilities for hypercubes"
    assert anp.abs(strat.dh.sum() - 1.0) < 4e-7, "Invalid probabilities for hypercubes"
    assert (
        strat.dh[-1] > strat.dh[0]
    ), "The hypercube at the peak should have a higher probability to get points"

    # Test if get_NH still works correctly
    neval = strat.get_NH(4000)
    assert neval.dtype == dtype_int
    assert neval.shape == (strat.N_cubes,)
    assert neval[-1] > neval[0], "The hypercube at the peak should have more points"

    # Test if sample point calculation still works correctly
    y = strat.get_Y(neval)
    assert y.dtype == dtype_float
    assert y.shape == (anp.sum(neval), dim)
    assert anp.all(y >= 0.0) and anp.all(y <= 1.0), "Sample points are out of bounds"
75 |
76 |
# Create one pytest-discoverable test function per backend/precision pair:
# test_vegas_stratification_numpy_f32, test_vegas_stratification_numpy_f64,
# test_vegas_stratification_torch_f32 and test_vegas_stratification_torch_f64
for _strat_backend, _strat_dtype in (
    ("numpy", "float32"),
    ("numpy", "float64"),
    ("torch", "float32"),
    ("torch", "float64"),
):
    globals()[
        f"test_vegas_stratification_{_strat_backend}_f{_strat_dtype[-2:]}"
    ] = setup_test_for_backend(
        _run_vegas_stratification_checks, _strat_backend, _strat_dtype
    )
89 |
90 |
if __name__ == "__main__":
    # used to run this test individually
    for _run in (
        test_vegas_stratification_numpy_f32,
        test_vegas_stratification_numpy_f64,
        test_vegas_stratification_torch_f32,
        test_vegas_stratification_torch_f64,
    ):
        _run()
97 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/tests/vegas_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append("../")
4 |
5 | from autoray import numpy as anp
6 | from autoray import to_backend_dtype, astype
7 | import timeit
8 | import cProfile
9 | import pstats
10 | from unittest.mock import patch
11 |
12 | from integration.vegas import VEGAS
13 | from integration.rng import RNG
14 |
15 | from helper_functions import (
16 | compute_integration_test_errors,
17 | setup_test_for_backend,
18 | )
19 |
20 |
def _run_example_integrations(backend, dtype_name):
    """Test the integrate method in VEGAS for the given backend and example test functions using compute_integration_test_errors

    Args:
        backend (string): Numerical backend, e.g. "torch"
        dtype_name (string): Floating point precision name, e.g. "float64"
    """
    print(f"Testing VEGAS+ with example functions with {backend}, {dtype_name}")
    vegas = VEGAS()

    # 1D Tests
    N = 10000
    errors, _ = compute_integration_test_errors(
        vegas.integrate,
        {"N": N, "dim": 1, "seed": 0},
        dim=1,
        use_complex=False,
        backend=backend,
    )
    print("1D VEGAS Test: Passed N =", N, "\n", "Errors: ", errors)
    # Empirical error bounds: tight for the first three test functions,
    # loose for all functions, and tight again from index 6 onwards;
    # functions with indices 3-5 are only covered by the loose bound
    for error in errors[:3]:
        assert error < 5e-3

    for error in errors:
        assert error < 9.0

    for error in errors[6:]:
        assert error < 6e-3

    # 3D Tests
    N = 10000
    errors, _ = compute_integration_test_errors(
        vegas.integrate,
        {"N": N, "dim": 3, "seed": 0},
        dim=3,
        use_complex=False,
        backend=backend,
    )
    print("3D VEGAS Test: Passed N =", N, "\n", "Errors: ", errors)
    for error in errors:
        assert error < 0.61

    # 10D Tests
    N = 10000
    errors, _ = compute_integration_test_errors(
        vegas.integrate,
        {"N": N, "dim": 10, "seed": 0},
        dim=10,
        use_complex=False,
        backend=backend,
    )
    print("10D VEGAS Test: Passed N =", N, "\n", "Errors: ", errors)
    for error in errors:
        assert error < 12.5
70 |
71 |
def _run_vegas_accuracy_checks(backend, dtype_name):
    """Test VEGAS+ with special peak integrands where it should be significantly more accurate than MonteCarlo

    Args:
        backend (string): Numerical backend, e.g. "torch"
        dtype_name (string): Floating point precision name, e.g. "float64"
    """
    print(f"Testing VEGAS+ accuracy with {backend}, {dtype_name}")
    dtype = to_backend_dtype(dtype_name, like=backend)
    integrator = VEGAS()

    print("Integrating a function with a single peak")
    integration_domain = anp.array(
        [[1.0, 5.0], [-4.0, 4.0], [2.0, 6.0]], dtype=dtype, like=backend
    )
    dim = integration_domain.shape[0]

    def integrand_hypercube_peak(x):
        """An integrand which is close to zero everywhere except in a hypercube of volume 1."""
        # A product corresponds to logical And
        in_cube = anp.prod((x >= 3.0) * (x < 4.0), axis=1)
        # Add 0.001 since VEGAS+ does not yet support integrands which evaluate
        # to zero for all passed points
        return astype(in_cube, dtype_name) + 0.001

    # The unit hypercube contributes 1.0 and the constant 0.001 offset
    # contributes 0.001 times the total domain volume
    reference_integral = (
        anp.prod(integration_domain[:, 1] - integration_domain[:, 0]) * 0.001 + 1.0
    )

    # Use multiple seeds to reduce luck
    for seed in [0, 1, 2, 3, 41317]:
        integral = integrator.integrate(
            integrand_hypercube_peak,
            dim,
            N=30000,
            integration_domain=integration_domain,
            seed=seed,
        )
        assert anp.abs(integral - reference_integral) < 0.03

    print("Integrating a function with peaks on the diagonal")
    peak_distance = 100.0
    integration_domain = anp.array(
        [[1.0, 1.0 + peak_distance], [-4.0, -4.0 + peak_distance]],
        dtype=dtype,
        like=backend,
    )
    dim = 2

    def integrand_diagonal_peaks(x):
        """An integrand which is close to zero everywhere except two corners of the integration domain."""
        # a peaks at the lower corner domain[:, 0], b at the upper corner domain[:, 1]
        a = anp.exp(anp.sum(integration_domain[:, 0] - x, axis=1))
        b = anp.exp(anp.sum(x - integration_domain[:, 1], axis=1))
        return a + b

    # If the integration domain is [r_1, r_1 + c]x[r_2, r_2 + c]
    # for some numbers r_1, r_2,
    # the integral of integrand_diagonal_peaks is the integral of
    # exp(-x_1) exp(-x_2) + exp(x_1 - c) exp(x_2 - c) over x in [0, c]^2.
    # indefinite integral (an antiderivative in x_1 and x_2):
    # F(x) = exp(-x_1 - x_2) + exp(x_1 + x_2 - 2c)
    # definite integral:
    # F((c,c)) - F((c,0)) - F((0,c)) + F((0,0)) = 2 - 4 exp(-c) + 2 exp(-2c)
    reference_integral = (
        2.0
        - 4.0 * anp.exp(-peak_distance, like="numpy")
        + 2.0 * anp.exp(-2.0 * peak_distance, like="numpy")
    )

    # Use multiple seeds to reduce luck
    for seed in [0, 1, 2, 3, 41317]:
        integral = integrator.integrate(
            integrand_diagonal_peaks,
            dim,
            N=30000,
            integration_domain=integration_domain,
            seed=seed,
        )
        assert anp.abs(integral - reference_integral) < 0.03
146 |
147 |
class ModifiedRNG(RNG):
    """A modified Random Number Generator which replaces some of the random numbers with 0.0 and 1.0"""

    def __init__(self, *args, **kargs):
        super().__init__(*args, **kargs)
        # The base class sets up self.uniform in its __init__; wrap that
        # sampler so every draw is post-processed by modify_numbers
        base_uniform = self.uniform

        def modified_uniform(*u_args, **u_kwargs):
            return self.modify_numbers(base_uniform(*u_args, **u_kwargs))

        self.uniform = modified_uniform

    def modify_numbers(self, numbers):
        """Change the randomly generated numbers"""
        zeros = anp.zeros(numbers.shape, dtype=numbers.dtype, like=numbers)
        ones = anp.ones(numbers.shape, dtype=numbers.dtype, like=numbers)
        # Values below 0.5 are stretched back over [0, 1); the other half of
        # the values is replaced with exactly 0.0 (below 0.75) or 1.0
        stretched = numbers * 2.0
        extremes = anp.where(numbers < 0.75, zeros, ones)
        return anp.where(numbers < 0.5, stretched, extremes)
166 |
167 |
def _run_vegas_special_case_checks(backend, dtype_name):
    """Test VEGAS+ in special cases, for example an integrand which is zero everywhere

    Args:
        backend (string): Numerical backend, e.g. "torch"
        dtype_name (string): Floating point precision name, e.g. "float64"
    """
    print(f"Testing VEGAS+ special cases with {backend}, {dtype_name}")
    integrator = VEGAS()

    print("Testing VEGAS with an integrand which is zero everywhere")
    # The integral over the zero function must be exactly zero
    integral = integrator.integrate(
        lambda x: x[:, 0] * 0.0,
        2,
        N=10000,
        integration_domain=[[0.0, 3.0]] * 2,
        seed=0,
        backend=backend,
    )
    assert anp.abs(integral) == 0.0

    print("Testing VEGAS with a constant integrand")
    # The constant 10.0 over the [0,3]^2 domain integrates to 10 * 9 = 90
    integral = integrator.integrate(
        lambda x: x[:, 0] * 0.0 + 10.0,
        2,
        N=10000,
        integration_domain=[[0.0, 3.0]] * 2,
        seed=0,
        backend=backend,
    )
    assert anp.abs(integral - 90.0) < 1e-13

    print("Testing VEGAS with random numbers which are 0.0 and 1.0")
    # This test may be helpful to detect rounding and indexing errors which
    # would happen with a low probability with the usual RNG
    # Patch the RNG class in the vegas module so the integrator internally
    # constructs a ModifiedRNG instead
    with patch("integration.vegas.RNG", ModifiedRNG):
        integral = integrator.integrate(
            lambda x: anp.sum(x, axis=1),
            2,
            N=10000,
            integration_domain=[[0.0, 1.0]] * 2,
            seed=0,
            backend=backend,
        )
        # x_1 + x_2 over [0,1]^2 integrates to 1.0
        assert isinstance(integrator.rng, ModifiedRNG)
        assert anp.abs(integral - 1.0) < 0.1
209 |
210 |
def _run_vegas_tests(backend, dtype_name):
    """Test if VEGAS+ works with example functions and is accurate as expected

    Args:
        backend (string): Numerical backend, e.g. "torch"
        dtype_name (string): Floating point precision name, e.g. "float64"
    """
    # Run the three check groups in their original order
    for checks in (
        _run_vegas_accuracy_checks,
        _run_vegas_special_case_checks,
        _run_example_integrations,
    ):
        checks(backend, dtype_name)
216 |
217 |
# Create one pytest-discoverable VEGAS+ test per backend, both at float64:
# test_integrate_numpy and test_integrate_torch
for _vegas_backend in ("numpy", "torch"):
    globals()[f"test_integrate_{_vegas_backend}"] = setup_test_for_backend(
        _run_vegas_tests, _vegas_backend, "float64"
    )
220 |
221 |
if __name__ == "__main__":
    # used to run this test individually
    test_integrate_numpy()

    profile_torch = False
    if profile_torch:
        profiler = cProfile.Profile()
        profiler.enable()
        start = timeit.default_timer()
        test_integrate_torch()
        # Stop the timer before disabling the profiler and printing the
        # statistics, so the reported time covers only the test itself
        # (previously the time wrongly included stats printing)
        stop = timeit.default_timer()
        profiler.disable()
        stats = pstats.Stats(profiler).sort_stats("tottime")
        stats.print_stats()
        print("Test ran for ", stop - start, " seconds.")
    else:
        test_integrate_torch()
239 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/utils/deployment_test.py:
--------------------------------------------------------------------------------
1 | from torchquad import Boole, Trapezoid, Simpson, VEGAS, MonteCarlo
2 |
3 | # TODO test these in the future
4 | # from ..plots.plot_convergence import plot_convergence
5 | # from ..plots.plot_runtime import plot_runtime
6 |
7 | from torchquad import enable_cuda
8 | from torchquad import set_precision
9 | from torchquad import set_log_level
10 | from loguru import logger
11 |
12 |
def _deployment_test():
    """Check that torchquad was deployed successfully.

    This method is used to check successful deployment of torchquad.
    It should not be used by users; we use it internally.
    It enables CUDA (if available), sets double precision and runs a small
    example integration with each integrator.
    """
    # NOTE: a stray '"""[summary]"""' no-op string statement which followed
    # the docstring was removed; it had no effect on behavior.
    import torch

    set_log_level("INFO")
    logger.info("####################################")
    logger.info("######## TESTING DEPLOYMENT ########")
    logger.info("####################################")
    logger.info("")

    logger.info("Testing CUDA init... ")
    # Test inititialization on GPUs if available
    enable_cuda()
    set_precision("double")
    logger.info("Done.")

    logger.info("")
    logger.info("####################################")

    logger.info("Initializing integrators... ")
    tp = Trapezoid()
    sp = Simpson()
    boole = Boole()
    mc = MonteCarlo()
    vegas = VEGAS()
    logger.info("Done.")

    def some_test_function(x):
        return torch.exp(x) * torch.pow(x, 2)

    logger.info("")
    logger.info("####################################")

    logger.info("Testing integrate functions... ")
    tp.integrate(some_test_function, dim=1, N=101)
    sp.integrate(some_test_function, dim=1, N=101)
    boole.integrate(some_test_function, dim=1, N=101)
    mc.integrate(some_test_function, dim=1, N=101)
    vegas.integrate(some_test_function, dim=1, N=300)
    logger.info("Done.")
    logger.info("")

    logger.info("####################################")
    logger.info("############ ALL DONE. #############")
    logger.info("####################################")
63 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/utils/enable_cuda.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 |
3 | from .set_precision import set_precision
4 |
5 |
def enable_cuda(data_type="float32"):
    """This function sets torch's default device to CUDA if possible. Call before declaring any variables!
    The default precision can be set here initially, or using set_precision later.

    Args:
        data_type ("float32", "float64" or None, optional): Data type to use. If None, skip the call to set_precision. Defaults to "float32".
    """
    import torch

    # Guard clause: warn and fall back to the CPU when no CUDA device is usable
    if not torch.cuda.is_available():
        logger.warning(
            "Error enabling CUDA. cuda.is_available() returned False. CPU will be used."
        )
        return

    logger.info(f"PyTorch VERSION: {torch.__version__}")
    logger.info(f"CUDNN VERSION: {torch.backends.cudnn.version()}")
    logger.info(f"Number of CUDA Devices: {torch.cuda.device_count()}")
    logger.info(f"Active CUDA Device: GPU{torch.cuda.current_device()}")
    if data_type is not None:
        set_precision(data_type)
26 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/utils/set_log_level.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | import sys
3 |
4 |
def set_log_level(log_level: str):
    """Set the log level for the logger.
    The preset log level when initialising Torchquad is the value of the TORCHQUAD_LOG_LEVEL environment variable, or 'WARNING' if the environment variable is unset.

    Args:
        log_level (str): The log level to set. Options are 'TRACE','DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', 'CRITICAL'
    """
    # Replace all existing sinks with a single stderr sink at the new level
    logger.remove()
    log_format = "{time:HH:mm:ss}|TQ-{level}| {message}"
    logger.add(sys.stderr, level=log_level, format=log_format)
    logger.debug(f"Setting LogLevel to {log_level}")
19 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/utils/set_precision.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | import os
3 |
4 |
5 | def _get_precision(backend):
6 | """Get the configured default precision for NumPy or Tensorflow.
7 |
8 | Args:
9 | backend ("numpy" or "tensorflow"): Numerical backend
10 |
11 | Returns:
12 | "float32", "float64" or None: Default floating point precision
13 | """
14 | return os.environ.get(f"TORCHQUAD_DTYPE_{backend.upper()}", None)
15 |
16 |
def set_precision(data_type="float32", backend="torch"):
    """This function allows the user to set the default precision for floating point numbers for the given numerical backend.
    Call before declaring your variables.
    NumPy and Tensorflow don't have global dtypes:
    https://github.com/numpy/numpy/issues/6860
    https://github.com/tensorflow/tensorflow/issues/26033
    Therefore, torchquad sets the dtype argument for these two when initialising the integration domain.

    Args:
        data_type (string, optional): Data type to use, either "float32" or "float64". Defaults to "float32".
        backend (string, optional): Numerical backend for which the data type is changed. Defaults to "torch".
    """
    # Backwards-compatibility: allow "float" and "double", optionally with
    # upper-case letters
    data_type = {"float": "float32", "double": "float64"}.get(
        data_type.lower(), data_type
    )
    # Invalid precisions are logged and silently mapped to float32
    if data_type not in ["float32", "float64"]:
        logger.error(
            f'Invalid data type "{data_type}". Only float32 and float64 are supported. Setting the data type to float32.'
        )
        data_type = "float32"

    if backend == "torch":
        import torch

        # Use CUDA tensor types only when CUDA was already initialized
        # (e.g. via enable_cuda)
        cuda_enabled = torch.cuda.is_initialized()
        tensor_dtype, tensor_dtype_name = {
            ("float32", True): (torch.cuda.FloatTensor, "cuda.Float32"),
            ("float64", True): (torch.cuda.DoubleTensor, "cuda.Float64"),
            ("float32", False): (torch.FloatTensor, "Float32"),
            ("float64", False): (torch.DoubleTensor, "Float64"),
        }[(data_type, cuda_enabled)]
        cuda_enabled_info = (
            "CUDA is initialized" if cuda_enabled else "CUDA not initialized"
        )
        logger.info(
            f"Setting Torch's default tensor type to {tensor_dtype_name} ({cuda_enabled_info})."
        )
        # NOTE(review): torch.set_default_tensor_type is deprecated in newer
        # torch releases — confirm the supported torch version range
        torch.set_default_tensor_type(tensor_dtype)
    elif backend == "jax":
        # NOTE(review): the jax.config import path is deprecated in newer JAX
        # versions — confirm the supported JAX version range
        from jax.config import config

        # JAX only distinguishes between 32- and 64-bit mode globally
        config.update("jax_enable_x64", data_type == "float64")
        logger.info(f"JAX data type set to {data_type}")
    elif backend in ["numpy", "tensorflow"]:
        # Stored in an environment variable which _get_precision reads back
        os.environ[f"TORCHQUAD_DTYPE_{backend.upper()}"] = data_type
        logger.info(
            f"Default dtype config for backend {backend} set to {_get_precision(backend)}"
        )
    else:
        logger.error(f"Changing the data type is not supported for backend {backend}")
69 |
--------------------------------------------------------------------------------
/torchquadMy/torchquad/utils/set_up_backend.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | import os
3 |
4 | from .set_precision import set_precision
5 | from .enable_cuda import enable_cuda
6 |
7 |
8 | def _get_default_backend():
9 | """Get the latest backend which was passed to set_up_backend.
10 | If set_up_backend has never been executed, return "torch" for backwards compatibility"""
11 | return os.environ.get("TORCHQUAD_DEFAULT_BACKEND", "torch")
12 |
13 |
def set_up_backend(backend, data_type=None, torch_enable_cuda=True):
    """Configure a numerical backend for torchquad.

    With the torch backend, this function calls torchquad.enable_cuda unless torch_enable_cuda is False.
    With the tensorflow backend, this function enables tensorflow's numpy behaviour, which is a requirement for torchquad.
    If a data type is passed, set the default floating point precision with torchquad.set_precision.

    Args:
        backend (string): Numerical backend, e.g. "torch"
        data_type ("float32", "float64" or None, optional): Data type which is passed to set_precision. If None, do not call set_precision except if CUDA is enabled for torch. Defaults to None.
        torch_enable_cuda (Bool, optional): If True and backend is "torch", call enable_cuda. Defaults to True.
    """
    if backend == "torch" and torch_enable_cuda:
        if data_type is None:
            enable_cuda()
        else:
            # set_precision runs below, so suppress enable_cuda's own
            # precision setting to avoid applying it twice.
            enable_cuda(data_type=None)
    elif backend == "tensorflow":
        from tensorflow.python.ops.numpy_ops import np_config

        logger.info("Enabling numpy behaviour for Tensorflow")
        # The Tensorflow backend only works with numpy behaviour enabled.
        np_config.enable_numpy_behavior()
    if data_type is not None:
        set_precision(data_type, backend=backend)
    # Remember the backend globally for _get_default_backend
    os.environ["TORCHQUAD_DEFAULT_BACKEND"] = backend
43 |
--------------------------------------------------------------------------------
/train/BJAQ.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/train/BJAQ.pickle
--------------------------------------------------------------------------------
/train/models/BJAQ/BJAQ-id1-best-val.t:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/train/models/BJAQ/BJAQ-id1-best-val.t
--------------------------------------------------------------------------------
/train/models/power/power-id1-best-val.t:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/train/models/power/power-id1-best-val.t
--------------------------------------------------------------------------------
/train/power.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/for0nething/FACE-A-Normalizing-Flow-based-Cardinality-Estimator/a71dbc24dbcf537f6d1afea426f3f720ce60c70e/train/power.pickle
--------------------------------------------------------------------------------
/train/prefetcher.py:
--------------------------------------------------------------------------------
1 | import torch
2 | """
3 |
4 | """
5 | import time
class data_prefetcher():
    """Wraps a data loader and copies each upcoming batch to the GPU on a
    dedicated CUDA stream, so the host-to-device transfer can overlap with
    computation on the current stream.

    NOTE(review): unlike the common NVIDIA/apex prefetcher, next() does not
    call batch.record_stream(...) after the stream sync — presumably fine if
    each batch is fully consumed before the next preload; verify if batches
    are retained across iterations.
    """
    def __init__(self, loader):
        # loader: an iterable yielding objects with a .cuda(non_blocking=...)
        # method (e.g. torch tensors from a DataLoader).
        st = time.time()
        self.loader = iter(loader)

        # Second, untouched iterator over the same loader.
        # NOTE(review): its purpose is not visible here — possibly kept so a
        # caller can restart iteration from the beginning; confirm usage.
        self.origin_loader = iter(loader)
        # print('Generate loader took', time.time() - st)
        # Side stream on which the asynchronous copies are issued.
        self.stream = torch.cuda.Stream()
        self.preload()

    def preload(self):
        # Fetch the next batch and kick off its async copy to the GPU.
        try:
            self.next_batch = next(self.loader)
        except StopIteration:
            # Loader exhausted: None marks the end of iteration.
            self.next_batch = None
            return
        # Issue the copy on the side stream; non_blocking=True only overlaps
        # when the host memory is pinned.
        with torch.cuda.stream(self.stream):
            self.next_batch = self.next_batch.cuda(non_blocking=True)

    def next(self):
        # Block the current stream until the pending copy has finished,
        # hand out the ready batch, and start prefetching the next one.
        # Returns None once the loader is exhausted.
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.next_batch
        self.preload()
        return batch
--------------------------------------------------------------------------------
/utils/dataUtils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 | PROJECT_PATH = '/home/jiayi/disk/FACE-release/'
6 |
7 | import numpy as np
8 | import pandas as pd
9 | import os
10 |
11 |
12 | # # Help Functions
13 |
14 | # ## LoadTable
15 |
16 | # In[2]:
17 |
18 |
# Column names per supported dataset; keys are the dataset identifiers used
# throughout this module (see LoadTable).
COLS = {
    'power': ['c0', 'c1', 'c2', 'c3', 'c4', 'c5'],
    'BJAQ' : ['PM2.5', 'PM10', 'NO2', 'O3', 'TEMP'],

}
24 |
25 |
26 | # In[3]:
27 |
28 |
def LoadTable(dataset_name):
    """Load the .npy table for *dataset_name* as a float32 DataFrame.

    Returns:
        (DataFrame, number of rows, number of columns)
    """
    path = os.path.join(PROJECT_PATH + 'data/', '{}.npy'.format(dataset_name))
    data = np.load(path).astype(np.float32)

    print('data shape:', data.shape)
    n, dim = data.shape
    frame = pd.DataFrame(data, columns=COLS[dataset_name])
    return frame, n, dim
39 |
40 |
41 | # In[ ]:
42 |
43 |
44 |
45 |
46 |
47 | # ## completeColumns
48 |
49 | # In[4]:
50 |
51 |
def completeColumns(table, columns, operators, vals):
    """Expand a query's (columns, operators, vals) to cover every table column.

    Columns not referenced by the query get operator and value None.

    Args:
        table: object exposing .dim (column count), .columns, and
            getColID(name) -> positional index.
        columns: queried columns; strings or objects with a .name attribute.
        operators: one operator per queried column, aligned with *columns*.
        vals: one value per queried column, aligned with *columns*.

    Returns:
        (all_columns, ops_per_column, vals_per_column) — the last two are
        lists of length table.dim with None in unqueried slots.
    """
    ncols = table.dim
    all_columns = table.columns
    # Renamed from `os`/`vs`: the original local `os` shadowed the os module.
    ops_full, vals_full = [None] * ncols, [None] * ncols

    for col, op, val in zip(columns, operators, vals):
        idx = table.getColID(col if isinstance(col, str) else col.name)
        ops_full[idx] = op
        vals_full[idx] = val

    return all_columns, ops_full, vals_full
64 |
65 |
66 | # In[ ]:
67 |
68 |
69 |
70 |
71 |
72 | # In[5]:
73 |
74 |
def FillInUnqueriedColumns(table, columns, operators, vals):
    """Like completeColumns, but requires string column names and resolves
    indices via table.ColumnIndex.

    Args:
        table: object exposing .columns and ColumnIndex(name) -> index.
        columns: queried column names (strings only).
        operators: one operator per queried column.
        vals: one value per queried column.

    Returns:
        (all_columns, ops_per_column, vals_per_column) — lists of length
        len(table.columns) with None in unqueried slots.
    """
    ncols = len(table.columns)
    all_columns = table.columns
    # Renamed from `os`/`vs`: the original local `os` shadowed the os module.
    ops_full, vals_full = [None] * ncols, [None] * ncols

    for col, op, val in zip(columns, operators, vals):
        assert isinstance(col, str)
        idx = table.ColumnIndex(col)
        ops_full[idx] = op
        vals_full[idx] = val

    return all_columns, ops_full, vals_full
87 |
88 |
89 | # ## LoadOracleCardinalities
90 |
91 | # In[6]:
92 |
93 |
def LoadOracleCardinalities(dataset_name, querySeed=1234):
    """Read precomputed true cardinalities for *dataset_name*, if present.

    Returns:
        A flat numpy array of true cardinalities, or None when the dataset
        is unknown or the oracle file does not exist.
    """
    # Both supported datasets share the same oracle file naming scheme.
    path = None
    if dataset_name in ('power', 'BJAQ'):
        path = PROJECT_PATH + 'evaluate/oracle/{}_rng-{}.csv'.format(dataset_name, querySeed)

    if path and os.path.exists(path):
        df = pd.read_csv(path)
        print('Found oracle card!')
        return df.values.reshape(-1)
    print('Can not find oracle card! at')
    print(path)
    return None
109 |
110 |
111 | # In[ ]:
112 |
113 |
114 |
115 |
116 |
117 | # # DataWrapper
118 |
119 | # In[7]:
120 |
121 |
122 |
# (min, max) number of predicates drawn per generated query
# (see DataWrapper.generateQuery).
filterNum = {
    'power': (3,6),
    'BJAQ' : (2, 4),
}

# Per-column flags: 1 -> range predicates are sensible for the column,
# 0 -> only equality predicates are generated (used via np.where in
# DataWrapper.generateQuery).
sensible = {
    'power' : np.ones(6),
    'BJAQ' : np.ones(5),
}


# Per-column offset added to a predicate's upper bound so the interval
# includes the boundary value (see DataWrapper.GetLegalRange).
deltas = {
    'power': np.zeros(6),
    'BJAQ' : np.array([1, 1, 1, 1, 0.1])
}


# Per-column constants used in DataWrapper.GetUnNormalizedValue as
# (val - Norm_us)/Norm_ss. Presumably per-column mean/std from the BJAQ
# training pipeline — TODO confirm against the model's normalization.
Norm_us = {
    'BJAQ' : np.array([79.9326, 105.07354, 51.070656, 57.876205, 13.568575]),
}
Norm_ss = {
    'BJAQ' : np.array([80.15541, 91.38018, 35.06305, 56.71038, 11.425453]),
}


# In[8]:


# Comparison operators supported in queries, mapped to their vectorized
# numpy implementations (used by DataWrapper.getOracle).
OPS = {
    '>':np.greater,
    '<':np.less,
    '>=':np.greater_equal,
    '<=':np.less_equal,
    '=':np.equal,
}
158 |
159 |
160 | # In[9]:
161 |
162 |
class DataWrapper():
    """Wraps a dataset (pandas DataFrame) with random query generation,
    predicate-range normalization and brute-force oracle (true cardinality)
    computation."""

    def __init__(self, data, dataset_name):
        # data: pandas DataFrame of the table.
        # dataset_name: key into the module-level filterNum / sensible /
        # deltas dicts (and Norm_us / Norm_ss for 'BJAQ').
        self.data = data
        self.n = data.shape[0]
        self.cardinality = data.shape[0]
        self.dim = data.shape[1]
        self.columns = data.columns
        self.dataset_name = dataset_name

        # Per-column bounds, used to complete one-sided predicates.
        self.Mins = data.min(axis=0)
        self.Maxs = data.max(axis=0)

        # (min, max) number of predicates per generated query.
        self.minFilter, self.maxFilter = filterNum[self.dataset_name]
        self.sensible_to_do_range = sensible[self.dataset_name]
        self.colMap = {col: i for i, col in enumerate(self.columns)}
        # Per-column offset making predicate upper bounds inclusive.
        self.delta = deltas[self.dataset_name]

    def getColID(self, col):
        """Return the positional index of column *col*."""
        return self.colMap[col]

    def getCateColumns(self, cols):
        """Return the range-predicate-sensible flags for *cols*."""
        col_ids = [self.getColID(col) for col in cols]
        return self.sensible_to_do_range[col_ids]

    def GetUnNormalizedValue(self, col_id, val):
        """Map *val* of column *col_id* into the model's value space.

        'power' values pass through unchanged; other datasets are
        standardized as (val - U)/S with per-column constants.
        NOTE(review): despite the name, this standardizes the value rather
        than undoing a normalization.
        """
        if self.dataset_name == 'power':
            return val
        U = Norm_us[self.dataset_name]
        S = Norm_ss[self.dataset_name]
        return (val - U[col_id]) / S[col_id]

    def GetLegalRange(self, col, op, val):
        """Return the [low, high] interval selected by (col, op, val),
        mapped through GetUnNormalizedValue.

        Raises:
            ValueError: if *op* is not one of '=', '>', '>=', '<', '<=', 'in'.
        """
        col_id = self.getColID(col)
        add_one = self.delta[col_id]  # makes the upper bound inclusive
        if op == '=':
            l = self.GetUnNormalizedValue(col_id, val)
            r = self.GetUnNormalizedValue(col_id, val + add_one)
        elif op in ('>', '>='):
            l = self.GetUnNormalizedValue(col_id, val)
            r = self.GetUnNormalizedValue(col_id, self.Maxs[col_id] + add_one)
        elif op in ('<', '<='):
            l = self.GetUnNormalizedValue(col_id, self.Mins[col_id])
            r = self.GetUnNormalizedValue(col_id, val + add_one)
        elif op == 'in':
            # val is a pair [low, high].
            l = self.GetUnNormalizedValue(col_id, val[0])
            r = self.GetUnNormalizedValue(col_id, val[1] + add_one)
        else:
            # BUG FIX: an unknown operator previously crashed with a
            # NameError on l/r; fail with a clear message instead.
            raise ValueError('Unsupported operator: {}'.format(op))

        return [l, r]

    def getLegalRangeQuery(self, query):
        """Return one [low, high] interval per table column for *query*;
        unqueried columns cover their full value range."""
        cols, ops, vals = query
        cols, ops, vals = completeColumns(self, cols, ops, vals)

        # The shared [0., 1.] sublists are safe: every slot is reassigned
        # with a fresh list below.
        legal_list = [[0., 1.]] * len(vals)
        for i, (co, op, val_i) in enumerate(zip(cols, ops, vals)):
            col_id = self.getColID(co)
            if val_i is None:
                # Unqueried column: cover its full [min, max] range.
                legal_list[i] = self.GetLegalRange(co, 'in', [self.Mins[col_id], self.Maxs[col_id]])
            else:
                legal_list[i] = self.GetLegalRange(co, op, val_i)
        return legal_list

    def getLegalRangeNQuery(self, queries):
        """Apply getLegalRangeQuery to every query in *queries*."""
        return [self.getLegalRangeQuery(query) for query in queries]

    def generateQuery(self, rng):
        """Sample one random query as (columns, operators, values).

        Predicate endpoints come from two randomly drawn rows of the data.
        The rng call order matches the original implementation, so results
        are reproducible for a given seed.
        """
        num_filters = rng.randint(self.minFilter, self.maxFilter)

        # Two random rows supply the predicate endpoints.
        tuple0 = self.data.iloc[rng.randint(0, self.cardinality)].values
        tuple1 = self.data.iloc[rng.randint(0, self.cardinality)].values

        idxs = rng.choice(len(self.columns), replace=False, size=num_filters)
        cols = np.take(self.columns, idxs)

        # Range operators where sensible, '=' otherwise.
        ops = rng.choice(['<=', '>='], size=num_filters)
        ops_all_eqs = ['='] * num_filters
        sensible_to_do_range = self.getCateColumns(cols)
        ops = np.where(sensible_to_do_range, ops, ops_all_eqs)
        vals = list(tuple0[idxs])

        # BJAQ uses closed 'in' intervals spanning the two sampled rows.
        if self.dataset_name == 'BJAQ':
            ops = ['in'] * len(ops)

        tuple0 = tuple0[idxs]
        tuple1 = tuple1[idxs]
        for i, op in enumerate(ops):
            if op == 'in':
                a, b = tuple0[i], tuple1[i]
                vals[i] = [a, b] if a <= b else [b, a]

        return cols, ops, vals

    def generateNQuery(self, n, rng):
        """Generate *n* random queries."""
        return [self.generateQuery(rng) for _ in range(n)]

    def getOracle(self, query):
        """Count the rows matching *query* by brute-force evaluation.

        Raises:
            ValueError: if an operator is neither in OPS nor 'in'/'IN'.
        """
        columns, operators, vals = query
        assert len(columns) == len(operators) == len(vals)

        bools = None
        for col, op, val in zip(columns, operators, vals):
            series = self.data[col]
            if op in OPS:
                inds = OPS[op](series, val)
            elif op in ('in', 'IN'):
                # val is a closed interval [low, high].
                inds = np.greater_equal(series, val[0])
                inds &= np.less_equal(series, val[1])
            else:
                # BUG FIX: an unknown operator previously reused a stale
                # `inds` (or crashed with NameError on the first predicate).
                raise ValueError('Unsupported operator: {}'.format(op))

            bools = inds if bools is None else (bools & inds)
        return bools.sum()

    def getAndSaveOracle(self, queries, querySeed=2345):
        """Calculate oracle results for input queries and save the results.

        Writes one 'true_card' column per query to
        {PROJECT_PATH}/evaluate/oracle/{dataset}_rng-{querySeed}.csv.
        """
        n = len(queries)
        # BUG FIX: the buffer was np.empty(2000) regardless of n (garbage
        # tail for n < 2000, IndexError for n > 2000), and used the np.int
        # alias that was removed in NumPy 1.24.
        oracle_cards = np.empty(n)
        for i, query in enumerate(queries):
            oracle_cards[i] = self.getOracle(query)
        oracle_cards = oracle_cards.astype(int)
        df = pd.DataFrame(oracle_cards, columns=['true_card'])

        # Change it to your own path
        out_path = PROJECT_PATH + 'evaluate/oracle/{}_rng-{}.csv'.format(self.dataset_name, querySeed)
        print("Save oracle results to :")
        print(out_path)
        df.to_csv(out_path, index=False)
        return
329 |
330 |
331 |
--------------------------------------------------------------------------------