├── .conda
│   └── meta.yaml
├── .dev
│   ├── README.md
│   ├── container_anaconda
│   │   ├── Dockerfile
│   │   └── devcontainer.json
│   └── container_py39
│       ├── Dockerfile
│       └── devcontainer.json
├── .editorconfig
├── .github
│   └── workflows
│       ├── conda.yml
│       ├── pytest.yml
│       └── wheel.yml
├── .gitignore
├── .readthedocs.yaml
├── LICENSE.txt
├── README.rst
├── doc
│   ├── augment.rst
│   ├── basis.rst
│   ├── conf.py
│   ├── hacking.rst
│   ├── index.rst
│   ├── kernels.rst
│   ├── otherbases.rst
│   ├── porting.rst
│   ├── sampling.rst
│   ├── sve.rst
│   └── tutorial.rst
├── setup.py
├── src
│   └── sparse_ir
│       ├── __init__.py
│       ├── _gauss.py
│       ├── _roots.py
│       ├── _util.py
│       ├── abstract.py
│       ├── adapter.py
│       ├── augment.py
│       ├── basis.py
│       ├── basis_set.py
│       ├── dlr.py
│       ├── kernel.py
│       ├── poly.py
│       ├── sampling.py
│       ├── svd.py
│       └── sve.py
└── test
    ├── conftest.py
    ├── test_augment.py
    ├── test_basis_set.py
    ├── test_compare.py
    ├── test_dlr.py
    ├── test_gauss.py
    ├── test_kernel.py
    ├── test_poly.py
    ├── test_sampling.py
    ├── test_scipost_sample_code.py
    ├── test_sve.py
    └── test_whitespace.py

/.conda/meta.yaml:
--------------------------------------------------------------------------------
1 | {% set data = load_setup_py_data(setup_file='../setup.py', from_recipe_dir=True) %}
2 | {% set name = "sparse-ir" %}
3 | {% set version = data.get("version") %}
4 |
5 | package:
6 |   name: "{{ name|lower }}"
7 |   version: "{{ version }}"
8 |
9 | source:
10 |   git_url: ../
11 |
12 | build:
13 |   number: 0
14 |   noarch: python
15 |   script: "{{ PYTHON }} -m pip install . -vv"
16 |
17 | requirements:
18 |   host:
19 |     - numpy
20 |     - pip
21 |     - python
22 |     - scipy
23 |     - setuptools
24 |   run:
25 |     - numpy
26 |     - python
27 |     - scipy
28 |     - setuptools
29 |
30 | #test:
31 | #  imports:
32 | #    - sparse_ir
33 |
34 | about:
35 |   home: "https://github.com/SpM-lab/sparse-ir"
36 |   license: MIT
37 |   license_family: MIT
38 |   license_file:
39 |   summary: "intermediate representation (IR) basis for electronic propagator"
40 |   doc_url:
41 |   dev_url:
42 |
43 | extra:
44 |   recipe-maintainers:
45 |     - shinaoka
46 |
--------------------------------------------------------------------------------
/.dev/README.md:
--------------------------------------------------------------------------------
1 | # For developers
2 | ## For VS Code users
3 |
4 | This directory includes configuration files for using VS Code Remote Containers + Docker.
5 | Each subdirectory contains a Dockerfile as well as a configuration file for VS Code.
6 |
7 | For instance, to use the Python 3.9 environment, you can do
8 |
9 | ```bash
10 | ln -s .dev/container_py39 .devcontainer
11 | ```
12 |
13 | and open the top directory of the repository with VS Code.
14 |
15 | ## For users of other editors
16 | You can create a container from a Dockerfile or simply read it: a Dockerfile
17 | is a recipe describing how to set up your environment, for example as sketched below.
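A minimal sketch of building and entering the Python 3.9 container without VS Code
(the image tag `sparse-ir-dev` is an arbitrary name, not defined by these files):

```bash
# Build an image from the recipe and open a shell in it, mounting the
# repository at the same path the devcontainer configuration uses.
docker build -t sparse-ir-dev .dev/container_py39
docker run --rm -it -v "$PWD:/home/vscode/work" sparse-ir-dev bash
```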
18 | -------------------------------------------------------------------------------- /.dev/container_anaconda/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM continuumio/anaconda3 2 | 3 | ENV PYTHONUNBUFFERED=1 4 | 5 | RUN apt-get update && \ 6 | DEBIAN_FRONTEND=noninteractive apt-get install -y \ 7 | build-essential \ 8 | curl \ 9 | ca-certificates \ 10 | git \ 11 | zip \ 12 | vim \ 13 | cmake pkg-config gfortran \ 14 | && \ 15 | apt-get clean && rm -rf /var/cache/apt/archives/* /var/lib/apt/lists/* # clean up 16 | 17 | # Create non-root user 18 | ARG NB_USER=vscode 19 | ARG NB_UID=1000 20 | ARG WORK_DIR=/home/${NB_USER}/work 21 | RUN useradd -u $NB_UID -m $NB_USER -s /bin/bash && \ 22 | echo 'vscode ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 23 | USER $NB_USER 24 | 25 | RUN mkdir /home/${NB_USER}/work -------------------------------------------------------------------------------- /.dev/container_anaconda/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/vscode-remote/devcontainer.json or the definition README at 2 | // https://github.com/microsoft/vscode-dev-containers/tree/master/containers/docker-existing-dockerfile 3 | { 4 | "name": "Existing Dockerfile", 5 | // Sets the run context to one level up instead of the .devcontainer folder. 6 | "context": "..", 7 | // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename. 8 | "dockerFile": "./Dockerfile", 9 | // The optional 'runArgs' property can be used to specify additional runtime arguments. 10 | "runArgs": [], 11 | // Use 'settings' to set *default* container specific settings.json values on container create. 12 | // You can edit these settings after create using File > Preferences > Settings > Remote. 13 | // Uncomment the next line if you want to publish any ports. 14 | // "appPort": [], 15 | // Uncomment the next line to run commands after the container is created - for example installing git. 16 | // "postCreateCommand": "apt-get update && apt-get install -y git", 17 | // Add the IDs of extensions you want installed when the container is created in the array below. 
18 | "extensions": [ 19 | "ms-azuretools.vscode-docker", 20 | "mutantdino.resourcemonitor", 21 | "shardulm94.trailing-spaces", 22 | "cliffordfajardo.hightlight-selections-vscode", 23 | "wdawson.better-kill-ring", 24 | "oderwat.indent-rainbow", 25 | "github.vscode-pull-request-github", 26 | "mhutchie.git-graph", 27 | "donjayamanne.githistory", 28 | "eamodio.gitlens", 29 | "bungcip.better-toml", 30 | "usernamehw.errorlens", 31 | "ms-vscode.live-server", 32 | "christian-kohler.path-intellisense", 33 | "ms-python.python", 34 | ], 35 | "remoteUser": "vscode", 36 | "workspaceFolder": "/home/vscode/work", 37 | "workspaceMount": "src=${localWorkspaceFolder},dst=/home/vscode/work,type=bind", 38 | } 39 | -------------------------------------------------------------------------------- /.dev/container_py39/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | ENV PYTHONUNBUFFERED=1 4 | 5 | RUN apt-get update && \ 6 | DEBIAN_FRONTEND=noninteractive apt-get install -y \ 7 | build-essential \ 8 | curl \ 9 | ca-certificates \ 10 | git \ 11 | zip \ 12 | vim \ 13 | gosu \ 14 | hdf5-tools \ 15 | libopenblas-base \ 16 | libopenblas-dev \ 17 | libhdf5-103 \ 18 | libhdf5-dev \ 19 | libeigen3-dev \ 20 | cmake \ 21 | pkg-config \ 22 | gfortran \ 23 | openmpi-bin \ 24 | libopenmpi-dev \ 25 | sudo \ 26 | libboost-dev \ 27 | libboost-mpi-dev \ 28 | && \ 29 | apt-get clean && rm -rf /var/cache/apt/archives/* /var/lib/apt/lists/* # clean up 30 | 31 | 32 | # Create non-root user 33 | ARG NB_USER=vscode 34 | ARG NB_UID=1000 35 | RUN useradd -u $NB_UID -m $NB_USER -s /bin/bash && \ 36 | echo 'vscode ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 37 | USER $NB_USER 38 | ENV PATH "/home/${NB_USER}/.local/bin:/opt/pomerol2dcore/bin:${PATH}" 39 | ENV PYTHONPATH "/home/${NB_USER}/work/src:${PYTONPATH}" 40 | 41 | # for vscode 42 | RUN mkdir /home/${NB_USER}/work 43 | 44 | RUN pip3 install -U pip && \ 45 | pip3 install scipy h5py toml tomli matplotlib pytest mypy xprec irbasis sphinx_rtd_theme 46 | -------------------------------------------------------------------------------- /.dev/container_py39/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/vscode-remote/devcontainer.json or the definition README at 2 | // https://github.com/microsoft/vscode-dev-containers/tree/master/containers/docker-existing-dockerfile 3 | { 4 | "name": "Existing Dockerfile", 5 | // Sets the run context to one level up instead of the .devcontainer folder. 6 | "context": "..", 7 | // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename. 8 | "dockerFile": "./Dockerfile", 9 | // The optional 'runArgs' property can be used to specify additional runtime arguments. 10 | "runArgs": [], 11 | // Use 'settings' to set *default* container specific settings.json values on container create. 12 | // You can edit these settings after create using File > Preferences > Settings > Remote. 13 | // Uncomment the next line if you want to publish any ports. 14 | // "appPort": [], 15 | // Uncomment the next line to run commands after the container is created - for example installing git. 16 | // "postCreateCommand": "apt-get update && apt-get install -y git", 17 | // Add the IDs of extensions you want installed when the container is created in the array below. 
18 | "extensions": [ 19 | "ms-azuretools.vscode-docker", 20 | "mutantdino.resourcemonitor", 21 | "shardulm94.trailing-spaces", 22 | "cliffordfajardo.hightlight-selections-vscode", 23 | "wdawson.better-kill-ring", 24 | "oderwat.indent-rainbow", 25 | "github.vscode-pull-request-github", 26 | "mhutchie.git-graph", 27 | "donjayamanne.githistory", 28 | "eamodio.gitlens", 29 | "bungcip.better-toml", 30 | "usernamehw.errorlens", 31 | "ms-vscode.live-server", 32 | "christian-kohler.path-intellisense", 33 | "ms-python.python", 34 | ], 35 | "remoteUser": "vscode", 36 | "workspaceFolder": "/home/vscode/work", 37 | "workspaceMount": "src=${localWorkspaceFolder},dst=/home/vscode/work,type=bind", 38 | } 39 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # https://editorconfig.org 2 | root = true 3 | 4 | # Respect git trailing whitespace rules 5 | [*] 6 | end_of_line = lf 7 | insert_final_newline = true 8 | trim_trailing_whitespace = true 9 | charset = utf-8 10 | 11 | # Enforce python indentation 12 | [*.py] 13 | indent_style = space 14 | indent_size = 4 15 | -------------------------------------------------------------------------------- /.github/workflows/conda.yml: -------------------------------------------------------------------------------- 1 | name: Build and upload conda packages 2 | 3 | # Triggered a new tag starting with "v" is pushed 4 | on: 5 | push: 6 | tags: 7 | - 'v*' 8 | 9 | jobs: 10 | build: 11 | runs-on: ${{ matrix .os }} 12 | strategy: 13 | matrix: 14 | os: [ubuntu-latest] 15 | python-version: ["3.9"] 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | 20 | - name: Set up Python 21 | uses: actions/setup-python@v2 22 | 23 | - name: Set up Conda 24 | uses: s-weigand/setup-conda@v1 25 | with: 26 | update-conda: true 27 | python-version: ${{ matrix.python-version }} 28 | 29 | - name: Install dependencies 30 | run: | 31 | conda install conda-build anaconda-client -y 32 | 33 | - name: Bulid and upload 34 | env: 35 | ANACONDA_API_TOKEN: ${{secrets.ANACONDA_TOKEN}} 36 | run: | 37 | conda config --set anaconda_upload yes 38 | conda build .conda --user SpM-lab 39 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | name: Build/test suite 2 | 3 | on: 4 | push: 5 | branches: 6 | mainline 7 | pull_request: 8 | branches: 9 | mainline 10 | 11 | jobs: 12 | build: 13 | name: | 14 | ${{ matrix.os }}, numpy==${{ matrix.numpy }}, scipy==${{ matrix.scipy }} 15 | runs-on: ${{ matrix.os }} 16 | strategy: 17 | matrix: 18 | include: 19 | - os: ubuntu-20.04 20 | numpy: "1.16" 21 | scipy: "1.4" 22 | python: "3.6" 23 | - os: ubuntu-latest 24 | numpy: auto 25 | scipy: auto 26 | python: "3.10" 27 | 28 | steps: 29 | - uses: actions/checkout@v2 30 | 31 | - name: Set up python ${{ matrix.python }} 32 | uses: actions/setup-python@v2 33 | with: 34 | python-version: ${{ matrix.python }} 35 | 36 | - name: Install numpy ${{ matrix.numpy }}, scipy ${{ matrix.scipy }} 37 | if: ${{ matrix.numpy != 'auto' }} 38 | run: | 39 | pip install numpy==${{ matrix.numpy}} scipy==${{ matrix.scipy }} 40 | 41 | - name: Install package with testing dependencies 42 | run: | 43 | pip install .[test] 44 | 45 | - name: Test with pytest 46 | run: | 47 | pytest 48 | -------------------------------------------------------------------------------- 
/.github/workflows/wheel.yml:
--------------------------------------------------------------------------------
1 | name: Build and upload to PyPI
2 |
3 | # Triggered when a new tag starting with "v" is pushed
4 | on:
5 |   push:
6 |     tags:
7 |       - 'v*'
8 |
9 | jobs:
10 |   build_sdist:
11 |     name: Build source distribution
12 |     runs-on: ubuntu-latest
13 |     steps:
14 |       - uses: actions/checkout@v2
15 |
16 |       - name: Build dist
17 |         run: python setup.py sdist bdist_wheel
18 |
19 |       - uses: actions/upload-artifact@v2
20 |         with:
21 |           name: dist
22 |           path: |
23 |             dist/*.tar.gz
24 |             dist/*.whl
25 |           if-no-files-found: error
26 |
27 |   upload_pypi:
28 |     needs: [build_sdist]
29 |     runs-on: ubuntu-latest
30 |     steps:
31 |       - uses: actions/download-artifact@v2
32 |         with:
33 |           name: dist
34 |           path: dist
35 |
36 |       - uses: pypa/gh-action-pypi-publish@v1.4.2
37 |         with:
38 |           user: __token__
39 |           password: ${{ secrets.PYPI_API_TOKEN }}
40 |           skip_existing: true
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .*
2 | *~
3 | \#*\#
4 |
5 | *.pyc
6 | __pycache__/
7 | *.egg-info/
8 | /dist/
9 | /build/
10 |
11 | doc/*build*/
12 |
13 | *.ipynb
14 |
15 | !/.gitignore
16 | !/.github/
17 | !/.conda/
18 | !/.dev/
19 | !/.editorconfig
20 | !/.readthedocs.yaml
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
2 | version: 2
3 |
4 | build:
5 |   os: ubuntu-lts-latest
6 |   tools:
7 |     python: latest
8 |
9 | sphinx:
10 |   configuration: doc/conf.py
11 |
12 | # Equivalent to: pip install .[doc]
13 | python:
14 |   install:
15 |     - method: pip
16 |       path: .
17 |       extra_requirements:
18 |         - doc
19 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2020-2021 Markus Wallerberger and others
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining
4 | a copy of this software and associated documentation files (the
5 | "Software"), to deal in the Software without restriction, including
6 | without limitation the rights to use, copy, modify, merge, publish,
7 | distribute, sublicense, and/or sell copies of the Software, and to
8 | permit persons to whom the Software is furnished to do so, subject to
9 | the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be
12 | included in all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | sparse-ir - A library for the intermediate representation of propagators
2 | =========================================================================
3 | This library provides routines for constructing and working with the
4 | intermediate representation of correlation functions. It provides:
5 |
6 | - on-the-fly computation of basis functions for arbitrary cutoff Λ
7 | - basis functions and singular values are accurate to full precision
8 | - routines for sparse sampling
9 |
10 |
11 | Installation
12 | ------------
13 | Install via `pip <https://pip.pypa.io>`_::
14 |
15 |     pip install sparse-ir[xprec]
16 |
17 | The above line is the recommended way to install `sparse-ir`. It automatically
18 | installs the `xprec <https://github.com/tuwien-cms/xprec>`_ package, which
19 | allows one to compute the IR basis functions with greater accuracy. If you do
20 | not want to do this, simply remove the string ``[xprec]`` from the above command.
21 |
22 | Install via `conda <https://docs.conda.io>`_::
23 |
24 |     conda install -c spm-lab sparse-ir xprec
25 |
26 | Other than the optional xprec dependency, sparse-ir requires only
27 | `numpy <https://numpy.org>`_ and `scipy <https://scipy.org>`_.
28 |
29 | To manually install the current development version, you can use the following::
30 |
31 |     # Only recommended for developers - no automatic updates!
32 |     git clone https://github.com/SpM-lab/sparse-ir
33 |     pip install -e sparse-ir/[xprec]
34 |
35 | Documentation and tutorial
36 | --------------------------
37 | Check out our `comprehensive tutorial`_, where self-contained
38 | notebooks for several many-body methods - GF(2), GW, Eliashberg equations,
39 | Lichtenstein formula, FLEX, ... - are presented.
40 |
41 | Refer to the `API documentation`_ for more details on how to work
42 | with the python library.
43 |
44 | There is also a `Julia library`_ and a (currently somewhat restricted)
45 | `Fortran library`_ available for the IR basis and sparse sampling.
46 |
47 | .. _comprehensive tutorial: https://spm-lab.github.io/sparse-ir-tutorial
48 | .. _API documentation: https://sparse-ir.readthedocs.io
49 | .. _Julia library: https://github.com/SpM-lab/SparseIR.jl
50 | .. _Fortran library: https://github.com/SpM-lab/sparse-ir-fortran
51 |
52 | Getting started
53 | ---------------
54 | Here is a full second-order perturbation theory solver (GF(2)) in a few
55 | lines of Python code::
56 |
57 |     # Construct the IR basis and sparse sampling for fermionic propagators
58 |     import sparse_ir, numpy as np
59 |     basis = sparse_ir.FiniteTempBasis('F', beta=10, wmax=8, eps=1e-6)
60 |     stau = sparse_ir.TauSampling(basis)
61 |     siw = sparse_ir.MatsubaraSampling(basis, positive_only=True)
62 |
63 |     # Solve the single impurity Anderson model coupled to a bath with a
64 |     # semicircular density of states with unit half bandwidth.
65 |     U = 1.2
66 |     def rho0w(w):
67 |         return np.sqrt(1 - w.clip(-1, 1)**2) * 2/np.pi
68 |
69 |     # Compute the IR basis coefficients for the non-interacting propagator
70 |     rho0l = basis.v.overlap(rho0w)
71 |     G0l = -basis.s * rho0l
72 |
73 |     # Self-consistency loop: alternate between second-order expression for the
74 |     # self-energy and the Dyson equation until convergence.
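    # In each iteration, evaluate() maps IR coefficients to values on the
    # sparse sampling points, while fit() maps sampled values back to IR.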
75 |     Gl = G0l
76 |     Gl_prev = 0
77 |     while np.linalg.norm(Gl - Gl_prev) > 1e-6:
78 |         Gl_prev = Gl
79 |         Gtau = stau.evaluate(Gl)
80 |         Sigmatau = U**2 * Gtau**3
81 |         Sigmal = stau.fit(Sigmatau)
82 |         Sigmaiw = siw.evaluate(Sigmal)
83 |         G0iw = siw.evaluate(G0l)
84 |         Giw = 1/(1/G0iw - Sigmaiw)
85 |         Gl = siw.fit(Giw)
86 |
87 | You may want to start with reading up on the `intermediate representation`_.
88 | It is tied to the analytic continuation of bosonic/fermionic spectral
89 | functions from (real) frequencies to imaginary time, a transformation mediated
90 | by a kernel ``K``. The kernel depends on a cutoff, which you should choose to
91 | be ``lambda_ >= beta * W``, where ``beta`` is the inverse temperature and ``W``
92 | is the bandwidth.
93 |
94 | One can now perform a `singular value expansion`_ on this kernel, which
95 | generates two sets of orthonormal basis functions, one set ``v[l](w)`` for
96 | the real-frequency side ``w``, and one set ``u[l](tau)`` for the same object in
97 | imaginary (Euclidean) time ``tau``, together with a "coupling" strength
98 | ``s[l]`` between the two sides.
99 |
100 | By this construction, the imaginary-time basis can be shown to be *optimal* in
101 | terms of compactness.
102 |
103 | .. _intermediate representation: https://arxiv.org/abs/2106.12685
104 | .. _singular value expansion: https://w.wiki/3poQ
105 |
106 | License and citation
107 | --------------------
108 | This software is released under the MIT License. See LICENSE.txt for details.
109 |
110 | If you find the intermediate representation, sparse sampling, or this software
111 | useful in your research, please consider citing the following papers:
112 |
113 | - Hiroshi Shinaoka et al., `Phys. Rev. B 96, 035147`_ (2017)
114 | - Jia Li et al., `Phys. Rev. B 101, 035144`_ (2020)
115 | - Markus Wallerberger et al., `SoftwareX 21, 101266`_ (2023)
116 |
117 | If you are discussing sparse sampling in your research specifically, please
118 | also consider citing an independently discovered, closely related approach, the
119 | MINIMAX isometry method (Merzuk Kaltak and Georg Kresse,
120 | `Phys. Rev. B 101, 205145`_, 2020).
121 |
122 | .. _Phys. Rev. B 96, 035147: https://doi.org/10.1103/PhysRevB.96.035147
123 | .. _Phys. Rev. B 101, 035144: https://doi.org/10.1103/PhysRevB.101.035144
124 | .. _SoftwareX 21, 101266: https://doi.org/10.1016/j.softx.2022.101266
125 | .. _Phys. Rev. B 101, 205145: https://doi.org/10.1103/PhysRevB.101.205145
126 |
--------------------------------------------------------------------------------
/doc/augment.rst:
--------------------------------------------------------------------------------
1 | Augmented bases
2 | ===============
3 |
4 | .. autoclass:: sparse_ir.augment.AugmentedBasis
5 |     :members:
6 |
7 | Available augmentations
8 | -----------------------
9 |
10 | .. autoclass:: sparse_ir.augment.TauConst
11 |     :members:
12 |     :special-members: __call__
13 |
14 | .. autoclass:: sparse_ir.augment.TauLinear
15 |     :members:
16 |     :special-members: __call__
17 |
18 | .. autoclass:: sparse_ir.augment.MatsubaraConst
19 |     :members:
20 |     :special-members: __call__
21 |
22 |
23 | Base classes
24 | ------------
25 |
26 | .. autoclass:: sparse_ir.augment.AbstractAugmentation
27 |     :members:
28 |     :special-members: __call__
29 |
--------------------------------------------------------------------------------
/doc/basis.rst:
--------------------------------------------------------------------------------
1 | IR Basis
2 | ========
3 |
4 | .. autoclass:: sparse_ir.FiniteTempBasis
5 |     :members:
6 |
7 | Piecewise polynomials
8 | ---------------------
9 |
10 | .. autoclass:: sparse_ir.poly.PiecewiseLegendrePoly
11 |     :members:
12 |     :special-members: __call__, __getitem__
13 |
14 | .. autoclass:: sparse_ir.poly.PiecewiseLegendreFT
15 |     :members:
16 |     :special-members: __call__, __getitem__
17 |
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | # Run build as:
3 | #    sphinx-build -M html . build
4 | import io
5 | import os
6 | import re
7 |
8 |
9 | def extract_varval(*varnames):
10 |     """Extract variable values by parsing src/sparse_ir/__init__.py"""
11 |     herepath = os.path.abspath(os.path.dirname(__file__))
12 |     fullpath = os.path.join(herepath, '..', 'src', 'sparse_ir', '__init__.py')
13 |     with io.open(fullpath, 'r') as f:
14 |         contents = f.read()
15 |     for varname in varnames:
16 |         var_re = re.compile(rf"(?m)^{varname}\s*=\s*['\"]([^'\"]*)['\"]")
17 |         match = var_re.search(contents)
18 |         yield match.group(1)
19 |
20 |
21 | # === Project information
22 |
23 | project = 'sparse-ir'
24 | copyright, version = extract_varval('__copyright__', '__version__')
25 | release = version
26 | author = ', '.join([
27 |     'Markus Wallerberger',
28 |     'Hiroshi Shinaoka',
29 |     'Kazuyoshi Yoshimi',
30 |     'Junya Otsuki',
31 |     'Chikano Naoya',
32 | ])
33 |
34 | # === General configuration
35 |
36 | extensions = [
37 |     'sphinx.ext.duration',
38 |     'sphinx.ext.doctest',
39 |     'sphinx.ext.autodoc',
40 |     'sphinx.ext.autosummary',
41 |     'sphinx.ext.napoleon',
42 |     'sphinx.ext.intersphinx',
43 |     'sphinx_rtd_theme',
44 | ]
45 |
46 | intersphinx_mapping = {
47 |     'python': ('https://docs.python.org/3/', None),
48 | }
49 |
50 | #templates_path = ['_templates']
51 | #html_static_path = ['_static']
52 |
53 | exclude_patterns = [
54 |     '_build',
55 |     'Thumbs.db',
56 |     '.DS_Store',
57 | ]
58 |
59 | html_theme = "sphinx_rtd_theme"
--------------------------------------------------------------------------------
/doc/hacking.rst:
--------------------------------------------------------------------------------
1 | Hacking/Development
2 | ===================
3 | For development, we recommend installing an "editable" version of the
4 | repository with unit test dependencies::
5 |
6 |     git clone https://github.com/SpM-lab/sparse-ir.git
7 |     cd sparse-ir
8 |     pip install -e .[test]
9 |
10 | Now the installed package automatically updates as you are working on the code.
11 | Make sure you run the unit tests::
12 |
13 |     pytest
14 |
15 |
16 | Code guidelines
17 | ---------------
18 | The code tries to follow `PEP 8`_ more-or-less.
19 |
20 | 1. Please note that lines are at most 79 characters! In (occasionally)
21 |    exceeding this limit, imagine yourself fighting through increasingly
22 |    hostile terrain: at 83 characters, you are wading through smelly swamps, at
23 |    90 characters, you are crawling through Mordor.
24 |
25 | 2. Indentation is four spaces, no tabs.
26 |
27 | 3. New code must have unit tests associated with it.
28 |
29 | 4. Classes are abstractions. If a class has no clear *thing* it is modelling,
30 |    it is not a class.
31 |
32 | .. _`PEP 8`: https://www.python.org/dev/peps/pep-0008/
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../README.rst
2 |
3 |
4 | Contents
5 | --------
6 |
7 | .. toctree::
8 |    :maxdepth: 2
9 |
10 |    tutorial
11 |    basis
12 |    sampling
13 |    kernels
14 |    sve
15 |    augment
16 |    otherbases
17 |    porting
18 |    hacking
19 |
20 |
21 | Indices and tables
22 | ------------------
23 |
24 | * :ref:`genindex`
25 | * :ref:`modindex`
26 | * :ref:`search`
27 |
--------------------------------------------------------------------------------
/doc/kernels.rst:
--------------------------------------------------------------------------------
1 | Kernels
2 | =======
3 | The IR basis is nothing but the `singular value expansion`_ of a suitable
4 | integral kernel `K` mediating the change from real frequencies to imaginary
5 | times:
6 |
7 | .. math:: G(\tau) = - \int d\omega\, K(\tau, \omega)\, w(\omega)\, \rho(\omega),
8 |
9 | where:
10 |
11 | .. math:: \rho(\omega) = -\frac 1\pi \Im G(\omega + i\delta)
12 |
13 | is the spectral function and w(ω) is a weight function. The integral is defined
14 | on the interval [-ωmax, ωmax], where ωmax(=Λ/β) is a frequency cutoff.
15 |
16 | Different kernels yield different IR basis functions. The `sparse-ir` library
17 | defines two kernels:
18 |
19 |  - :class:`sparse_ir.LogisticKernel`: continuation of *fermionic/bosonic*
20 |    spectral functions with w(ω)=1 for fermions
21 |    and w(ω)=1/tanh(ω/ωmax) for bosons.
22 |  - :class:`sparse_ir.RegularizedBoseKernel`: continuation of *bosonic* spectral functions
23 |    with w(ω)=1/ω.
24 |
25 | By default, :class:`sparse_ir.LogisticKernel` is used.
26 | Kernels can be fed directly into :class:`sparse_ir.FiniteTempBasis` to
27 | get the intermediate representation.
28 |
29 | .. _singular value expansion: https://w.wiki/3poQ
30 |
31 |
32 | Predefined kernels
33 | ------------------
34 | .. autoclass:: sparse_ir.LogisticKernel
35 |     :members:
36 |     :special-members: __call__
37 |
38 | .. autoclass:: sparse_ir.RegularizedBoseKernel
39 |     :members:
40 |     :special-members: __call__
41 |
42 |
43 | Custom kernels
44 | --------------
45 | Adding kernels to ``sparse_ir`` is simple - at the very basic level, the
46 | library expects a kernel ``K`` to be able to provide two things:
47 |
48 | 1. the values through ``K(x, y)``
49 | 2. a set of SVE discretization hints through ``K.hints()``
50 |
51 | Let us suppose you simply want to include a Gaussian default model on the real
52 | axis instead of the default (flat) one. We create a new kernel by inheriting
53 | from :class:`sparse_ir.kernel.AbstractKernel` and then simply wrap around a
54 | fermionic kernel, modifying the values as needed::
55 |
56 |     import numpy as np
57 |     import sparse_ir
58 |     import sparse_ir.kernel
59 |
60 |     class KernelFGauss(sparse_ir.kernel.AbstractKernel):
61 |         def __init__(self, lambda_, std):
62 |             self._inner = sparse_ir.LogisticKernel(lambda_)
63 |             self.lambda_ = lambda_
64 |             self.std = std
65 |
66 |         def __call__(self, x, y, x_plus=None, x_minus=None):
67 |             # First, get value of kernel
68 |             val = self._inner(x, y, x_plus, x_minus)
69 |             # Then multiply with default model
70 |             val *= np.exp(-.5 * (y / self.std)**2)
71 |             return val
72 |
73 |         def hints(self, eps):
74 |             return self._inner.hints(eps)
75 |
76 | You can now feed this kernel directly to :class:`sparse_ir.FiniteTempBasis`::
77 |
78 |     K = KernelFGauss(10., 1.)
79 |     basis = sparse_ir.FiniteTempBasis('F', beta=10., wmax=1., kernel=K)
80 |     print(basis.s)
81 |
82 | This should get you started. For a fully-fledged and robust implementation,
83 | you should:
84 |
85 | 1. Make sure that your kernel does not lose precision in computing ``K(x, y)``,
86 |    as this directly affects the quality of the basis. This is also where the
87 |    arguments ``x_plus`` and ``x_minus`` may become useful.
88 |
89 | 2. Optimize your discretization hints. To do so, it is useful to choose the
90 |    segments in ``x`` and ``y`` close to the asymptotic distribution of the roots
91 |    of the respective singular functions. The Gauss order can then be
92 |    determined from a convergence analysis.
93 |
94 | 3. Check whether your kernel is centrosymmetric, and if so, override the
95 |    ``is_centrosymmetric`` property. This yields an approximately four-times
96 |    performance boost. However, check that the symmetrized versions of the
97 |    kernels do not lose precision.
98 |
99 |
100 | Base classes
101 | ------------
102 | .. autoclass:: sparse_ir.kernel.AbstractKernel
103 |     :members:
104 |     :special-members: __call__
105 |
106 | .. autoclass:: sparse_ir.kernel.ReducedKernel
107 |     :members:
108 |     :special-members: __call__
109 |
110 | .. autoclass:: sparse_ir.kernel.AbstractSVEHints
111 |     :members:
112 |
--------------------------------------------------------------------------------
/doc/otherbases.rst:
--------------------------------------------------------------------------------
1 | Other bases
2 | ===========
3 |
4 | Aside from the intermediate representation (IR), `sparse-ir` allows one to work
5 | with any basis derived from :class:`sparse_ir.abstract.AbstractBasis`.
6 |
7 | In particular, we offer a variant of the Discrete Lehmann
8 | Representation: :class:`sparse_ir.dlr.DiscreteLehmannRepresentation`.
9 |
10 | .. autoclass:: sparse_ir.dlr.DiscreteLehmannRepresentation
11 |     :members:
12 |
13 | Base classes
14 | ------------
15 |
16 | .. autoclass:: sparse_ir.abstract.AbstractBasis
17 |     :members:
--------------------------------------------------------------------------------
/doc/porting.rst:
--------------------------------------------------------------------------------
1 | Porting
2 | =======
3 | For porting codes from irbasis, version 2, to sparse_ir, you have two options:
4 |
5 |  * using the ``sparse_ir.adapter`` module (easier)
6 |  * porting to the new ``sparse_ir`` API (cleaner)
7 |
8 | In both cases, please note that ``sparse_ir`` now computes the basis on-the-fly
9 | rather than loading it from a file. This has some important consequences:
10 |
11 |  * Singular values and basis functions may slightly differ from the old
12 |    code, and may also slightly differ from platform to platform, but should
13 |    be consistent within a few units in the last place (``2e-16``).
14 |
15 |  * The sampling times and frequencies are chosen in an improved fashion
16 |    and thus differ from the old code and may also differ slightly between
17 |    versions and platforms.
18 |
19 | In transferring persistent data, please therefore either directly transfer the
20 | basis coefficients or store the sampling points together with the data.
21 |
22 |
23 | Adapter module
24 | --------------
25 | For ease of porting, we provide the ``sparse_ir.adapter`` module. This module
26 | is API-compatible with the ``irbasis`` package, version 2. This means you
27 | simply have to replace::
28 |
29 |     import irbasis
30 |
31 | with the following::
32 |
33 |     import sparse_ir.adapter as irbasis
34 |
35 | and everything should work as expected.
36 |
37 | .. autoclass:: sparse_ir.adapter.Basis
38 |     :members:
39 |
--------------------------------------------------------------------------------
/doc/sampling.rst:
--------------------------------------------------------------------------------
1 | Sparse sampling
2 | ===============
3 |
4 | Sparse sampling is a way to transition between the truncated IR representation
5 | of a propagator (useful for convergence analysis) and a sparse set of
6 | sampling points in imaginary time or Matsubara frequency. This is mediated
7 | by two classes:
8 |
9 |  - :class:`sparse_ir.TauSampling`:
10 |    sparse sampling in imaginary time, useful for, e.g., constructing Feynman
11 |    diagrams with a spontaneous interaction term.
12 |
13 |  - :class:`sparse_ir.MatsubaraSampling`:
14 |    sparse sampling in Matsubara frequencies, useful for, e.g., solving
15 |    diagrammatic equations such as the Dyson equation.
16 |
17 | All sampling classes contain ``sampling_points``, the corresponding sampling
18 | points in time or frequency, a method ``evaluate()``, which allows one to go
19 | from coefficients to sampling points, and a method ``fit()`` to go back::
20 |
21 |
22 |      ________________                     ___________________
23 |     |                |     evaluate()    |                   |
24 |     |     Basis      |------------------>|     Value on      |
25 |     |  coefficients  |<------------------|  sampling_points  |
26 |     |________________|       fit()       |___________________|
27 |
28 |
29 | .. warning::
30 |     When storing data in sparse time/frequency, *always* store the sampling
31 |     points together with the data. The exact location of the sampling points
32 |     may differ between platforms and/or between releases.
33 |
34 |
35 | Sparse sampling transformers
36 | ----------------------------
37 |
38 | .. autoclass:: sparse_ir.TauSampling
39 |     :members: tau, evaluate, fit
40 |
41 | .. autoclass:: sparse_ir.MatsubaraSampling
42 |     :members: wn, evaluate, fit
43 |
44 |
45 | Base classes
46 | -------------
47 |
48 | .. autoclass:: sparse_ir.sampling.AbstractSampling
49 |     :members:
50 |
51 | .. autoclass:: sparse_ir.sampling.DecomposedMatrix
52 |     :members:
53 |
54 | .. autoclass:: sparse_ir.sampling.SplitDecomposedMatrix
55 |     :members:
56 |
57 | .. autoclass:: sparse_ir.sampling.ConditioningWarning
58 |     :members:
59 |
--------------------------------------------------------------------------------
/doc/sve.rst:
--------------------------------------------------------------------------------
1 | Singular value expansion
2 | ========================
3 |
4 | .. autofunction:: sparse_ir.compute_sve
5 |
6 | Expansion module
7 | ----------------
8 |
9 | .. automodule:: sparse_ir.sve
10 |     :members:
11 |
12 | Singular value decomposition
13 | ----------------------------
14 |
15 | .. automodule:: sparse_ir.svd
16 |     :members:
17 |
--------------------------------------------------------------------------------
/doc/tutorial.rst:
--------------------------------------------------------------------------------
1 | Getting started
2 | ===============
3 |
4 | A comprehensive introduction is available at
5 | `<https://spm-lab.github.io/sparse-ir-tutorial>`_.
6 |
7 | It includes a set of self-contained notebooks for a variety of
8 | many-body applications using sparse-ir:
9 |
10 | - second-order perturbation theory (SOPT)
11 | - fluctuation exchange (FLEX)
12 | - GW and GF(2) method
13 | - Eliashberg equation
14 | - numerical analytical continuation
15 | - etc.
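For a quick first test of the installation, the following minimal sketch
(the parameter values are arbitrary choices, mirroring the README) constructs
a basis and the two sparse-sampling objects used throughout the notebooks::

    import sparse_ir

    # Fermionic IR basis with beta*wmax = 80 and accuracy target 1e-6
    basis = sparse_ir.FiniteTempBasis('F', beta=10, wmax=8, eps=1e-6)

    # Sparse sampling in imaginary time and in Matsubara frequencies
    stau = sparse_ir.TauSampling(basis)
    siw = sparse_ir.MatsubaraSampling(basis)

    print(basis.size, basis.accuracy)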
16 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import io, os.path, re 4 | from setuptools import setup, find_packages 5 | 6 | 7 | def readfile(*parts): 8 | """Return contents of file with path relative to script directory""" 9 | herepath = os.path.abspath(os.path.dirname(__file__)) 10 | fullpath = os.path.join(herepath, *parts) 11 | with io.open(fullpath, 'r') as f: 12 | return f.read() 13 | 14 | 15 | def extract_varvals(*varnames): 16 | """Extract value of __version__ variable by parsing python script""" 17 | initfile = readfile('src', 'sparse_ir', '__init__.py') 18 | for varname in varnames: 19 | var_re = re.compile(rf"(?m)^{varname}\s*=\s*['\"]([^'\"]*)['\"]") 20 | match = var_re.search(initfile) 21 | yield match.group(1) 22 | 23 | 24 | REPO_URL = "https://github.com/SpM-lab/sparse-ir" 25 | VERSION, = extract_varvals('__version__') 26 | LONG_DESCRIPTION = readfile('README.rst') 27 | 28 | setup( 29 | name='sparse-ir', 30 | version=VERSION, 31 | 32 | description= 33 | 'intermediate representation (IR) basis for electronic propagator', 34 | long_description=LONG_DESCRIPTION, 35 | long_description_content_type='text/x-rst', 36 | keywords=' '.join([ 37 | 'irbasis', 'many-body', 'propagator', 'svd' 38 | ]), 39 | classifiers=[ 40 | 'Development Status :: 5 - Production/Stable', 41 | 'Intended Audience :: Developers', 42 | 'Intended Audience :: Science/Research', 43 | 'Topic :: Scientific/Engineering :: Physics', 44 | 'License :: OSI Approved :: MIT License', 45 | 'Programming Language :: Python :: 3', 46 | ], 47 | 48 | url=REPO_URL, 49 | author=[ 50 | 'Markus Wallerberger', 51 | 'Hiroshi Shinaoka', 52 | 'Kazuyoshi Yoshimi', 53 | 'Junya Otsuki', 54 | 'Chikano Naoya' 55 | ], 56 | author_email='markus.wallerberger@tuwien.ac.at', 57 | 58 | python_requires='>=3', 59 | install_requires=[ 60 | 'numpy', 61 | 'scipy', 62 | 'setuptools' 63 | ], 64 | extras_require={ 65 | 'test': ['pytest', 'irbasis', 'xprec'], 66 | 'doc': ['sphinx>=2.1', 'sphinx_rtd_theme'], 67 | 'xprec': [f'xprec>=1.0'], 68 | }, 69 | 70 | package_dir={'': 'src'}, 71 | packages=find_packages(where='src'), 72 | ) 73 | -------------------------------------------------------------------------------- /src/sparse_ir/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Intermediate representation (IR) for many-body propagators 4 | ========================================================== 5 | 6 | This library provides routines for constructing and working with the 7 | intermediate representation of correlation functions. 
It provides: 8 | 9 | - on-the-fly computation of basis functions for arbitrary cutoff Λ 10 | - basis functions and singular values are accurate to full precision 11 | - routines for sparse sampling 12 | """ 13 | __copyright__ = "2020-2024 Markus Wallerberger, Hiroshi Shinaoka, and others" 14 | __license__ = "MIT" 15 | __version__ = "1.1.2" 16 | 17 | from .kernel import RegularizedBoseKernel, LogisticKernel 18 | from .sve import compute as compute_sve, SVEResult 19 | from .basis import FiniteTempBasis, finite_temp_bases 20 | from .basis_set import FiniteTempBasisSet 21 | from .sampling import TauSampling, MatsubaraSampling 22 | -------------------------------------------------------------------------------- /src/sparse_ir/_gauss.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import warnings 4 | import numpy as np 5 | 6 | import scipy.linalg as sp_linalg 7 | import numpy.polynomial.legendre as np_legendre 8 | 9 | 10 | class Rule: 11 | """Quadrature rule. 12 | 13 | Approximation of an integral by a weighted sum over discrete points: 14 | 15 | ∫ f(x) * omega(x) * dx ~ sum(f(xi) * wi for (xi, wi) in zip(x, w)) 16 | 17 | where we generally have superexponential convergence for smooth ``f(x)`` 18 | with the number of quadrature points. 19 | """ 20 | def __init__(self, x, w, x_forward=None, x_backward=None, a=-1, b=1): 21 | x = np.asarray(x) 22 | if x_forward is None: 23 | x_forward = x - a 24 | if x_backward is None: 25 | x_backward = b - x 26 | 27 | self.x = x 28 | self.w = np.asarray(w) 29 | self.x_forward = np.asarray(x_forward) 30 | self.x_backward = np.asarray(x_backward) 31 | self.a = a 32 | self.b = b 33 | 34 | def reseat(self, a, b): 35 | """Reseat current quadrature rule to new domain""" 36 | scaling = (b - a) / (self.b - self.a) 37 | x = scaling * (self.x - (self.b + self.a)/2) + (b + a)/2 38 | w = self.w * scaling 39 | x_forward = self.x_forward * scaling 40 | x_backward = self.x_backward * scaling 41 | return Rule(x, w, x_forward, x_backward, a, b) 42 | 43 | def scale(self, factor): 44 | """Scale weights by factor""" 45 | return Rule(self.x, self.w * factor, self.x_forward, self.x_backward, 46 | self.a, self.b) 47 | 48 | def piecewise(self, edges): 49 | """Piecewise quadrature with the same quadrature rule, but scaled""" 50 | edges = np.asarray(edges) 51 | start = edges[:-1] 52 | stop = edges[1:] 53 | if not (stop > start).all(): 54 | raise ValueError("segments ends must be ordered ascendingly") 55 | 56 | return self.join(*(self.reseat(start_i, stop_i) 57 | for (start_i, stop_i) in zip(start, stop))) 58 | 59 | def astype(self, dtype): 60 | dtype = np.dtype(dtype) 61 | return Rule(self.x.astype(dtype), self.w.astype(dtype), 62 | self.x_forward.astype(dtype), self.x_backward.astype(dtype), 63 | dtype.type(self.a), dtype.type(self.b)) 64 | 65 | @staticmethod 66 | def join(*gauss_list): 67 | """Join multiple Gauss quadratures together""" 68 | if not gauss_list: 69 | return Rule((), ()) 70 | 71 | a = gauss_list[0].a 72 | b = gauss_list[-1].b 73 | prev_b = a 74 | parts = [] 75 | 76 | for curr in gauss_list: 77 | if curr.a != prev_b: 78 | raise ValueError("Gauss rules must be ascending") 79 | prev_b = curr.b 80 | x_forward = curr.x_forward + (curr.a - a) 81 | x_backward = curr.x_backward + (b - curr.b) 82 | parts.append((curr.x, curr.w, x_forward, x_backward)) 83 | 84 | x, w, x_forward, x_backward = map(np.hstack, zip(*parts)) 85 | 
return Rule(x, w, x_forward, x_backward, a, b)
86 |
87 |
88 | def legendre(n, dtype=float):
89 |     """Gauss-Legendre quadrature"""
90 |     return rule_from_recurrence(*_legendre_recurrence(n, dtype))
91 |
92 |
93 | def legendre_collocation(rule, n=None):
94 |     """Generate collocation matrix from Gauss-Legendre rule"""
95 |     if n is None:
96 |         n = rule.x.size
97 |
98 |     res = np_legendre.legvander(rule.x, n - 1).T.copy()
99 |     res *= rule.w
100 |
101 |     invnorm = np.arange(0.5, n + 0.5, dtype=rule.x.dtype)
102 |     res *= invnorm[:,None]
103 |     return res
104 |
105 |
106 | def rule_from_recurrence(alpha, beta, a, b):
107 |     """Make new Gauss scheme based on recurrence coefficients.
108 |
109 |     Given a set of polynomials ``P[n]`` defined by the following three-term
110 |     recurrence relation::
111 |
112 |         P[0](x) == 1
113 |         P[1](x) == x - alpha[0]
114 |         P[n+1](x) == (x - alpha[n]) * P[n] - beta[n] * P[n-1]
115 |
116 |     we construct both a set of quadrature points ``x`` and weights ``w`` for
117 |     Gaussian quadrature. It is usually a good idea to work in extended
118 |     precision for extra accuracy in the quadrature rule.
119 |     """
120 |     dtype = np.result_type(alpha, beta)
121 |
122 |     # First approximation of roots by finding eigenvalues of tridiagonal system
123 |     # corresponding to the recursion
124 |     beta[0] = b - a
125 |     beta_is_pos = beta >= 0
126 |     if not beta_is_pos.all():
127 |         raise NotImplementedError("scipy solver cannot handle complex")
128 |
129 |     sqrt_beta = np.sqrt(beta[1:])
130 |     x = sp_linalg.eigvalsh_tridiagonal(alpha, sqrt_beta)
131 |     x = x.astype(dtype)
132 |
133 |     # These roots are usually only accurate to 100 ulps or so, so we improve
134 |     # on them using a few iterations of the Newton method.
135 |     prevdiff = 1.0
136 |     maxiter = 5
137 |     for _ in range(maxiter):
138 |         p, dp, _, _ = _polyvalderiv(x, alpha, beta)
139 |         diff = p / dp
140 |         x -= diff
141 |
142 |         # check convergence without relying on ATOL
143 |         currdiff = np.abs(diff).max()
144 |         #print(currdiff)
145 |         if not (2 * currdiff <= prevdiff):
146 |             break
147 |         prevdiff = currdiff
148 |     else:
149 |         warnings.warn("Newton iteration did not converge, error = {:.2g}"
150 |                       .format(currdiff))
151 |
152 |     # Now we know that the weights are proportional to the following:
153 |     _, dp1, p0, _ = _polyvalderiv(x, alpha, beta)
154 |     with np.errstate(over='ignore'):
155 |         w = 1 / (dp1 * p0)
156 |     w *= beta[0] / w.sum(initial=dtype.type(0))
157 |     return Rule(x, w, x - a, b - x, a, b)
158 |
159 |
160 | def _polyvalderiv(x, alpha, beta):
161 |     """Return value and derivative of polynomial.
162 |
163 |     Given a set of polynomials ``P[n]`` defined by a three-term recurrence,
164 |     we evaluate both value and derivative for the highest polynomial and
165 |     the second highest one.
166 |     """
167 |     n = len(alpha)
168 |     p0 = np.ones_like(x)
169 |     p1 = x - alpha[0] * p0
170 |     dp0 = np.zeros_like(x)
171 |     dp1 = p0
172 |     for k in range(1, n):
173 |         x_minus_alpha = x - alpha[k]
174 |         p2 = x_minus_alpha * p1 - beta[k] * p0
175 |         dp2 = p1 + x_minus_alpha * dp1 - beta[k] * dp0
176 |         p0 = p1
177 |         p1 = p2
178 |         dp0 = dp1
179 |         dp1 = dp2
180 |
181 |     return p1, dp1, p0, dp0
182 |
183 |
184 | def _legendre_recurrence(n, dtype=float):
185 |     """Returns the alpha, beta for Gauss-Legendre integration"""
186 |     # The Legendre polynomials are defined by the following recurrence:
187 |     #
188 |     #     (n + 1) * P[n+1](x) == (2 * n + 1) * x * P[n](x) - n * P[n-1](x)
189 |     #
190 |     # To normalize this, we realize that the prefactor of the highest power
191 |     # of P[n] is (2n -1)!!
/ n!, which we divide by to obtain the "scaled" 192 | # beta values. 193 | dtype = np.dtype(dtype) 194 | k = np.arange(n, dtype=dtype) 195 | ksq = k**2 196 | alpha = np.zeros_like(k) 197 | beta = ksq / (4 * ksq - 1) 198 | beta[0] = 2 199 | one = dtype.type(1) 200 | return alpha, beta, -one, one 201 | 202 | 203 | class NestedRule(Rule): 204 | """Nested Gauss quadrature rule.""" 205 | def __init__(self, x, w, v, x_forward=None, x_backward=None, a=-1, b=1): 206 | super().__init__(x, w, x_forward, x_backward, a, b) 207 | self.v = np.asarray(v) 208 | self.vsel = slice(1, None, 2) 209 | 210 | def reseat(self, a, b): 211 | """Reseat current quadrature rule to new domain""" 212 | res = super().reseat(a, b) 213 | new_v = (b - a) / (self.b - self.a) * self.v 214 | return NestedRule(res.x, res.w, new_v, res.x_forward, res.x_backward, 215 | res.a, res.b) 216 | 217 | def scale(self, factor): 218 | """Scale weights by factor""" 219 | res = super().scale(factor) 220 | new_v = factor * self.v 221 | return NestedRule(res.x, res.w, new_v, res.x_forward, res.x_backward, 222 | res.a, res.b) 223 | 224 | def astype(self, dtype): 225 | dtype = np.dtype(dtype) 226 | res = super().astype(dtype) 227 | new_v = self.v.astype(dtype) 228 | return NestedRule(res.x, res.w, new_v, res.x_forward, res.x_backward, 229 | res.a, res.b) 230 | 231 | 232 | def kronrod_31_15(): 233 | x = (-0.99800229869339710, -0.98799251802048540, -0.96773907567913910, 234 | -0.93727339240070600, -0.89726453234408190, -0.84820658341042720, 235 | -0.79041850144246600, -0.72441773136017010, -0.65099674129741700, 236 | -0.57097217260853880, -0.48508186364023970, -0.39415134707756340, 237 | -0.29918000715316884, -0.20119409399743451, -0.10114206691871750, 238 | +0.00000000000000000, +0.10114206691871750, +0.20119409399743451, 239 | +0.29918000715316884, +0.39415134707756340, +0.48508186364023970, 240 | +0.57097217260853880, +0.65099674129741700, +0.72441773136017010, 241 | +0.79041850144246600, +0.84820658341042720, +0.89726453234408190, 242 | +0.93727339240070600, +0.96773907567913910, +0.98799251802048540, 243 | +0.99800229869339710) 244 | w = (0.005377479872923349, 0.015007947329316122, 0.025460847326715320, 245 | 0.035346360791375850, 0.044589751324764880, 0.053481524690928090, 246 | 0.062009567800670640, 0.069854121318728260, 0.076849680757720380, 247 | 0.083080502823133020, 0.088564443056211760, 0.093126598170825320, 248 | 0.096642726983623680, 0.099173598721791960, 0.100769845523875590, 249 | 0.101330007014791540, 0.100769845523875590, 0.099173598721791960, 250 | 0.096642726983623680, 0.093126598170825320, 0.088564443056211760, 251 | 0.083080502823133020, 0.076849680757720380, 0.069854121318728260, 252 | 0.062009567800670640, 0.053481524690928090, 0.044589751324764880, 253 | 0.035346360791375850, 0.025460847326715320, 0.015007947329316122, 254 | 0.005377479872923349) 255 | v = (0.03075324199611727, 0.07036604748810812, 0.10715922046717194, 256 | 0.13957067792615432, 0.16626920581699392, 0.18616100001556220, 257 | 0.19843148532711158, 0.20257824192556130, 0.19843148532711158, 258 | 0.18616100001556220, 0.16626920581699392, 0.13957067792615432, 259 | 0.10715922046717194, 0.07036604748810812, 0.03075324199611727) 260 | return NestedRule(np.array(x), np.array(w), np.array(v)) 261 | -------------------------------------------------------------------------------- /src/sparse_ir/_roots.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 
| # SPDX-License-Identifier: MIT 3 | """ 4 | Auxiliary module for root finding routines. 5 | """ 6 | import numpy as np 7 | 8 | 9 | def find_all(f, xgrid, type='continuous'): 10 | """Find all roots of function between gridpoints""" 11 | xgrid = np.asarray(xgrid) 12 | if xgrid.ndim != 1: 13 | raise ValueError("grid must be a one-dimensional array") 14 | 15 | # First, extract roots that lie directly on the grid points 16 | fx = f(xgrid) 17 | hit = fx == 0 18 | x_hit = xgrid[hit] 19 | 20 | # Next, find out where the sign changes (sign bit flips) remove the 21 | # previously found points from consideration (we need to remove both 22 | # directions for transitions + -> - and - -> +) 23 | sign_change = np.signbit(fx[:-1]) != np.signbit(fx[1:]) 24 | sign_change &= ~hit[:-1] & ~hit[1:] 25 | if not sign_change.any(): 26 | return x_hit 27 | 28 | # sign_change[i] being set means that the sign changes from xgrid[i] to 29 | # xgrid[i+1]. This means a corresponds to those xgrid[i] and b to those 30 | # xgrid[i+1] where sign_change[i] is set. 31 | where_a = np.hstack((sign_change, False)) 32 | where_b = np.hstack((False, sign_change)) 33 | a = xgrid[where_a] 34 | b = xgrid[where_b] 35 | fa = fx[where_a] 36 | fb = fx[where_b] 37 | 38 | # Depending on whether we have a discrete or continuous function, do 39 | # this. 40 | if type == 'continuous': 41 | xeps = np.finfo(xgrid.dtype).eps * np.abs(xgrid).max() 42 | x_bisect = _bisect_cont(f, a, b, fa, fb, xeps) 43 | elif type == 'discrete': 44 | x_bisect = _bisect_discr(f, a, b, fa, fb) 45 | else: 46 | raise ValueError("invalid type") 47 | return np.sort(np.hstack([x_hit, x_bisect])) 48 | 49 | 50 | def _bisect_cont(f, a, b, fa, fb, xeps): 51 | """Bisect roots already found""" 52 | while True: 53 | mid = 0.5 * (a + b) 54 | fmid = f(mid) 55 | towards_a = np.signbit(fa) != np.signbit(fmid) 56 | a = np.where(towards_a, a, mid) 57 | fa = np.where(towards_a, fa, fmid) 58 | b = np.where(towards_a, mid, b) 59 | fb = np.where(towards_a, fmid, fb) 60 | found = b - a < xeps 61 | if found.any(): 62 | break 63 | 64 | roots = mid[found] 65 | if found.all(): 66 | return roots 67 | more = _bisect_cont(f, a[~found], b[~found], fa[~found], fb[~found], xeps) 68 | return np.hstack([roots, more]) 69 | 70 | 71 | def _bisect_discr(f, a, b, fa, fb): 72 | """Bisect roots already found""" 73 | while True: 74 | mid = (a + b) // 2 75 | found = a == mid 76 | if found.any(): 77 | break 78 | 79 | fmid = f(mid) 80 | towards_a = np.signbit(fa) != np.signbit(fmid) 81 | a = np.where(towards_a, a, mid) 82 | fa = np.where(towards_a, fa, fmid) 83 | b = np.where(towards_a, mid, b) 84 | fb = np.where(towards_a, fmid, fb) 85 | 86 | roots = mid[found] 87 | if found.all(): 88 | return roots 89 | more = _bisect_discr(f, a[~found], b[~found], fa[~found], fb[~found]) 90 | return np.hstack([roots, more]) 91 | 92 | 93 | def discrete_extrema(f, xgrid): 94 | """Find extrema of Bessel-like discrete function""" 95 | fx = f(xgrid) 96 | absfx = np.abs(fx) 97 | 98 | # Forward differences: where[i] now means that the secant changes sign 99 | # fx[i+1]. 
This means that the extremum is STRICTLY between x[i] and
100 |     # x[i+2]
101 |     gx = fx[1:] - fx[:-1]
102 |     sgx = np.signbit(gx)
103 |     where = sgx[:-1] != sgx[1:]
104 |     where_a = np.hstack([where, False, False])
105 |     where_b = np.hstack([False, False, where])
106 |
107 |     a = xgrid[where_a]
108 |     b = xgrid[where_b]
109 |     absf_a = absfx[where_a]
110 |     absf_b = absfx[where_b]
111 |     res = [_bisect_discr_extremum(f, *args)
112 |            for args in zip(a, b, absf_a, absf_b)]
113 |
114 |     # We consider the outer points to be extrema if there is a decrease
115 |     # in magnitude or a sign change inwards
116 |     sfx = np.signbit(fx)
117 |     if absfx[0] > absfx[1] or sfx[0] != sfx[1]:
118 |         res.insert(0, xgrid[0])
119 |     if absfx[-1] > absfx[-2] or sfx[-1] != sfx[-2]:
120 |         res.append(xgrid[-1])
121 |
122 |     return np.array(res)
123 |
124 |
125 | def _bisect_discr_extremum(f, a, b, absf_a, absf_b):
126 |     """Bisect extremum of f on the set {a+1, ..., b-1}"""
127 |     d = b - a
128 |     if d <= 1:
129 |         return a if absf_a > absf_b else b
130 |     if d == 2:
131 |         return a + 1
132 |
133 |     m = (a + b) // 2
134 |     n = m + 1
135 |     absf_m = np.abs(f(m))
136 |     absf_n = np.abs(f(n))
137 |     if absf_m > absf_n:
138 |         return _bisect_discr_extremum(f, a, n, absf_a, absf_n)
139 |     else:
140 |         return _bisect_discr_extremum(f, m, b, absf_m, absf_b)
141 |
--------------------------------------------------------------------------------
/src/sparse_ir/_util.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
2 | # SPDX-License-Identifier: MIT
3 | import functools
4 | import numpy as np
5 |
6 |
7 | def ravel_argument(last_dim=False):
8 |     """Wrap function operating on 1-D numpy array to allow arbitrary shapes.
9 |
10 |     This decorator allows one to write functions which only need to operate
11 |     over one-dimensional (ravelled) arrays. This often simplifies the
12 |     "shape logic" of the computation.
13 |     """
14 |     return lambda fn: RavelArgumentDecorator(fn, last_dim)
15 |
16 |
17 | class RavelArgumentDecorator(object):
18 |     def __init__(self, inner, last_dim=False):
19 |         self.instance = None
20 |         self.inner = inner
21 |         self.last_dim = last_dim
22 |         functools.update_wrapper(self, inner)
23 |
24 |     def __get__(self, instance, _owner=None):
25 |         self.instance = instance
26 |         return self
27 |
28 |     def __call__(self, x):
29 |         x = np.asarray(x)
30 |         if self.instance is None:
31 |             res = self.inner(x.ravel())
32 |         else:
33 |             res = self.inner(self.instance, x.ravel())
34 |         if self.last_dim:
35 |             return res.reshape(res.shape[:-1] + x.shape)
36 |         else:
37 |             return res.reshape(x.shape + res.shape[1:])
38 |
39 |
40 | def check_reduced_matsubara(n, zeta=None):
41 |     """Checks that ``n`` is a reduced Matsubara frequency.
42 |
43 |     Check that the argument is a reduced Matsubara frequency, which is an
44 |     integer obtained by scaling the frequency `w[n]` as follows::
45 |
46 |         beta / np.pi * w[n] == 2 * n + zeta
47 |
48 |     Note that this means that instead of a fermionic frequency (``zeta == 1``),
49 |     we expect an odd integer, while for a bosonic frequency (``zeta == 0``),
50 |     we expect an even one. If ``zeta`` is omitted, any one is fine.
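
    Example::

        check_reduced_matsubara([1, -3, 5], zeta=1)   # odd (fermionic): passes
        check_reduced_matsubara([0, 2, -4], zeta=0)   # even (bosonic): passes
        check_reduced_matsubara(2, zeta=1)            # raises ValueError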
51 | """ 52 | n = np.asarray(n) 53 | if not np.issubdtype(n.dtype, np.integer): 54 | nfloat = n 55 | n = nfloat.astype(int) 56 | if not (n == nfloat).all(): 57 | raise ValueError("reduced frequency n must be integer") 58 | if zeta is not None: 59 | if not (n & 1 == zeta).all(): 60 | raise ValueError("n have wrong parity") 61 | return n 62 | 63 | 64 | def check_range(x, xmin, xmax): 65 | """Checks each element is in range [xmin, xmax]""" 66 | x = np.asarray(x) 67 | if not (x >= xmin).all(): 68 | raise ValueError(f"Some x violate lower bound {xmin}") 69 | if not (x <= xmax).all(): 70 | raise ValueError(f"Some x violate upper bound {xmax}") 71 | return x 72 | 73 | 74 | def check_svd_result(svd_result, matrix_shape=None): 75 | """Checks that argument is a valid SVD triple (u, s, vH)""" 76 | u, s, vH = map(np.asarray, svd_result) 77 | m_u, k_u = u.shape 78 | k_s, = s.shape 79 | k_v, n_v = vH.shape 80 | if k_u != k_s or k_s != k_v: 81 | raise ValueError("shape mismatch between SVD elements:" 82 | f"({m_u}, {k_u}) x ({k_s}) x ({k_v}, {n_v})") 83 | if matrix_shape is not None: 84 | m, n = matrix_shape 85 | if m_u != m or n_v != n: 86 | raise ValueError(f"shape mismatch between SVD ({m_u}, {n_v}) " 87 | f"and matrix ({m}, {n})") 88 | return u, s, vH 89 | -------------------------------------------------------------------------------- /src/sparse_ir/abstract.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | 4 | class AbstractBasis: 5 | r"""Abstract base class for bases on the imaginary-time axis. 6 | 7 | This class stores a set of basis functions. We can then expand a two-point 8 | propagator `G(τ)`, where `τ` is imaginary time: 9 | 10 | .. math:: G(\tau) \approx \sum_{l=0}^{L-1} g_l U_l(\tau) 11 | 12 | where `U` is now the `l`-th basis function, stored in :py:attr:`u` and 13 | `g` denote the expansion coefficients. Similarly, the Fourier transform 14 | `Ĝ(n)`, where `n` is a reduced Matsubara frequency, can be expanded as 15 | follows: 16 | 17 | .. math:: \hat G(n) \approx \sum_{l=0}^{L-1} g_l \hat U_l(n) 18 | 19 | where `Û` is the Fourier transform of the `l`-th basis function, stored 20 | in :py:attr:`uhat`. 21 | 22 | Assuming that ``basis`` is an instance of some abstract basis, ``g`` 23 | is a vector of expansion coefficients, ``tau`` is some imaginary time and 24 | ``n`` some frequency, we can write this in the library as follows:: 25 | 26 | G_tau = basis.u(tau).T @ gl 27 | G_n = basis.uhat(n).T @ gl 28 | 29 | """ 30 | @property 31 | def u(self): 32 | r"""Basis functions on the imaginary time axis. 33 | 34 | Set of IR basis functions on the imaginary time (tau) axis, where tau 35 | is a real number between zero and :py:attr:`beta`. To get the ``l``-th 36 | basis function at imaginary time ``tau`` of some basis ``basis``, use:: 37 | 38 | ultau = basis.u[l](tau) # l-th basis function at time tau 39 | 40 | Note that ``u`` supports vectorization both over ``l`` and ``tau``. 
41 | In particular, omitting the subscript yields a vector with all basis 42 | functions, evaluated at that position:: 43 | 44 | basis.u(tau) == [basis.u[l](tau) for l in range(basis.size)] 45 | 46 | Similarly, supplying a vector of `tau` points yields a matrix ``A``, 47 | where ``A[l,n]`` corresponds to the ``l``-th basis function evaluated 48 | at ``tau[n]``:: 49 | 50 | tau = [0.5, 1.0] 51 | basis.u(tau) == \ 52 | [[basis.u[l](t) for t in tau] for l in range(basis.size)] 53 | """ 54 | raise NotImplementedError() 55 | 56 | @property 57 | def uhat(self): 58 | r"""Basis functions on the reduced Matsubara frequency (``wn``) axis. 59 | 60 | Set of IR basis functions on the reduced Matsubara frequency (wn) axis, 61 | where wn is an integer. These are related to :py:attr:`u` by the following 62 | Fourier transform: 63 | 64 | .. math:: 65 | 66 | \hat u(n) = \int_0^\beta d\tau \exp(i\pi n \tau/\beta) u(\tau) 67 | 68 | To get the ``l``-th basis function at some reduced frequency ``wn`` of 69 | some basis ``basis``, use:: 70 | 71 | uln = basis.uhat[l](wn) # l-th basis function at freq wn 72 | 73 | ``uhat`` supports vectorization both over ``l`` and ``wn``. 74 | In particular, omitting the subscript yields a vector with all basis 75 | functions, evaluated at that position:: 76 | 77 | basis.uhat(wn) == [basis.uhat[l](wn) for l in range(basis.size)] 78 | 79 | Similarly, supplying a vector of `wn` points yields a matrix ``A``, 80 | where ``A[l,n]`` corresponds to the ``l``-th basis function evaluated 81 | at ``wn[n]``:: 82 | 83 | wn = [1, 3] 84 | basis.uhat(wn) == \ 85 | [[basis.uhat[l](wi) for wi in wn] for l in range(basis.size)] 86 | 87 | Note: 88 | Instead of the value of the Matsubara frequency, these functions 89 | expect integers corresponding to the prefactor of pi over beta. 90 | For example, the first few positive fermionic frequencies would 91 | be specified as ``[1, 3, 5, 7]``, and the first bosonic frequencies 92 | are ``[0, 2, 4, 6]``. This is also distinct from an index! 93 | """ 94 | raise NotImplementedError() 95 | 96 | @property 97 | def statistics(self): 98 | """Quantum statistic (`"F"` for fermionic, `"B"` for bosonic)""" 99 | raise NotImplementedError() 100 | 101 | def __getitem__(self, index): 102 | """Return basis functions/singular values for given index/indices. 103 | 104 | This can be used to truncate the basis to the n most significant 105 | singular values: `basis[:3]`. 106 | """ 107 | raise NotImplementedError() 108 | 109 | @property 110 | def shape(self): 111 | """Shape of the basis function set""" 112 | raise NotImplementedError() 113 | 114 | @property 115 | def size(self): 116 | """Number of basis functions / singular values""" 117 | raise NotImplementedError() 118 | 119 | @property 120 | def significance(self): 121 | """Significances of the basis functions 122 | 123 | Vector of significance values, one for each basis function. Each 124 | value is a number between 0 and 1 which is an a-priori bound on the 125 | (relative) error made by discarding the associated coefficient. 126 | """ 127 | raise NotImplementedError() 128 | 129 | @property 130 | def accuracy(self): 131 | """Accuracy of the basis. 132 | 133 | Upper bound on the relative error of representing a propagator with 134 | the given number of basis functions (number between 0 and 1).
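        In terms of :py:attr:`significance`, this is simply the significance
        of the last retained basis function (see the implementation below)::

            basis.accuracy == basis.significance[-1]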
135 | """ 136 | return self.significance[-1] 137 | 138 | @property 139 | def lambda_(self): 140 | """Basis cutoff parameter, `Λ == β * wmax`, or None if not present""" 141 | raise NotImplementedError() 142 | 143 | @property 144 | def beta(self): 145 | """Inverse temperature""" 146 | raise NotImplementedError() 147 | 148 | @property 149 | def wmax(self): 150 | """Real frequency cutoff or `None` if not present""" 151 | raise NotImplementedError() 152 | 153 | def default_tau_sampling_points(self, *, npoints=None): 154 | """Default sampling points on the imaginary time axis 155 | 156 | Arguments: 157 | npoints (int): 158 | Minimum number of sampling points to return. 159 | 160 | .. versionadded: 1.1 161 | """ 162 | raise NotImplementedError() 163 | 164 | def default_matsubara_sampling_points(self, *, npoints=None, 165 | positive_only=False): 166 | """Default sampling points on the imaginary frequency axis 167 | 168 | Arguments: 169 | npoints (int): 170 | Minimum number of sampling points to return. 171 | 172 | .. versionadded: 1.1 173 | positive_only (bool): 174 | Only return non-negative frequencies. This is useful if the 175 | object to be fitted is symmetric in Matsubura frequency, 176 | ``ghat(w) == ghat(-w).conj()``, or, equivalently, real in 177 | imaginary time. 178 | """ 179 | raise NotImplementedError() 180 | 181 | @property 182 | def is_well_conditioned(self): 183 | """Returns True if the sampling is expected to be well-conditioned""" 184 | return True 185 | -------------------------------------------------------------------------------- /src/sparse_ir/adapter.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | """ 4 | Drop-in replacement for the irbasis module. 5 | 6 | This is designed to be a drop-in replacement for ``irbasis``, where the basis 7 | can be computed on-the-fly for arbitrary values of Lambda. In other words, 8 | you should be able to replace ``irbasis`` with ``sparse_ir.adapter`` and 9 | everything should hopefully still work. 10 | 11 | Note however that on-the-fly computation typically has lower accuracy unless 12 | xprec is available. Thus, by default we only populate the basis down to 13 | singular values of ~1e-9 and emit a warning. You can squelch the warning by 14 | setting `WARN_ACCURACY` to false. 15 | """ 16 | # Do not import additional public symbols into this namespace, always use 17 | # underscores - this module should look as much as possible like `irbasis`! 18 | import numpy as _np 19 | from warnings import warn as _warn 20 | 21 | from . import sve as _sve 22 | from . import poly as _poly 23 | from . 
import kernel as _kernel 24 | 25 | try: 26 | import xprec as _xprec 27 | except ImportError: 28 | ACCURACY = 1.0e-9 29 | WARN_ACCURACY = True 30 | else: 31 | ACCURACY = 1.0e-15 32 | WARN_ACCURACY = False 33 | 34 | 35 | def load(statistics, Lambda, h5file=None): 36 | if WARN_ACCURACY: 37 | _warn("xprec package is not found - expect degraded accuracy!\n" 38 | "To squelch this warning, set WARN_ACCURACY to False.") 39 | 40 | kernel_type = {"F": _kernel.LogisticKernel, 41 | "B": _kernel.RegularizedBoseKernel}[statistics] 42 | kernel = kernel_type(float(Lambda)) 43 | sve_result = _sve.compute(kernel) 44 | return Basis(statistics, Lambda, sve_result) 45 | 46 | 47 | class Basis: 48 | def __init__(self, statistics, Lambda, sve_result): 49 | u, s, v = sve_result.part() 50 | self._statistics = statistics 51 | self._Lambda = Lambda 52 | self._u = u 53 | self._s = s 54 | self._v = v 55 | 56 | conv_radius = 40 * Lambda 57 | even_odd = {'F': 'odd', 'B': 'even'}[statistics] 58 | self._uhat = _poly.PiecewiseLegendreFT(u, even_odd, conv_radius) 59 | 60 | @property 61 | def Lambda(self): 62 | """Dimensionless parameter of IR basis""" 63 | return self._Lambda 64 | 65 | @property 66 | def statistics(self): 67 | """Statistics, either "F" for fermions or "B" for bosons""" 68 | return self._statistics 69 | 70 | def dim(self): 71 | """Return dimension of basis""" 72 | return self._s.size 73 | 74 | def sl(self, l=None): 75 | """Return the singular value for the l-th basis function""" 76 | return _select(self._s, l) 77 | 78 | def ulx(self, l, x): 79 | """Return value of basis function for x""" 80 | return _selectvals(self._u, l, x) 81 | 82 | def d_ulx(self, l, x, order, section=None): 83 | """Return (higher-order) derivatives of u_l(x)""" 84 | return _selectvals(self._u.deriv(order), l, x) 85 | 86 | def vly(self, l, y): 87 | """Return value of basis function for y""" 88 | return _selectvals(self._v, l, y) 89 | 90 | def d_vly(self, l, y, order): 91 | """Return (higher-order) derivatives of v_l(y)""" 92 | return _selectvals(self._v.deriv(order), l, y) 93 | 94 | def compute_unl(self, n, whichl=None): 95 | """Compute transformation matrix from IR to Matsubara frequencies""" 96 | n = _np.ravel(n) 97 | nn = 2 * n + self._uhat.zeta 98 | return _np.squeeze(_select(self._uhat, whichl)(nn).T) 99 | 100 | def num_sections_x(self): 101 | "Number of sections of piecewise polynomial representation of u_l(x)" 102 | return self._u.nsegments 103 | 104 | @property 105 | def section_edges_x(self): 106 | """End points of sections for u_l(x)""" 107 | return self._u.knots 108 | 109 | def num_sections_y(self): 110 | "Number of sections of piecewise polynomial representation of v_l(y)" 111 | return self._v.nsegments 112 | 113 | @property 114 | def section_edges_y(self): 115 | """End points of sections for v_l(y)""" 116 | return self._v.knots 117 | 118 | def sampling_points_x(self, whichl): 119 | """Computes "optimal" sampling points in x space for given basis""" 120 | return sampling_points_x(self, whichl) 121 | 122 | def sampling_points_y(self, whichl): 123 | """Computes "optimal" sampling points in y space for given basis""" 124 | return sampling_points_y(self, whichl) 125 | 126 | def sampling_points_matsubara(self, whichl): 127 | """Computes sampling points in Matsubara domain for given basis""" 128 | return sampling_points_matsubara(self, whichl) 129 | 130 | 131 | 132 | def _select(p, l): 133 | return p if l is None else p[l] 134 | 135 | 136 | def _selectvals(p, l, x): 137 | return p(x) if l is None else p.value(l, x) 138 | 139 | 140 | 
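# A minimal usage sketch of this adapter (the Lambda value is purely
# illustrative; all functions used are defined above):
#
#     import sparse_ir.adapter as irbasis
#     b = irbasis.load('F', 1000.0)
#     b.dim()                    # number of basis functions
#     b.ulx(0, 0.5)              # u_0(x) at x = 0.5
#     b.compute_unl([0, 1, 2])   # transformation matrix to Matsubara axis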
"""" CODE BELOW IS TAKEN FROM IRBAIS FOR COMPATIBLITITY""" 141 | def _find_roots(ulx): 142 | """Find all roots in (-1, 1) using double exponential mesh + bisection""" 143 | Nx = 10000 144 | eps = 1e-14 145 | tvec = _np.linspace(-3, 3, Nx) # 3 is a very safe option. 146 | xvec = _np.tanh(0.5 * _np.pi * _np.sinh(tvec)) 147 | 148 | zeros = [] 149 | for i in range(Nx - 1): 150 | if ulx(xvec[i]) * ulx(xvec[i + 1]) < 0: 151 | a = xvec[i + 1] 152 | b = xvec[i] 153 | u_a = ulx(a) 154 | while a - b > eps: 155 | half_point = 0.5 * (a + b) 156 | if ulx(half_point) * u_a > 0: 157 | a = half_point 158 | else: 159 | b = half_point 160 | zeros.append(0.5 * (a + b)) 161 | return _np.array(zeros) 162 | 163 | 164 | def _start_guesses(n=1000): 165 | "Construct points on a logarithmically extended linear interval" 166 | x1 = _np.arange(n) 167 | x2 = _np.array(_np.exp(_np.linspace(_np.log(n), _np.log(1E+8), n)), dtype=int) 168 | x = _np.unique(_np.hstack((x1, x2))) 169 | return x 170 | 171 | 172 | def _get_unl_real(basis_xy, x, l): 173 | "Return highest-order basis function on the Matsubara axis" 174 | unl = basis_xy.compute_unl(x, l) 175 | 176 | # Purely real functions 177 | zeta = 1 if basis_xy.statistics == 'F' else 0 178 | if l % 2 == zeta: 179 | assert _np.allclose(unl.imag, 0) 180 | return unl.real 181 | else: 182 | assert _np.allclose(unl.real, 0) 183 | return unl.imag 184 | 185 | 186 | def _sampling_points(fn): 187 | "Given a discretized 1D function, return the location of the extrema" 188 | fn = _np.asarray(fn) 189 | fn_abs = _np.abs(fn) 190 | sign_flip = fn[1:] * fn[:-1] < 0 191 | sign_flip_bounds = _np.hstack((0, sign_flip.nonzero()[0] + 1, fn.size)) 192 | points = [] 193 | for segment in map(slice, sign_flip_bounds[:-1], sign_flip_bounds[1:]): 194 | points.append(fn_abs[segment].argmax() + segment.start) 195 | return _np.asarray(points) 196 | 197 | 198 | def _full_interval(sample, stat): 199 | if stat == 'F': 200 | return _np.hstack((-sample[::-1]-1, sample)) 201 | else: 202 | # If we have a bosonic basis and even order (odd maximum), we have a 203 | # root at zero. We have to artifically add that zero back, otherwise 204 | # the condition number will blow up. 
205 | if sample[0] == 0: 206 | sample = sample[1:] 207 | return _np.hstack((-sample[::-1], 0, sample)) 208 | 209 | 210 | def _get_mats_sampling(basis_xy, lmax=None): 211 | "Generate Matsubara sampling points from extrema of basis functions" 212 | if lmax is None: 213 | lmax = basis_xy.dim()-1 214 | 215 | x = _start_guesses() 216 | y = _get_unl_real(basis_xy, x, lmax) 217 | x_idx = _sampling_points(y) 218 | 219 | sample = x[x_idx] 220 | return _full_interval(sample, basis_xy.statistics) 221 | 222 | 223 | def sampling_points_x(b, whichl): 224 | """Computes "optimal" sampling points in x space for given basis""" 225 | xroots = _find_roots(b._u[whichl]) 226 | xroots_ex = _np.hstack((-1.0, xroots, 1.0)) 227 | return 0.5 * (xroots_ex[:-1] + xroots_ex[1:]) 228 | 229 | 230 | def sampling_points_y(b, whichl): 231 | """Computes "optimal" sampling points in y space for given basis""" 232 | 233 | roots_positive_half = 0.5 * _find_roots(lambda y: b.vly(whichl, (y + 1)/2)) + 0.5 234 | if whichl % 2 == 0: 235 | roots_ex = _np.sort( 236 | _np.hstack([-1, -roots_positive_half, roots_positive_half, 1])) 237 | else: 238 | roots_ex = _np.sort( 239 | _np.hstack([-1, -roots_positive_half, 0, roots_positive_half, 1])) 240 | return 0.5 * (roots_ex[:-1] + roots_ex[1:]) 241 | 242 | 243 | def sampling_points_matsubara(b, whichl): 244 | """ 245 | Computes "optimal" sampling points in Matsubara domain for given basis 246 | 247 | Parameters 248 | ---------- 249 | b : 250 | basis object 251 | whichl: int 252 | Index of reference basis function "l" 253 | 254 | Returns 255 | ------- 256 | sampling_points: 1D array of int 257 | sampling points in Matsubara domain 258 | 259 | """ 260 | stat = b.statistics 261 | 262 | assert stat == 'F' or stat == 'B' or stat == 'barB' 263 | 264 | if whichl > b.dim()-1: 265 | raise RuntimeError("Too large whichl") 266 | 267 | return _get_mats_sampling(b, whichl) 268 | -------------------------------------------------------------------------------- /src/sparse_ir/augment.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2021 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import numpy as np 4 | 5 | from . import _util 6 | from . import abstract 7 | from . import basis 8 | 9 | 10 | class AugmentedBasis(abstract.AbstractBasis): 11 | """Augmented basis on the imaginary-time/frequency axis. 12 | 13 | Groups a set of additional functions, ``augmentations``, with a given 14 | ``basis``. The augmented functions then form the first basis 15 | functions, while the rest is provided by the regular basis, i.e.:: 16 | 17 | u[l](x) == augmentations[l](x) if l < naug else basis.u[l-naug](x), 18 | 19 | where ``naug = len(augmentations)`` is the number of added basis functions 20 | through augmentation. Similar expressions hold for Matsubara frequencies. 21 | 22 | Augmentation is useful in constructing bases for vertex-like quantities 23 | such as self-energies `[1]`_. It is also useful when constructing a 24 | two-point kernel that serves as a base for multi-point functions `[2]`_. 
25 | 26 | Example: 27 | For constructing the vertex basis and the augmented basis, one can 28 | use:: 29 | 30 | import sparse_ir, sparse_ir.augment as aug 31 | basis = sparse_ir.FiniteTempBasis('B', beta=10, wmax=2.0) 32 | vertex_basis = aug.AugmentedBasis(basis, aug.MatsubaraConst) 33 | aug_basis = aug.AugmentedBasis(basis, aug.TauConst, aug.TauLinear) 34 | 35 | Warning: 36 | Bases augmented with `TauConst` and `TauLinear` tend to be poorly 37 | conditioned. Care must be taken while fitting and compactness should 38 | be enforced if possible to regularize the problem. 39 | 40 | While vertex bases, i.e., bases augmented with `MatsubaraConst`, stay 41 | reasonably well-conditioned, it is still good practice to treat the 42 | Hartree--Fock term separately rather than including it in the basis, 43 | if possible. 44 | 45 | See also: 46 | - :class:`MatsubaraConst` for vertex basis `[1]`_ 47 | - :class:`TauConst`, :class:`TauLinear` for multi-point `[2]`_ 48 | 49 | .. _[1]: https://doi.org/10.1103/PhysRevResearch.3.033168 50 | .. _[2]: https://doi.org/10.1103/PhysRevB.97.205111 51 | """ 52 | def __init__(self, basis, *augmentations): 53 | augmentations = tuple(_augmentation_factory(basis, *augmentations)) 54 | self._basis = basis 55 | self._augmentations = augmentations 56 | self._naug = len(augmentations) 57 | 58 | self._u = AugmentedTauFunction(self._basis.u, augmentations) 59 | self._uhat = AugmentedMatsubaraFunction( 60 | self._basis.uhat, [aug.hat for aug in augmentations]) 61 | 62 | @property 63 | def u(self): 64 | return self._u 65 | 66 | @property 67 | def uhat(self): 68 | return self._uhat 69 | 70 | @property 71 | def statistics(self): 72 | return self._basis.statistics 73 | 74 | def __getitem__(self, index): 75 | stop = basis._slice_to_size(index) 76 | if stop <= self._naug: 77 | raise ValueError("Cannot truncate to only augmentation") 78 | return AugmentedBasis(self._basis[:stop - self._naug], 79 | *self._augmentations) 80 | 81 | @property 82 | def shape(self): 83 | return self.size, 84 | 85 | @property 86 | def size(self): 87 | return self._naug + self._basis.size 88 | 89 | @property 90 | def significance(self): 91 | return self._basis.significance 92 | 93 | @property 94 | def accuracy(self): 95 | return self._basis.accuracy 96 | 97 | @property 98 | def lambda_(self): 99 | return self._basis.lambda_ 100 | 101 | @property 102 | def beta(self): 103 | return self._basis.beta 104 | 105 | @property 106 | def wmax(self): 107 | return self._basis.wmax 108 | 109 | def default_tau_sampling_points(self, *, npoints=None): 110 | if npoints is None: 111 | npoints = self.size 112 | 113 | # Return the sampling points of the underlying basis, but since we 114 | # use the size of self, we request additional points. One then has to 115 | # hope that these give good sampling points.
116 | return self._basis.default_tau_sampling_points(npoints=npoints) 117 | 118 | def default_matsubara_sampling_points(self, *, npoints=None, 119 | positive_only=False): 120 | if npoints is None: 121 | npoints = self.size 122 | return self._basis.default_matsubara_sampling_points( 123 | npoints=npoints, positive_only=positive_only) 124 | 125 | @property 126 | def is_well_conditioned(self): 127 | wbasis = self._basis.is_well_conditioned 128 | waug = (len(self._augmentations) == 1 129 | and isinstance(self._augmentations[0], MatsubaraConst)) 130 | return wbasis and waug 131 | 132 | 133 | class _AugmentedFunction: 134 | def __init__(self, fbasis, faug): 135 | if fbasis.ndim != 1: 136 | raise ValueError("must have vector of functions as fbasis") 137 | self._fbasis = fbasis 138 | self._faug = faug 139 | self._naug = len(faug) 140 | 141 | @property 142 | def ndim(self): 143 | return 1 144 | 145 | @property 146 | def shape(self): 147 | return self.size, 148 | 149 | @property 150 | def size(self): 151 | return self._naug + self._fbasis.size 152 | 153 | def __call__(self, x): 154 | x = np.asarray(x) 155 | fbasis_x = self._fbasis(x) 156 | faug_x = [faug_l(x)[None] for faug_l in self._faug] 157 | f_x = np.concatenate(faug_x + [fbasis_x], axis=0) 158 | assert f_x.shape[1:] == x.shape 159 | return f_x 160 | 161 | def __getitem__(self, l): 162 | # TODO make this more general 163 | if isinstance(l, slice): 164 | stop = basis._slice_to_size(l) 165 | if stop <= self._naug: 166 | raise NotImplementedError("Don't truncate to only augmentation") 167 | return _AugmentedFunction(self._fbasis[:stop-self._naug], self._faug) 168 | else: 169 | l = int(l) 170 | if l < self._naug: 171 | return self._faug[l] 172 | else: 173 | return self._fbasis[l-self._naug] 174 | 175 | 176 | class AugmentedTauFunction(_AugmentedFunction): 177 | @property 178 | def xmin(self): 179 | return self._fbasis.xmin 180 | 181 | @property 182 | def xmax(self): 183 | return self._fbasis.xmax 184 | 185 | def deriv(self, n=1): 186 | """Get polynomial for the n'th derivative""" 187 | dbasis = self._fbasis.deriv(n) 188 | daug = [faug_l.deriv(n) for faug_l in self._faug] 189 | return AugmentedTauFunction(dbasis, daug) 190 | 191 | 192 | class AugmentedMatsubaraFunction(_AugmentedFunction): 193 | @property 194 | def zeta(self): 195 | return self._fbasis.zeta 196 | 197 | 198 | class AbstractAugmentation: 199 | """Scalar function in imaginary time/frequency. 200 | 201 | This represents a single function in imaginary time and frequency, 202 | together with some auxiliary methods that make it suitable for augmenting 203 | a basis.
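    Augmentations may be passed to :class:`AugmentedBasis` either as
    instances or as classes, in which case ``create`` is called to build the
    instance (see ``_augmentation_factory`` below). A minimal custom term
    could thus be sketched as (``MyTauConst`` is hypothetical)::

        class MyTauConst(AbstractAugmentation):
            @classmethod
            def create(cls, basis):
                return cls(basis.beta)
            # ... plus __call__, deriv and hat, mirroring TauConst below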
204 | 205 | See also: 206 | :class:`AugmentedBasis` 207 | """ 208 | @classmethod 209 | def create(cls, basis): 210 | """Factory method constructing an augmented term for a basis""" 211 | raise NotImplementedError() 212 | 213 | def __call__(self, tau): 214 | """Evaluate the function at imaginary time ``tau``""" 215 | raise NotImplementedError() 216 | 217 | def deriv(self, n): 218 | """Derivative of order ``n`` of the function""" 219 | raise NotImplementedError() 220 | 221 | def hat(self, n): 222 | """Evaluate the Fourier transform at reduced frequency ``n``""" 223 | raise NotImplementedError() 224 | 225 | 226 | class TauConst(AbstractAugmentation): 227 | """Constant in imaginary time/discrete delta in frequency""" 228 | @classmethod 229 | def create(cls, basis): 230 | _check_bosonic_statistics(basis.statistics) 231 | return cls(basis.beta) 232 | 233 | def __init__(self, beta): 234 | if beta <= 0: 235 | raise ValueError("temperature must be positive") 236 | self._beta = beta 237 | 238 | def __call__(self, tau): 239 | tau = _util.check_range(tau, 0, self._beta) 240 | return np.broadcast_to(1 / np.sqrt(self._beta), tau.shape) 241 | 242 | def deriv(self, n=1): 243 | if n == 0: 244 | return self 245 | else: 246 | return lambda tau: np.zeros_like(tau) 247 | 248 | def hat(self, n): 249 | n = _util.check_reduced_matsubara(n, zeta=0) 250 | return np.sqrt(self._beta) * (n == 0).astype(complex) 251 | 252 | 253 | class TauLinear(AbstractAugmentation): 254 | """Linear function in imaginary time, antisymmetric around beta/2""" 255 | @classmethod 256 | def create(cls, basis): 257 | _check_bosonic_statistics(basis.statistics) 258 | return cls(basis.beta) 259 | 260 | def __init__(self, beta): 261 | if beta <= 0: 262 | raise ValueError("temperature must be positive") 263 | self._beta = beta 264 | self._norm = np.sqrt(3/beta) 265 | 266 | def __call__(self, tau): 267 | tau = _util.check_range(tau, 0, self._beta) 268 | x = 2/self._beta * tau - 1 269 | return self._norm * x 270 | 271 | def deriv(self, n=1): 272 | if n == 0: 273 | return self 274 | elif n == 1: 275 | c = self._norm * 2/self._beta 276 | return lambda tau: np.full_like(tau, c) 277 | else: 278 | return lambda tau: np.zeros_like(tau) 279 | 280 | def hat(self, n): 281 | n = _util.check_reduced_matsubara(n, zeta=0) 282 | inv_w = np.pi/self._beta * n 283 | inv_w = np.reciprocal(inv_w, out=inv_w, where=n.astype(bool)) 284 | return self._norm * 2/1j * inv_w 285 | 286 | 287 | class MatsubaraConst(AbstractAugmentation): 288 | """Constant in Matsubara, undefined in imaginary time""" 289 | @classmethod 290 | def create(cls, basis): 291 | return cls(basis.beta) 292 | 293 | def __init__(self, beta): 294 | if beta <= 0: 295 | raise ValueError("temperature must be positive") 296 | self._beta = beta 297 | 298 | def __call__(self, tau): 299 | tau = _util.check_range(tau, 0, self._beta) 300 | return np.broadcast_to(np.nan, tau.shape) 301 | 302 | def deriv(self, n=1): 303 | return self 304 | 305 | def hat(self, n): 306 | n = _util.check_reduced_matsubara(n) 307 | return np.broadcast_to(1.0, n.shape) 308 | 309 | 310 | def _augmentation_factory(basis, *augs): 311 | for aug in augs: 312 | if isinstance(aug, AbstractAugmentation): 313 | yield aug 314 | else: 315 | yield aug.create(basis) 316 | 317 | 318 | def _check_bosonic_statistics(statistics): 319 | if statistics == 'B': 320 | return 321 | elif statistics == 'F': 322 | raise ValueError("term only allowed for bosonic basis") 323 | else: 324 | raise ValueError("invalid statistics") 325 | 
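# Usage sketch: fitting with an augmented basis on sparse sampling points.
# This assumes ``TauSampling`` is exported at the package top level (as used
# via ``sampling.TauSampling`` in basis_set.py); parameter values and the
# data array ``gtau`` are illustrative only.
#
#     import sparse_ir
#     from sparse_ir.augment import AugmentedBasis, TauConst, TauLinear
#     basis = sparse_ir.FiniteTempBasis('B', beta=10.0, wmax=4.2)
#     aug = AugmentedBasis(basis, TauConst, TauLinear)
#     smpl = sparse_ir.TauSampling(aug)
#     gl = smpl.fit(gtau)   # gtau sampled on smpl.sampling_points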
-------------------------------------------------------------------------------- /src/sparse_ir/basis.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | from typing import Tuple 4 | import numpy as np 5 | from warnings import warn 6 | 7 | from . import abstract 8 | from . import kernel as _kernel 9 | from . import poly 10 | from . import sve 11 | 12 | 13 | class FiniteTempBasis(abstract.AbstractBasis): 14 | r"""Intermediate representation (IR) basis for given temperature. 15 | 16 | For a continuation kernel from real frequencies, `ω` ∈ [-ωmax, ωmax], to 17 | imaginary time, `τ` ∈ [0, beta], this class stores the truncated singular 18 | value expansion or IR basis: 19 | 20 | .. math:: 21 | 22 | K(\tau, \omega) \approx \sum_{l=0}^{L-1} U_l(\tau) S_l V_l(\omega), 23 | 24 | where `U` are the IR basis functions on the imaginary-time axis, stored 25 | in :py:attr:`u`, `S` are the singular values, stored in :py:attr:`s`, 26 | and `V` are the IR basis functions on the real-frequency axis, stored 27 | in :py:attr:`v`. The IR basis functions in Matsubara frequency are 28 | stored in :py:attr:`uhat`. 29 | 30 | Example: 31 | The following example code assumes the spectral function is a single 32 | pole at ω = 2.5:: 33 | 34 | # Compute IR basis for fermions and β = 10, ωmax = 4.2 35 | import sparse_ir 36 | basis = sparse_ir.FiniteTempBasis(statistics='F', beta=10, wmax=4.2) 37 | 38 | # Assume spectrum is a single pole at ω = 2.5, compute G(iw) 39 | # on the first few Matsubara frequencies 40 | gl = basis.s * basis.v(2.5) 41 | giw = gl @ basis.uhat([1, 3, 5, 7]) 42 | """ 43 | def __init__(self, statistics, beta, wmax, eps=None, *, 44 | max_size=None, kernel=None, sve_result=None): 45 | if not (beta > 0): 46 | raise ValueError("inverse temperature beta must be positive") 47 | if not (wmax >= 0): 48 | raise ValueError("frequency cutoff must be non-negative") 49 | 50 | if eps is None and sve_result is None and not sve.HAVE_XPREC: 51 | warn("xprec package is not available:\n" 52 | "expect single precision (1.5e-8) only as both cutoff and\n" 53 | "accuracy of the basis functions") 54 | 55 | # Calculate basis functions from truncated singular value expansion 56 | self._kernel = _get_kernel(statistics, beta * wmax, kernel) 57 | if sve_result is None: 58 | sve_result = sve.compute(self._kernel, eps) 59 | 60 | self._sve_result = sve_result 61 | self._statistics = statistics 62 | self._beta = beta 63 | self._wmax = wmax 64 | 65 | u, s, v = sve_result.part(eps, max_size) 66 | if sve_result.s.size > s.size: 67 | self._accuracy = sve_result.s[s.size] / s[0] 68 | else: 69 | self._accuracy = s[-1] / s[0] 70 | 71 | # The polynomials are scaled to the new variables by transforming the 72 | # knots according to: tau = beta/2 * (x + 1), w = wmax * y. Scaling 73 | # the data is not necessary as the normalization is inferred. 74 | self._u = u.__class__(u.data, beta/2 * (u.knots + 1), beta/2 * u.dx, u.symm) 75 | self._v = v.__class__(v.data, wmax * v.knots, wmax * v.dx, v.symm) 76 | 77 | # The singular values are scaled to match the change of variables, with 78 | # the additional complexity that the kernel may have an additional 79 | # power of w. 80 | self._s = np.sqrt(beta/2 * wmax) * (wmax**(-self.kernel.ypower)) * s 81 | 82 | # HACK: as we don't yet support Fourier transforms on anything but the 83 | # unit interval, we need to scale the underlying data.
84 | uhat_base_full = poly.PiecewiseLegendrePoly( 85 | np.sqrt(beta) * sve_result.u.data, sve_result.u, 86 | symm=sve_result.u.symm) 87 | conv_radius = 40 * self.kernel.lambda_ 88 | even_odd = {'F': 'odd', 'B': 'even'}[statistics] 89 | self._uhat_full = poly.PiecewiseLegendreFT(uhat_base_full, even_odd, 90 | n_asymp=conv_radius) 91 | self._uhat = self._uhat_full[:s.size] 92 | 93 | def __getitem__(self, index): 94 | return FiniteTempBasis( 95 | self._statistics, self._beta, self._wmax, None, 96 | max_size=_slice_to_size(index), kernel=self._kernel, 97 | sve_result=self._sve_result) 98 | 99 | @property 100 | def statistics(self): return self._statistics 101 | 102 | @property 103 | def beta(self): return self._beta 104 | 105 | @property 106 | def wmax(self): return self._wmax 107 | 108 | @property 109 | def lambda_(self): return self._beta * self._wmax 110 | 111 | @property 112 | def shape(self): return self._s.shape 113 | 114 | @property 115 | def size(self): return self._s.size 116 | 117 | @property 118 | def u(self) -> poly.PiecewiseLegendrePoly: return self._u 119 | 120 | @property 121 | def uhat(self) -> poly.PiecewiseLegendreFT: return self._uhat 122 | 123 | @property 124 | def s(self) -> np.ndarray: 125 | """Vector of singular values of the continuation kernel""" 126 | return self._s 127 | 128 | @property 129 | def v(self) -> poly.PiecewiseLegendrePoly: 130 | r"""Basis functions on the real frequency axis. 131 | 132 | Set of IR basis functions on the real frequency (omega) axis, where 133 | omega is a real number of magnitude less than :py:attr:`wmax`. To get 134 | the ``l``-th basis function at real frequency ``omega`` of some basis 135 | ``basis``, use:: 136 | 137 | ulomega = basis.v[l](omega) # l-th basis function at freq. omega 138 | 139 | Note that ``v`` supports vectorization both over ``l`` and ``omega``. 140 | In particular, omitting the subscript yields a vector with all basis 141 | functions, evaluated at that position:: 142 | 143 | basis.v(omega) == [basis.v[l](omega) for l in range(basis.size)] 144 | 145 | Similarly, supplying a vector of `omega` points yields a matrix ``A``, 146 | where ``A[l,n]`` corresponds to the ``l``-th basis function evaluated 147 | at ``omega[n]``:: 148 | 149 | omega = [0.5, 1.0] 150 | basis.v(omega) == \ 151 | [[basis.v[l](t) for t in omega] for l in range(basis.size)] 152 | """ 153 | return self._v 154 | 155 | @property 156 | def significance(self): 157 | return self._s / self._s[0] 158 | 159 | @property 160 | def accuracy(self): 161 | return self._accuracy 162 | 163 | @property 164 | def kernel(self): 165 | """Kernel of which this is the singular value expansion""" 166 | return self._kernel 167 | 168 | @property 169 | def sve_result(self): 170 | return self._sve_result 171 | 172 | def default_tau_sampling_points(self, *, npoints=None): 173 | if npoints is None: 174 | npoints = self.size 175 | x = _default_sampling_points(self._sve_result.u, npoints) 176 | return self._beta/2 * (x + 1) 177 | 178 | def default_matsubara_sampling_points(self, *, npoints=None, 179 | positive_only=False): 180 | if npoints is None: 181 | npoints = self.size 182 | return _default_matsubara_sampling_points(self._uhat_full, npoints, 183 | positive_only=positive_only) 184 | 185 | def default_omega_sampling_points(self, *, npoints=None): 186 | """Return default sampling points on the real-frequency axis. 187 | 188 | Arguments: 189 | npoints (int): 190 | Minimum number of sampling points to return. 191 | 192 | ..
versionadded:: 1.1 193 | """ 194 | if npoints is None: 195 | npoints = self.size 196 | y = _default_sampling_points(self._sve_result.v, npoints) 197 | return self._wmax * y 198 | 199 | def rescale(self, new_beta): 200 | """Return a basis for different temperature. 201 | 202 | Uses the same kernel with the same ``eps``, but a different 203 | temperature. Note that this implies a different UV cutoff ``wmax``, 204 | since ``lambda_ == beta * wmax`` stays constant. 205 | """ 206 | new_wmax = self._kernel.lambda_ / new_beta 207 | return FiniteTempBasis(self._statistics, new_beta, new_wmax, None, 208 | max_size=self.size, kernel=self._kernel, 209 | sve_result=self._sve_result) 210 | 211 | 212 | def finite_temp_bases( 213 | beta: float, wmax: float, eps: float = None, 214 | sve_result: tuple = None 215 | ) -> Tuple[FiniteTempBasis, FiniteTempBasis]: 216 | """Construct FiniteTempBasis objects for fermions and bosons 217 | 218 | Construct FiniteTempBasis objects for fermions and bosons using 219 | the same LogisticKernel instance. 220 | """ 221 | if sve_result is None: 222 | sve_result = sve.compute(_kernel.LogisticKernel(beta*wmax), eps) 223 | basis_f = FiniteTempBasis("F", beta, wmax, eps, sve_result=sve_result) 224 | basis_b = FiniteTempBasis("B", beta, wmax, eps, sve_result=sve_result) 225 | return basis_f, basis_b 226 | 227 | 228 | def _default_sampling_points(u, L): 229 | if u.xmin != -1 or u.xmax != 1: 230 | raise ValueError("expecting unscaled functions here") 231 | 232 | if L < u.size: 233 | # For orthogonal polynomials (the high-T limit of IR), we know that the 234 | # ideal sampling points for a basis of size L are the roots of the L-th 235 | # polynomial. We empirically find that these stay good sampling points 236 | # for our kernels (probably because the kernels are totally positive). 237 | x0 = u[L].roots() 238 | else: 239 | # If we do not have enough polynomials in the basis, we approximate the 240 | # roots of the L'th polynomial by the extrema of the (L-1)'st basis 241 | # function, which is sensible due to the strong interleaving property 242 | # of these functions' roots. 243 | maxima = u[-1].deriv().roots() 244 | 245 | # Putting the sampling points right at [0, beta], which would be the 246 | # local extrema, is slightly worse conditioned than putting it in the 247 | # middle. This can be understood by the fact that the roots never 248 | # occur right at the border. 249 | left = .5 * (maxima[:1] + u.xmin) 250 | right = .5 * (maxima[-1:] + u.xmax) 251 | x0 = np.concatenate([left, maxima, right]) 252 | 253 | if x0.size != L: 254 | warn(f"Requesting {L} sampling points for corresponding basis size,\n" 255 | f"but {x0.size} were returned. This may indicate a problem " 256 | f"with precision.", UserWarning, 3) 257 | return x0 258 | 259 | 260 | def _default_matsubara_sampling_points(uhat, L, *, fence=False, positive_only=False): 261 | l_requested = L 262 | 263 | # The number of sign changes is always odd for a bosonic basis (freq == 'even') 264 | # and even for a fermionic basis (freq == 'odd'). So in order to get at 265 | # least as many sign changes as basis functions, we bump the requested size:
266 | if uhat.freq == 'odd' and l_requested % 2 == 1: 267 | l_requested += 1 268 | elif uhat.freq == 'even' and l_requested % 2 == 0: 269 | l_requested += 1 270 | 271 | if l_requested < uhat.size: 272 | # As with the zeros, the sign changes provide excellent sampling points 273 | wn = uhat[l_requested].sign_changes(positive_only=positive_only) 274 | else: 275 | # As a fallback, use the (discrete) extrema of the corresponding 276 | # highest-order basis function in Matsubara. This turns out to be okay. 277 | polyhat = uhat[-1] 278 | wn = polyhat.extrema(positive_only=positive_only) 279 | 280 | # For bosonic bases, we must explicitly include the zero frequency, 281 | # otherwise the condition number blows up. 282 | if wn[0] % 2 == 0: 283 | wn = np.unique(np.hstack((0, wn))) 284 | 285 | expected_size = l_requested 286 | if positive_only: 287 | expected_size = (expected_size + 1) // 2 288 | 289 | if wn.size != expected_size: 290 | warn(f"Requesting {expected_size} {uhat.freq} sampling frequencies\n" 291 | f"for basis size L={L}, but {wn.size} were returned. This may\n" 292 | f"indicate a problem with precision.", UserWarning, 3) 293 | if fence: 294 | wn = _fence_matsubara_sampling(wn, positive_only) 295 | return wn 296 | 297 | 298 | def _fence_matsubara_sampling(wn, positive_only): 299 | # While the condition number for sparse sampling in tau saturates at a 300 | # modest level, the conditioning in Matsubara steadily deteriorates due 301 | # to the fact that we are not free to set sampling points continuously. 302 | # At double precision, tau sampling is better conditioned than iwn 303 | # by a factor of ~4 (still OK). To battle this, we fence the largest 304 | # frequency with two carefully chosen oversampling points, which brings 305 | # the two sampling problems within a factor of 2. 306 | wn_outer = wn[-1:] if positive_only else wn[[0, -1]] 307 | wn_diff = 2 * np.round(0.025 * wn_outer).astype(int) 308 | if wn.size >= 20: 309 | wn = np.hstack([wn, wn_outer - np.sign(wn_outer) * wn_diff]) 310 | if wn.size >= 42: 311 | wn = np.hstack([wn, wn_outer + np.sign(wn_outer) * wn_diff]) 312 | return np.unique(wn) 313 | 314 | 315 | def _get_kernel(statistics, lambda_, kernel): 316 | if statistics not in 'BF': 317 | raise ValueError("statistics must either be 'B' (for bosonic basis) " 318 | "or 'F' (for fermionic basis)") 319 | if kernel is None: 320 | kernel = _kernel.LogisticKernel(lambda_) 321 | else: 322 | try: 323 | lambda_kernel = kernel.lambda_ 324 | except AttributeError: 325 | pass 326 | else: 327 | if not np.allclose(lambda_kernel, lambda_, atol=0, rtol=4e-16): 328 | raise ValueError("lambda of kernel and basis mismatch") 329 | return kernel 330 | 331 | 332 | def _slice_to_size(index): 333 | if not isinstance(index, slice): 334 | raise ValueError("argument must be a slice (`n:m`)") 335 | if index.start is not None and index.start != 0: 336 | raise ValueError("slice must start at zero") 337 | if index.step is not None and index.step != 1: 338 | raise ValueError("slice must step in ones") 339 | return index.stop 340 | -------------------------------------------------------------------------------- /src/sparse_ir/basis_set.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | from . import basis 4 | from . import sampling 5 | 6 | 7 | class FiniteTempBasisSet: 8 | """Class for holding IR bases and sparse-sampling objects.
9 | 10 | An object of this class holds IR bases for fermions and bosons 11 | and associated sparse-sampling objects. 12 | 13 | Attributes: 14 | basis_f (FiniteTempBasis): 15 | Fermion basis 16 | basis_b (FiniteTempBasis): 17 | Boson basis 18 | smpl_tau_f (TauSampling): 19 | Sparse sampling for tau & fermion 20 | smpl_tau_b (TauSampling): 21 | Sparse sampling for tau & boson 22 | smpl_wn_f (MatsubaraSampling): 23 | Sparse sampling for Matsubara frequency & fermion 24 | smpl_wn_b (MatsubaraSampling): 25 | Sparse sampling for Matsubara frequency & boson 26 | """ 27 | def __init__(self, beta, wmax, eps=None, sve_result=None): 28 | """ 29 | Create basis sets for fermions and bosons and 30 | associated sampling objects. 31 | Fermionic and bosonic bases are constructed by SVE of the logistic kernel. 32 | """ 33 | if sve_result is None: 34 | # Create bases by sve of the logistic kernel 35 | self.basis_f, self.basis_b = basis.finite_temp_bases(beta, wmax, eps) 36 | else: 37 | # Create bases using the given sve results 38 | self.basis_f = basis.FiniteTempBasis( 39 | "F", beta, wmax, eps, sve_result=sve_result) 40 | self.basis_b = basis.FiniteTempBasis( 41 | "B", beta, wmax, eps, sve_result=sve_result) 42 | 43 | # Tau sampling 44 | self.smpl_tau_f = sampling.TauSampling(self.basis_f) 45 | self.smpl_tau_b = sampling.TauSampling(self.basis_b) 46 | 47 | # Matsubara sampling 48 | self.smpl_wn_f = sampling.MatsubaraSampling(self.basis_f) 49 | self.smpl_wn_b = sampling.MatsubaraSampling(self.basis_b) 50 | 51 | @property 52 | def lambda_(self): 53 | """Ultra-violet cutoff of the kernel""" 54 | return self.basis_f.lambda_ 55 | 56 | @property 57 | def beta(self): 58 | """Inverse temperature""" 59 | return self.basis_f.beta 60 | 61 | @property 62 | def wmax(self): 63 | """Cut-off frequency""" 64 | return self.basis_f.wmax 65 | 66 | @property 67 | def accuracy(self): 68 | """Accuracy of the bases""" 69 | return self.basis_f.accuracy 70 | 71 | @property 72 | def sve_result(self): 73 | """Result of singular value expansion""" 74 | return self.basis_f.sve_result 75 | 76 | @property 77 | def tau(self): 78 | """Sampling points in the imaginary-time domain""" 79 | return self.smpl_tau_f.sampling_points 80 | 81 | @property 82 | def wn_f(self): 83 | """Sampling fermionic frequencies""" 84 | return self.smpl_wn_f.sampling_points 85 | 86 | @property 87 | def wn_b(self): 88 | """Sampling bosonic frequencies""" 89 | return self.smpl_wn_b.sampling_points 90 | -------------------------------------------------------------------------------- /src/sparse_ir/dlr.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import numpy as np 4 | 5 | from . import abstract 6 | from . import kernel 7 | from . import sampling 8 | from . import basis as _basis 9 | from . import _util 10 | from . import svd 11 | 12 | 13 | class DiscreteLehmannRepresentation(abstract.AbstractBasis): 14 | """Discrete Lehmann representation (DLR), with poles being extrema of IR. 15 | 16 | This class implements a variant of the discrete Lehmann representation 17 | (`DLR`_). Instead of a truncated singular value expansion of the analytic 18 | continuation kernel ``K`` like the IR, the discrete Lehmann representation 19 | is based on a "sketching" of ``K``.
The resulting basis is a 20 | linear combination of a discrete set of poles on the real-frequency axis, 21 | continued to the imaginary-frequency axis:: 22 | 23 | G(iv) == sum(a[i] / (iv - w[i]) for i in range(L)) 24 | 25 | Warning: 26 | The poles on the real-frequency axis selected for the DLR are based 27 | on a rank-revealing decomposition, which offers accuracy guarantees. 28 | Here, we instead select the pole locations based on the zeros of the IR 29 | basis functions on the real axis, which is a heuristic. We do not 30 | expect that difference to matter, but please don't blame the DLR 31 | authors if we were wrong :-) 32 | 33 | .. _DLR: https://doi.org/10.1103/PhysRevB.105.235115 34 | """ 35 | def __init__(self, basis: _basis.FiniteTempBasis, sampling_points=None): 36 | if sampling_points is None: 37 | sampling_points = basis.default_omega_sampling_points() 38 | if not isinstance(basis.kernel, kernel.LogisticKernel): 39 | raise ValueError("DLR supports only LogisticKernel") 40 | 41 | self._basis = basis 42 | self._poles = np.asarray(sampling_points) 43 | self._y_sampling_points = self._poles/basis.wmax 44 | 45 | self._u = TauPoles(basis.statistics, basis.beta, self._poles) 46 | self._uhat = MatsubaraPoles(basis.statistics, basis.beta, self._poles) 47 | 48 | # Fitting matrix from IR 49 | F = -basis.s[:, None] * basis.v(self._poles) 50 | 51 | # Now, here we *know* that F is ill-conditioned in a very particular way: 52 | # it is a product A * B * C, where B is well conditioned and A, C are 53 | # scalings. This is handled with guaranteed relative accuracy by a 54 | # Jacobi SVD, implied by the 'accurate' strategy. 55 | uF, sF, vF = svd.compute(F, strategy='accurate') 56 | self.matrix = sampling.DecomposedMatrix(F, svd_result=(uF, sF, vF.T)) 57 | 58 | @property 59 | def u(self): return self._u 60 | 61 | @property 62 | def uhat(self): return self._uhat 63 | 64 | @property 65 | def statistics(self): 66 | return self.basis.statistics 67 | 68 | @property 69 | def sampling_points(self): 70 | return self._poles 71 | 72 | @property 73 | def shape(self): return self.size, 74 | 75 | @property 76 | def size(self): return self._poles.size 77 | 78 | @property 79 | def basis(self) -> _basis.FiniteTempBasis: 80 | """ Underlying basis """ 81 | return self._basis 82 | 83 | @property 84 | def lambda_(self): 85 | return self.basis.lambda_ 86 | 87 | @property 88 | def beta(self): 89 | return self.basis.beta 90 | 91 | @property 92 | def wmax(self): 93 | return self.basis.wmax 94 | 95 | @property 96 | def significance(self): 97 | return np.ones(self.shape) 98 | 99 | @property 100 | def accuracy(self): 101 | return self.basis.accuracy 102 | 103 | def from_IR(self, gl: np.ndarray, axis=0) -> np.ndarray: 104 | """ 105 | From IR to DLR 106 | 107 | gl: 108 | Expansion coefficients in IR 109 | """ 110 | return self.matrix.lstsq(gl, axis) 111 | 112 | def to_IR(self, g_dlr: np.ndarray, axis=0) -> np.ndarray: 113 | """ 114 | From DLR to IR 115 | 116 | g_dlr: 117 | Expansion coefficients in DLR 118 | """ 119 | return self.matrix.matmul(g_dlr, axis) 120 | 121 | def default_tau_sampling_points(self): 122 | return self.basis.default_tau_sampling_points() 123 | 124 | def default_matsubara_sampling_points(self, **kwargs): 125 | return self.basis.default_matsubara_sampling_points(**kwargs) 126 | 127 | @property 128 | def is_well_conditioned(self): 129 | return False 130 | 131 | 132 | class MatsubaraPoles: 133 | def __init__(self, statistics: str, beta: float, poles: np.ndarray): 134 | self._statistics = statistics
135 | self._beta = beta 136 | self._poles = np.array(poles) 137 | 138 | @_util.ravel_argument(last_dim=True) 139 | def __call__(self, n): 140 | """Evaluate basis functions at given frequency n""" 141 | iv = 1j*n * np.pi/self._beta 142 | if self._statistics == 'F': 143 | return 1 /(iv[None, :] - self._poles[:, None]) 144 | else: 145 | return np.tanh(0.5 * self._beta * self._poles)[:, None]\ 146 | /(iv[None, :] - self._poles[:, None]) 147 | 148 | 149 | class TauPoles: 150 | def __init__(self, statistics: str, beta: float, poles: np.ndarray): 151 | self._beta = beta 152 | self._statistics = statistics 153 | self._poles = np.array(poles) 154 | self._wmax = np.abs(poles).max() 155 | 156 | @_util.ravel_argument(last_dim=True) 157 | def __call__(self, tau): 158 | """ Evaluate basis functions at tau """ 159 | tau = np.asarray(tau) 160 | if (tau < 0).any() or (tau > self._beta).any(): 161 | raise RuntimeError("tau must be in [0, beta]!") 162 | 163 | x = 2 * tau/self._beta - 1 164 | y = self._poles/self._wmax 165 | lambda_ = self._beta * self._wmax 166 | 167 | res = -kernel.LogisticKernel(lambda_)(x[:, None], y[None, :]) 168 | return res.T 169 | -------------------------------------------------------------------------------- /src/sparse_ir/kernel.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import numpy as np 4 | from typing import Callable 5 | 6 | 7 | class AbstractKernel: 8 | r"""Integral kernel ``K(x, y)``. 9 | 10 | Abstract base class for an integral kernel, i.e., a real binary function 11 | ``K(x, y)`` used in a Fredholm integral equation of the first kind: 12 | 13 | .. math:: u(x) = \int K(x, y) v(y) dy 14 | 15 | where ``x ∈ [xmin, xmax]`` and ``y ∈ [ymin, ymax]``. For its SVE to exist, 16 | the kernel must be square-integrable; for its singular values to decay 17 | exponentially, it must be smooth. 18 | 19 | In general, the kernel is applied to a scaled spectral function ρ'(y) as: 20 | 21 | .. math:: \int K(x, y) \rho'(y) dy, 22 | 23 | where ρ'(y) = w(y) ρ(y). 24 | """ 25 | def __call__(self, x, y, x_plus=None, x_minus=None): 26 | """Evaluate kernel at point (x, y) 27 | 28 | For given ``x, y``, return the value of ``K(x, y)``. The arguments may 29 | be numpy arrays, in which case the function shall be evaluated over 30 | the broadcast arrays. 31 | 32 | The parameters ``x_plus`` and ``x_minus``, if given, shall contain the 33 | values of ``x - xmin`` and ``xmax - x``, respectively. This is useful 34 | if either difference is to be formed and cancellation expected. 35 | """ 36 | raise NotImplementedError() 37 | 38 | def sve_hints(self, eps): 39 | """Provide discretisation hints for the SVE routines. 40 | 41 | Advises the SVE routines of discretisation parameters suitable in 42 | transforming the (infinite) SVE into a (finite) SVD problem. 43 | 44 | See: :class:`AbstractSVEHints`. 45 | """ 46 | raise NotImplementedError() 47 | 48 | @property 49 | def xrange(self): 50 | """Tuple ``(xmin, xmax)`` delimiting the range of allowed x values""" 51 | return -1, 1 52 | 53 | @property 54 | def yrange(self): 55 | """Tuple ``(ymin, ymax)`` delimiting the range of allowed y values""" 56 | return -1, 1 57 | 58 | @property 59 | def is_centrosymmetric(self): 60 | """Kernel is centrosymmetric. 61 | 62 | Returns true if and only if ``K(x, y) == K(-x, -y)`` for all values of 63 | ``x`` and ``y``.
This allows the kernel to be block-diagonalized, 64 | speeding up the singular value expansion by a factor of 4. Defaults 65 | to false. 66 | """ 67 | return False 68 | 69 | def get_symmetrized(self, sign): 70 | """Return symmetrized kernel ``K(x, y) + sign * K(x, -y)``. 71 | 72 | By default, this returns a simple wrapper over the current instance 73 | which naively performs the sum. You may want to override this to 74 | avoid cancellation. 75 | """ 76 | return ReducedKernel(self, sign) 77 | 78 | @property 79 | def ypower(self): 80 | """Power with which the y coordinate scales.""" 81 | return 0 82 | 83 | @property 84 | def conv_radius(self): 85 | """Convergence radius of the Matsubara basis asymptotic model. 86 | 87 | For improved relative numerical accuracy, the IR basis functions on the 88 | Matsubara axis ``basis.uhat(n)`` can be evaluated from an asymptotic 89 | expression for ``abs(n) > conv_radius``. If ``conv_radius`` is 90 | ``None``, then the asymptotics are unused (the default). 91 | """ 92 | return None 93 | 94 | def weight_func(self, statistics: str) -> Callable[[np.ndarray], np.ndarray]: 95 | """Return the weight function for given statistics""" 96 | if statistics not in 'FB': 97 | raise ValueError("statistics must be 'F' for fermions or 'B' for bosons") 98 | return lambda x: np.ones_like(x) 99 | 100 | 101 | class AbstractSVEHints: 102 | """Discretization hints for singular value expansion of a given kernel.""" 103 | @property 104 | def segments_x(self): 105 | """Segments for piecewise polynomials on the ``x`` axis. 106 | 107 | List of segments on the ``x`` axis for the associated piecewise 108 | polynomial. Should reflect the approximate position of roots of a 109 | high-order singular function in ``x``. 110 | """ 111 | raise NotImplementedError() 112 | 113 | @property 114 | def segments_y(self): 115 | """Segments for piecewise polynomials on the ``y`` axis. 116 | 117 | List of segments on the ``y`` axis for the associated piecewise 118 | polynomial. Should reflect the approximate position of roots of a 119 | high-order singular function in ``y``. 120 | """ 121 | raise NotImplementedError() 122 | 123 | @property 124 | def ngauss(self): 125 | """Gauss-Legendre order to use to guarantee accuracy""" 126 | raise NotImplementedError() 127 | 128 | @property 129 | def nsvals(self): 130 | """Upper bound for number of singular values 131 | 132 | Upper bound on the number of singular values above the given 133 | threshold, i.e., where ``s[l] >= eps * s[0]``. 134 | """ 135 | raise NotImplementedError() 136 | 137 | 138 | class LogisticKernel(AbstractKernel): 139 | r"""Fermionic/bosonic analytical continuation kernel. 140 | 141 | In dimensionless variables ``x = 2*τ/β - 1``, ``y = β*ω/Λ``, 142 | the integral kernel is a function on ``[-1, 1] x [-1, 1]``: 143 | 144 | .. math:: K(x, y) = \frac{\exp(-\Lambda y(x + 1)/2)}{1 + \exp(-\Lambda y)} 145 | 146 | LogisticKernel is a fermionic analytic continuation kernel. 147 | Nevertheless, one can model the τ dependence of 148 | a bosonic correlation function as follows: 149 | 150 | .. math:: 151 | 152 | \int \frac{\exp(-\Lambda y(x + 1)/2)}{1 - \exp(-\Lambda y)} \rho(y) dy 153 | = \int K(x, y) \frac{\rho'(y)}{\tanh(\Lambda y/2)} dy 154 | 155 | i.e., a rescaling of the spectral function with the weight function: 156 | 157 | .. math:: w(y) = \frac1{\tanh(\Lambda y/2)}. 
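    Example (a small sketch; the ``lambda_`` value is illustrative)::

        import numpy as np
        K = LogisticKernel(42.0)
        K(0.0, 0.5)                           # kernel value at one (x, y) point
        K(np.linspace(-1, 1, 5)[:, None],
          np.linspace(-1, 1, 7)[None, :])     # broadcasts over arrays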
158 | """ 159 | def __init__(self, lambda_): 160 | self.lambda_ = lambda_ 161 | 162 | def __call__(self, x, y, x_plus=None, x_minus=None): 163 | x, y = _check_domain(self, x, y) 164 | u_plus, u_minus, v = _compute_uv(self.lambda_, x, y, x_plus, x_minus) 165 | return self._compute(u_plus, u_minus, v) 166 | 167 | def sve_hints(self, eps): 168 | return _SVEHintsLogistic(self, eps) 169 | 170 | def _compute(self, u_plus, u_minus, v): 171 | # By introducing u_\pm = (1 \pm x)/2 and v = lambda * y, we can write 172 | # the kernel in the following two ways: 173 | # 174 | # k = exp(-u_+ * v) / (exp(-v) + 1) 175 | # = exp(-u_- * -v) / (exp(v) + 1) 176 | # 177 | # We need to use the upper equation for v >= 0 and the lower one for 178 | # v < 0 to avoid overflowing both numerator and denominator 179 | abs_v = np.abs(v) 180 | enum = np.exp(-abs_v * np.where(v > 0, u_plus, u_minus)) 181 | denom = 1 + np.exp(-abs_v) 182 | return enum / denom 183 | 184 | @property 185 | def is_centrosymmetric(self): 186 | return True 187 | 188 | def get_symmetrized(self, sign): 189 | if sign == -1: 190 | return _LogisticKernelOdd(self, sign) 191 | return super().get_symmetrized(sign) 192 | 193 | @property 194 | def conv_radius(self): return 40 * self.lambda_ 195 | 196 | def weight_func(self, statistics: str) -> Callable[[np.ndarray], np.ndarray]: 197 | """ 198 | Return the weight function for given statistics. 199 | 200 | - Fermion: `w(x) == 1` 201 | - Boson: `w(y) == 1/tanh(Λ*y/2)` 202 | """ 203 | if statistics not in "FB": 204 | raise ValueError("invalid value of statistics argument") 205 | if statistics == "F": 206 | return lambda y: np.ones_like(y) 207 | else: 208 | return lambda y: 1/np.tanh(0.5*self.lambda_*y) 209 | 210 | 211 | class _SVEHintsLogistic(AbstractSVEHints): 212 | def __init__(self, kernel, eps): 213 | self.kernel = kernel 214 | self.eps = eps 215 | 216 | @property 217 | def ngauss(self): return 10 if self.eps >= 1e-8 else 16 218 | 219 | @property 220 | def segments_x(self): 221 | nzeros = max(int(np.round(15 * np.log10(self.kernel.lambda_))), 1) 222 | diffs = 1./np.cosh(.143 * np.arange(nzeros)) 223 | zeros_pos = diffs.cumsum() 224 | zeros_pos /= zeros_pos[-1] 225 | return np.concatenate((-zeros_pos[::-1], [0], zeros_pos)) 226 | 227 | @property 228 | def segments_y(self): 229 | # Zeros around -1 and 1 are distributed asymptotically identical 230 | leading_diffs = np.array([ 231 | 0.01523, 0.03314, 0.04848, 0.05987, 0.06703, 0.07028, 0.07030, 232 | 0.06791, 0.06391, 0.05896, 0.05358, 0.04814, 0.04288, 0.03795, 233 | 0.03342, 0.02932, 0.02565, 0.02239, 0.01951, 0.01699]) 234 | 235 | nzeros = max(int(np.round(20 * np.log10(self.kernel.lambda_))), 2) 236 | if nzeros < 20: 237 | leading_diffs = leading_diffs[:nzeros] 238 | diffs = .25 / np.exp(.141 * np.arange(nzeros)) 239 | diffs[:leading_diffs.size] = leading_diffs 240 | zeros = diffs.cumsum() 241 | zeros = zeros[:-1] / zeros[-1] 242 | zeros -= 1 243 | return np.concatenate(([-1], zeros, [0], -zeros[::-1], [1])) 244 | 245 | @property 246 | def nsvals(self): 247 | log10_lambda = max(1, np.log10(self.kernel.lambda_)) 248 | return int(np.round((25 + log10_lambda) * log10_lambda)) 249 | 250 | 251 | class RegularizedBoseKernel(AbstractKernel): 252 | r"""Regularized bosonic analytical continuation kernel. 253 | 254 | In dimensionless variables ``x = 2*τ/β - 1``, ``y = β*ω/Λ``, the fermionic 255 | integral kernel is a function on ``[-1, 1] x [-1, 1]``: 256 | 257 | .. 
math:: 258 | 259 | K(x, y) = \frac{y \exp(-\Lambda y(x + 1)/2)}{\exp(-\Lambda y) - 1} 260 | 261 | Care has to be taken in evaluating this expression around ``y == 0``. 262 | """ 263 | def __init__(self, lambda_): 264 | self.lambda_ = lambda_ 265 | 266 | def __call__(self, x, y, x_plus=None, x_minus=None): 267 | x, y = _check_domain(self, x, y) 268 | u_plus, u_minus, v = _compute_uv(self.lambda_, x, y, x_plus, x_minus) 269 | return self._compute(u_plus, u_minus, v) 270 | 271 | def _compute(self, u_plus, u_minus, v): 272 | # With "reduced variables" u, v we have: 273 | # 274 | # K = -1/lambda * exp(-u_+ * v) * v / (exp(-v) - 1) 275 | # = -1/lambda * exp(-u_- * -v) * (-v) / (exp(v) - 1) 276 | # 277 | # where we again need to use the upper equation for v >= 0 and the 278 | # lower one for v < 0 to avoid overflow. 279 | abs_v = np.abs(v) 280 | enum = np.exp(-abs_v * np.where(v >= 0, u_plus, u_minus)) 281 | dtype = v.dtype 282 | 283 | # The expression ``v / (exp(v) - 1)`` is tricky to evaluate: firstly, 284 | # it has a singularity at v=0, which can be cured by treating that case 285 | # separately. Secondly, the denominator loses precision around 0 since 286 | # exp(v) = 1 + v + ..., which can be avoided using expm1(...) 287 | not_tiny = abs_v >= 1e-200 288 | denom = -np.ones_like(abs_v) 289 | np.divide(abs_v, np.expm1(-abs_v, where=not_tiny), 290 | out=denom, where=not_tiny) 291 | return -1/dtype.type(self.lambda_) * enum * denom 292 | 293 | def sve_hints(self, eps): 294 | return _SVEHintsRegularizedBose(self, eps) 295 | 296 | @property 297 | def is_centrosymmetric(self): 298 | return True 299 | 300 | def get_symmetrized(self, sign): 301 | if sign == -1: 302 | return _RegularizedBoseKernelOdd(self, sign) 303 | return super().get_symmetrized(sign) 304 | 305 | @property 306 | def ypower(self): return 1 307 | 308 | @property 309 | def conv_radius(self): return 40 * self.lambda_ 310 | 311 | def weight_func(self, statistics: str) -> Callable[[np.ndarray], np.ndarray]: 312 | """ Return the weight function for given statistics """ 313 | if statistics != "B": 314 | raise ValueError("Kernel is designed for bosonic functions") 315 | return lambda y: 1/y 316 | 317 | 318 | class _SVEHintsRegularizedBose(AbstractSVEHints): 319 | def __init__(self, kernel, eps): 320 | self.kernel = kernel 321 | self.eps = eps 322 | 323 | @property 324 | def ngauss(self): return 10 if self.eps >= 1e-8 else 16 325 | 326 | @property 327 | def segments_x(self): 328 | # Somewhat less accurate ... 
329 |         nzeros = max(int(np.round(15 * np.log10(self.kernel.lambda_))), 15)
330 |         diffs = 1./np.cosh(.18 * np.arange(nzeros))
331 |         zeros_pos = diffs.cumsum()
332 |         zeros_pos /= zeros_pos[-1]
333 |         return np.concatenate((-zeros_pos[::-1], [0], zeros_pos))
334 | 
335 |     @property
336 |     def segments_y(self):
337 |         # Zeros around -1 and 1 are distributed asymptotically identically
338 |         leading_diffs = np.array([
339 |             0.01363, 0.02984, 0.04408, 0.05514, 0.06268, 0.06679, 0.06793,
340 |             0.06669, 0.06373, 0.05963, 0.05488, 0.04987, 0.04487, 0.04005,
341 |             0.03553, 0.03137, 0.02758, 0.02418, 0.02115, 0.01846])
342 | 
343 |         nzeros = max(int(np.round(20 * np.log10(self.kernel.lambda_))), 20)
344 |         i = np.arange(nzeros)
345 |         diffs = .12/np.exp(.0337 * i * np.log(i+1))
346 |         #diffs[:leading_diffs.size] = leading_diffs
347 |         zeros = diffs.cumsum()
348 |         zeros = zeros[:-1] / zeros[-1]
349 |         zeros -= 1
350 |         return np.concatenate(([-1], zeros, [0], -zeros[::-1], [1]))
351 | 
352 |     @property
353 |     def nsvals(self):
354 |         log10_lambda = max(1, np.log10(self.kernel.lambda_))
355 |         return int(28 * log10_lambda)
356 | 
357 | 
358 | class ReducedKernel(AbstractKernel):
359 |     """Restriction of centrosymmetric kernel to positive interval.
360 | 
361 |     For a kernel ``K`` on ``[-1, 1] x [-1, 1]`` that is centrosymmetric, i.e.
362 |     ``K(x, y) == K(-x, -y)``, it is straightforward to show that the left/right
363 |     singular vectors can be chosen as either odd or even functions.
364 | 
365 |     Consequently, they are singular functions of a reduced kernel ``K_red``
366 |     on ``[0, 1] x [0, 1]``, given by::
367 | 
368 |         K_red(x, y) == K(x, y) + sign * K(x, -y)
369 | 
370 |     where ``sign`` is either +1 (even) or -1 (odd). This kernel is what this
371 |     class represents. The full singular functions can be reconstructed by
372 |     (anti-)symmetrically continuing them to the negative axis.
373 |     """
374 |     def __init__(self, inner, sign=1):
375 |         if not inner.is_centrosymmetric:
376 |             raise ValueError("inner kernel must be centrosymmetric")
377 |         if np.abs(sign) != 1:
378 |             raise ValueError("sign must be either +1 or -1")
379 | 
380 |         self.inner = inner
381 |         self.sign = sign
382 | 
383 |     def __call__(self, x, y, x_plus=None, x_minus=None):
384 |         x, y = _check_domain(self, x, y)
385 | 
386 |         # The reduced kernel is defined only over the interval [0, 1], which
387 |         # means we must add one to get the x_plus for the inner kernels. We
388 |         # can compute this as 1 + x, since we are away from -1.
389 |         x_plus = 1 + x
390 | 
391 |         K_plus = self.inner(x, y, x_plus, x_minus)
392 |         K_minus = self.inner(x, -y, x_plus, x_minus)
393 |         return K_plus + K_minus if self.sign == 1 else K_plus - K_minus
394 | 
395 |     @property
396 |     def xrange(self):
397 |         _, xmax = self.inner.xrange
398 |         return 0, xmax
399 | 
400 |     @property
401 |     def yrange(self):
402 |         _, ymax = self.inner.yrange
403 |         return 0, ymax
404 | 
405 |     def sve_hints(self, eps):
406 |         return _SVEHintsReduced(self.inner.sve_hints(eps))
407 | 
408 |     @property
409 |     def is_centrosymmetric(self):
410 |         """True iff K(x,y) = K(-x, -y)"""
411 |         return False
412 | 
413 |     def get_symmetrized(self, sign):
414 |         raise RuntimeError("cannot symmetrize twice")
415 | 
416 |     @property
417 |     def ypower(self): return self.inner.ypower
418 | 
419 |     @property
420 |     def conv_radius(self): return self.inner.conv_radius
421 | 
422 | 
423 | class _SVEHintsReduced(AbstractSVEHints):
424 |     def __init__(self, inner_hints):
425 |         self.inner_hints = inner_hints
426 | 
427 |     @property
428 |     def ngauss(self): return self.inner_hints.ngauss
429 | 
430 |     @property
431 |     def segments_x(self): return _symm_segments(self.inner_hints.segments_x)
432 | 
433 |     @property
434 |     def segments_y(self): return _symm_segments(self.inner_hints.segments_y)
435 | 
436 |     @property
437 |     def nsvals(self): return (self.inner_hints.nsvals + 1) // 2
438 | 
439 | 
440 | class _LogisticKernelOdd(ReducedKernel):
441 |     """Fermionic analytical continuation kernel, odd.
442 | 
443 |     In dimensionless variables ``x = 2*τ/β - 1``, ``y = β*ω/Λ``, the fermionic
444 |     integral kernel is a function on ``[-1, 1] x [-1, 1]``::
445 | 
446 |         K(x, y) == -sinh(Λ/2 * x * y) / cosh(Λ/2 * y)
447 |     """
448 |     def __call__(self, x, y, x_plus=None, x_minus=None):
449 |         result = super().__call__(x, y, x_plus, x_minus)
450 | 
451 |         # For x * y around 0, antisymmetrization introduces cancellation, which
452 |         # reduces the relative precision. To combat this, we replace the
453 |         # values with the explicit form
454 |         v_half = self.inner.lambda_/2 * y
455 |         xy_small = x * v_half < 1
456 |         cosh_finite = v_half < 85
457 |         np.divide(-np.sinh(v_half * x, where=xy_small),
458 |                   np.cosh(v_half, where=cosh_finite),
459 |                   out=result, where=np.logical_and(xy_small, cosh_finite))
460 |         return result
461 | 
462 | 
463 | class _RegularizedBoseKernelOdd(ReducedKernel):
464 |     """Bosonic analytical continuation kernel, odd.
465 | 
466 |     In dimensionless variables ``x = 2*τ/β - 1``, ``y = β*ω/Λ``, the bosonic
467 |     integral kernel is a function on ``[-1, 1] x [-1, 1]``::
468 | 
469 |         K(x, y) = -y * sinh(Λ/2 * x * y) / sinh(Λ/2 * y)
470 |     """
471 |     def __call__(self, x, y, x_plus=None, x_minus=None):
472 |         result = super().__call__(x, y, x_plus, x_minus)
473 | 
474 |         # For x * y around 0, antisymmetrization introduces cancellation, which
475 |         # reduces the relative precision. To combat this, we replace the
476 |         # values with the explicit form
477 |         v_half = self.inner.lambda_/2 * y
478 |         xv_half = x * v_half
479 |         xy_small = xv_half < 1
480 |         sinh_range = np.logical_and(v_half > 1e-200, v_half < 85)
481 |         np.divide(
482 |             np.multiply(-y, np.sinh(xv_half, where=xy_small), where=xy_small),
483 |             np.sinh(v_half, where=sinh_range),
484 |             out=result, where=np.logical_and(xy_small, sinh_range))
485 |         return result
486 | 
487 | 
488 | def matrix_from_gauss(kernel, gauss_x, gauss_y):
489 |     """Compute matrix for kernel from Gauss rule"""
490 |     # (1 +- x) is problematic around x = -1 and x = 1, where the quadrature
491 |     # nodes are clustered most tightly.
Thus we have the need for the 492 | # matrix method. 493 | return kernel(gauss_x.x[:,None], gauss_y.x[None,:], 494 | gauss_x.x_forward[:,None], gauss_x.x_backward[:,None]) 495 | 496 | 497 | def _check_domain(kernel, x, y): 498 | """Check that arguments lie within the correct domain""" 499 | x = np.asarray(x) 500 | xmin, xmax = kernel.xrange 501 | if not (x >= xmin).all() or not (x <= xmax).all(): 502 | raise ValueError("x values not in range [{:g},{:g}]".format(xmin, xmax)) 503 | 504 | y = np.asarray(y) 505 | ymin, ymax = kernel.yrange 506 | if not (y >= ymin).all() or not (y <= ymax).all(): 507 | raise ValueError("y values not in range [{:g},{:g}]".format(ymin, ymax)) 508 | return x, y 509 | 510 | 511 | def _symm_segments(x): 512 | x = np.asarray(x) 513 | if not np.allclose(x, -x[::-1]): 514 | raise ValueError("segments must be symmetric") 515 | xpos = x[x.size // 2:] 516 | if xpos[0] != 0: 517 | xpos = np.hstack([0, xpos]) 518 | return xpos 519 | 520 | 521 | def _compute_uv(lambda_, x, y, x_plus=None, x_minus=None): 522 | if x_plus is None: 523 | x_plus = 1 + x 524 | if x_minus is None: 525 | x_minus = 1 - x 526 | u_plus = .5 * x_plus 527 | u_minus = .5 * x_minus 528 | v = lambda_ * y 529 | return u_plus, u_minus, v 530 | -------------------------------------------------------------------------------- /src/sparse_ir/poly.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import numpy as np 4 | from warnings import warn 5 | import numpy.polynomial.legendre as np_legendre 6 | import scipy.special as sp_special 7 | 8 | from . import _util 9 | from . import _roots 10 | from . import _gauss 11 | 12 | 13 | class PiecewiseLegendrePoly: 14 | """Piecewise Legendre polynomial. 15 | 16 | Models a function on the interval ``[-1, 1]`` as a set of segments on the 17 | intervals ``S[i] = [a[i], a[i+1]]``, where on each interval the function 18 | is expanded in scaled Legendre polynomials. 
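
    A minimal sketch of setting one up by hand (the coefficients below are
    arbitrary illustration values, not taken from any kernel)::

        import numpy as np
        # 3 Legendre coefficients on each of the 2 segments [-1, 0], [0, 1]
        data = np.array([[0.5, 0.5],
                         [0.1, -0.1],
                         [0.02, 0.02]])
        knots = np.array([-1.0, 0.0, 1.0])
        p = PiecewiseLegendrePoly(data, knots)
        p(0.5)           # value at x = 0.5
        p([0.2, 0.7])    # vectorized evaluation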
19 | """ 20 | def __init__(self, data, knots, dx=None, symm=None): 21 | """Piecewise Legendre polynomial""" 22 | if np.isnan(data).any(): 23 | raise ValueError("PiecewiseLegendrePoly: data contains NaN!") 24 | if isinstance(knots, self.__class__): 25 | if dx is not None or symm is None: 26 | raise RuntimeError("wrong arguments") 27 | self.__dict__.update(knots.__dict__) 28 | self.data = data 29 | self.symm = symm 30 | return 31 | 32 | data = np.array(data) 33 | knots = np.array(knots) 34 | polyorder, nsegments = data.shape[:2] 35 | if knots.shape != (nsegments+1,): 36 | raise ValueError("Invalid knots array") 37 | if not (knots[1:] >= knots[:-1]).all(): 38 | raise ValueError("Knots must be monotonically increasing") 39 | if symm is None: 40 | # TODO: infer symmetry from data 41 | symm = np.zeros(data.shape[2:]) 42 | else: 43 | symm = np.array(symm) 44 | if symm.shape != data.shape[2:]: 45 | raise ValueError("shape mismatch") 46 | if dx is None: 47 | dx = knots[1:] - knots[:-1] 48 | else: 49 | dx = np.array(dx) 50 | if not np.allclose(dx, knots[1:] - knots[:-1]): 51 | raise ValueError("dx must work with knots") 52 | 53 | self.nsegments = nsegments 54 | self.polyorder = polyorder 55 | self.xmin = knots[0] 56 | self.xmax = knots[-1] 57 | 58 | self.knots = knots 59 | self.dx = dx 60 | self.data = data 61 | self.symm = symm 62 | self._xm = .5 * (knots[1:] + knots[:-1]) 63 | self._inv_xs = 2/dx 64 | self._norm = np.sqrt(self._inv_xs) 65 | 66 | def __getitem__(self, l): 67 | """Return part of a set of piecewise polynomials""" 68 | new_symm = self.symm[l] 69 | if isinstance(l, tuple): 70 | new_data = self.data[(slice(None), slice(None), *l)] 71 | else: 72 | new_data = self.data[:,:,l] 73 | return self.__class__(new_data, self, symm=new_symm) 74 | 75 | def __call__(self, x): 76 | """Evaluate polynomial at position x""" 77 | i, xtilde = self._split(np.asarray(x)) 78 | data = self.data[:, i] 79 | 80 | # Evaluate for all values of l. x and data array must be 81 | # broadcast'able against each other, so we append dimensions here 82 | func_dims = self.data.ndim - 2 83 | datashape = i.shape + (1,) * func_dims 84 | res = np_legendre.legval(xtilde.reshape(datashape), data, tensor=False) 85 | res *= self._norm[i.reshape(datashape)] 86 | 87 | # Finally, exchange the x and vector dimensions 88 | order = tuple(range(i.ndim, i.ndim + func_dims)) + tuple(range(i.ndim)) 89 | return res.transpose(*order) 90 | 91 | def value(self, l, x): 92 | """Return value for l and x.""" 93 | if self.data.ndim != 3: 94 | raise ValueError("Only allowed for vector of data") 95 | 96 | l, x = np.broadcast_arrays(l, x) 97 | i, xtilde = self._split(x) 98 | data = self.data[:, i, l] 99 | 100 | # This should now neatly broadcast against each other 101 | res = np_legendre.legval(xtilde, data, tensor=False) 102 | res *= self._norm[i] 103 | return res 104 | 105 | def overlap(self, f, *, rtol=2.3e-16, return_error=False, points=None): 106 | r"""Evaluate overlap integral of this polynomial with function ``f``. 107 | 108 | Given the function ``f``, evaluate the integral:: 109 | 110 | ∫ dx * f(x) * self(x) 111 | 112 | using piecewise Gauss-Legendre quadrature, where ``self`` are the 113 | polynomials. 114 | 115 | Arguments: 116 | f (callable): 117 | function that is called with a point ``x`` and returns ``f(x)`` 118 | at that position. 
119 | 120 | points (sequence of floats) 121 | A sequence of break points in the integration interval 122 | where local difficulties of the integrand may occur 123 | (e.g., singularities, discontinuities) 124 | 125 | Return: 126 | array-like object with shape (poly_dims, f_dims) 127 | poly_dims are the shape of the polynomial and f_dims are those 128 | of the function f(x). 129 | """ 130 | int_result, int_error = _compute_overlap(self, f, rtol=rtol, points=points) 131 | if return_error: 132 | return int_result, int_error 133 | else: 134 | return int_result 135 | 136 | def deriv(self, n=1): 137 | """Get polynomial for the n'th derivative""" 138 | ddata = np_legendre.legder(self.data, n) 139 | 140 | _scale_shape = (1, -1) + (1,) * (self.data.ndim - 2) 141 | scale = self._inv_xs ** n 142 | ddata *= scale.reshape(_scale_shape) 143 | return self.__class__(ddata, self, symm=(-1)**n * self.symm) 144 | 145 | def roots(self, alpha=2): 146 | """Find all roots of the piecewise polynomial 147 | 148 | Assume that between each two knots (pieces) there are at most ``alpha`` 149 | roots. 150 | """ 151 | if self.data.ndim > 2: 152 | raise ValueError("select single polynomial before calling roots()") 153 | 154 | grid = self.knots 155 | xmid = (self.xmax + self.xmin) / 2 156 | if self.symm: 157 | if grid[self.nsegments // 2] == xmid: 158 | grid = grid[self.nsegments//2:] 159 | else: 160 | grid = np.hstack((xmid, grid[grid > xmid])) 161 | 162 | grid = _refine_grid(grid, alpha) 163 | roots = _roots.find_all(self, grid) 164 | 165 | if self.symm == 1: 166 | revroots = (self.xmax + self.xmin) - roots[::-1] 167 | roots = np.hstack((revroots, roots)) 168 | elif self.symm == -1: 169 | # There must be a zero at exactly the midpoint, but we may either 170 | # slightly miss it or have a spurious zero 171 | if roots.size: 172 | if roots[0] == xmid or self(xmid) * self.deriv()(xmid) < 0: 173 | roots = roots[1:] 174 | revroots = (self.xmax + self.xmin) - roots[::-1] 175 | roots = np.hstack((revroots, xmid, roots)) 176 | 177 | return roots 178 | 179 | @property 180 | def shape(self): return self.data.shape[2:] 181 | 182 | @property 183 | def size(self): return self.data[:1,:1].size 184 | 185 | @property 186 | def ndim(self): return self.data.ndim - 2 187 | 188 | def _split(self, x): 189 | """Split segment""" 190 | x = _util.check_range(x, self.xmin, self.xmax) 191 | i = self.knots.searchsorted(x, 'right').clip(None, self.nsegments) 192 | i -= 1 193 | xtilde = x - self._xm[i] 194 | xtilde *= self._inv_xs[i] 195 | return i, xtilde 196 | 197 | 198 | class PiecewiseLegendreFT: 199 | """Fourier transform of a piecewise Legendre polynomial. 200 | 201 | For a given frequency index ``n``, the Fourier transform of the Legendre 202 | function is defined as:: 203 | 204 | phat(n) == ∫ dx exp(1j * pi * n * x / (xmax - xmin)) p(x) 205 | 206 | The polynomial is continued either periodically (``freq='even'``), in which 207 | case ``n`` must be even, or antiperiodically (``freq='odd'``), in which case 208 | ``n`` must be odd. 
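
    A sketch of the intended use, assuming ``u`` is a single fermionic basis
    function represented as a ``PiecewiseLegendrePoly`` on ``[-1, 1]``::

        uhat = PiecewiseLegendreFT(u, freq='odd')
        uhat(np.array([-3, -1, 1, 3]))   # transform at odd reduced frequencies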
209 | """ 210 | _DEFAULT_GRID = np.hstack([np.arange(2**6), 211 | (2**np.linspace(6, 35, 16*(35-6)+1)).astype(int)]) 212 | 213 | def __init__(self, poly, freq='even', n_asymp=None, power_model=None): 214 | if poly.xmin != -1 or poly.xmax != 1: 215 | raise NotImplementedError("Only interval [-1, 1] supported") 216 | self.poly = poly 217 | self.freq = freq 218 | self.zeta = {'any': None, 'even': 0, 'odd': 1}[freq] 219 | if n_asymp is None: 220 | self.n_asymp = np.inf 221 | self._model = None 222 | else: 223 | self.n_asymp = n_asymp 224 | if power_model is None: 225 | self._model = _power_model(freq, poly) 226 | else: 227 | self._model = power_model 228 | 229 | @property 230 | def shape(self): return self.poly.shape 231 | 232 | @property 233 | def size(self): return self.poly.size 234 | 235 | @property 236 | def ndim(self): return self.poly.ndim 237 | 238 | def __getitem__(self, l): 239 | model = self._model if self._model is None else self._model[l] 240 | return self.__class__(self.poly[l], self.freq, self.n_asymp, model) 241 | 242 | @_util.ravel_argument(last_dim=True) 243 | def __call__(self, n): 244 | """Obtain Fourier transform of polynomial for given frequencies""" 245 | n = _util.check_reduced_matsubara(n, self.zeta) 246 | result = _compute_unl_inner(self.poly, n) 247 | 248 | # We use the asymptotics at frequencies larger than conv_radius 249 | # since it has lower relative error. 250 | cond_outer = np.abs(n) >= self.n_asymp 251 | if cond_outer.any(): 252 | n_outer = n[cond_outer] 253 | result[..., cond_outer] = self._model.giw(n_outer).T 254 | 255 | return result 256 | 257 | def extrema(self, *, part=None, grid=None, positive_only=False): 258 | """Obtain extrema of Fourier-transformed polynomial.""" 259 | if self.poly.shape: 260 | raise ValueError("select single polynomial") 261 | if grid is None: 262 | grid = self._DEFAULT_GRID 263 | 264 | f = self._func_for_part(part) 265 | x0 = _roots.discrete_extrema(f, grid) 266 | x0 = 2 * x0 + self.zeta 267 | if not positive_only: 268 | x0 = _symmetrize_matsubara(x0) 269 | return x0 270 | 271 | def sign_changes(self, *, part=None, grid=None, positive_only=False): 272 | """Obtain sign changes of Fourier-transformed polynomial.""" 273 | if self.poly.shape: 274 | raise ValueError("select single polynomial") 275 | if grid is None: 276 | grid = self._DEFAULT_GRID 277 | 278 | f = self._func_for_part(part) 279 | x0 = _roots.find_all(f, grid, type='discrete') 280 | x0 = 2 * x0 + self.zeta 281 | if not positive_only: 282 | x0 = _symmetrize_matsubara(x0) 283 | return x0 284 | 285 | def _func_for_part(self, part=None): 286 | if part is None: 287 | parity = self.poly.symm 288 | if np.allclose(parity, 1): 289 | part = 'real' if self.zeta == 0 else 'imag' 290 | elif np.allclose(parity, -1): 291 | part = 'imag' if self.zeta == 0 else 'real' 292 | else: 293 | raise ValueError("cannot detect parity.") 294 | if part == 'real': 295 | return lambda n: self(2*n + self.zeta).real 296 | elif part == 'imag': 297 | return lambda n: self(2*n + self.zeta).imag 298 | else: 299 | raise ValueError("part must be either 'real' or 'imag'") 300 | 301 | 302 | 303 | def _imag_power(n): 304 | """Imaginary unit raised to an integer power without numerical error""" 305 | n = np.asarray(n) 306 | if not np.issubdtype(n.dtype, np.integer): 307 | raise ValueError("expecting set of integers here") 308 | cycle = np.array([1, 0+1j, -1, 0-1j], complex) 309 | return cycle[n % 4] 310 | 311 | 312 | def _get_tnl(l, w): 313 | r"""Fourier integral of the l-th Legendre polynomial:: 314 | 315 | 
T_l(w) == \int_{-1}^1 dx \exp(iwx) P_l(x)
316 |     """
317 |     # spherical_jn gives NaN for w < 0, but since we know that P_l(x) is real,
318 |     # we evaluate at abs(w) and simply conjugate the result for w < 0.
319 |     result = 2 * _imag_power(l) * sp_special.spherical_jn(l, np.abs(w))
320 |     np.conjugate(result, out=result, where=w < 0)
321 |     return result
322 | 
323 | 
324 | def _shift_xmid(knots, dx):
325 |     r"""Return midpoint relative to the nearest integer plus a shift.
326 | 
327 |     Return the midpoints ``xmid`` of the segments, as pair ``(diff, shift)``,
328 |     where shift is in ``(0,1,-1)`` and ``diff`` is a float such that
329 |     ``xmid == shift + diff`` to floating point accuracy.
330 |     """
331 |     dx_half = dx / 2
332 |     xmid_m1 = dx.cumsum() - dx_half
333 |     xmid_p1 = -dx[::-1].cumsum()[::-1] + dx_half
334 |     xmid_0 = knots[1:] - dx_half
335 | 
336 |     shift = np.round(xmid_0).astype(int)
337 |     diff = np.choose(shift+1, (xmid_m1, xmid_0, xmid_p1))
338 |     return diff, shift
339 | 
340 | 
341 | def _phase_stable(poly, wn):
342 |     """Phase factor for the piecewise Legendre to Matsubara transform.
343 | 
344 |     Compute the following phase factor in a stable way::
345 | 
346 |         np.exp(1j * np.pi/2 * wn[:,None] * poly.dx.cumsum()[None,:])
347 |     """
348 |     # A naive implementation loses precision close to x=1 and/or x=-1:
349 |     # there, the multiplication with `wn` results in almost `wn//4` extra
350 |     # turns around the unit circle. The cosines and sines will first map those
351 |     # back to the interval [-pi, pi) before doing the computation, which loses
352 |     # digits in dx. To avoid this, we extract the nearest integer dx.cumsum()
353 |     # and rewrite above expression like below.
354 |     #
355 |     # Now `wn` still results in extra revolutions, but the mapping back does
356 |     # not cut digits that were not there in the first place.
357 |     xmid_diff, extra_shift = _shift_xmid(poly.knots, poly.dx)
358 | 
359 |     if np.issubdtype(wn.dtype, np.integer):
360 |         shift_arg = wn[None,:] * xmid_diff[:,None]
361 |     else:
362 |         delta_wn, wn = np.modf(wn)
363 |         wn = wn.astype(int)
364 |         shift_arg = wn[None,:] * xmid_diff[:,None]
365 |         shift_arg += delta_wn[None,:] * (extra_shift + xmid_diff)[:,None]
366 | 
367 |     phase_shifted = np.exp(0.5j * np.pi * shift_arg)
368 |     corr = _imag_power((extra_shift[:,None] + 1) * wn[None,:])
369 |     return corr * phase_shifted
370 | 
371 | 
372 | def _compute_unl_inner(poly, wn):
373 |     """Compute piecewise Legendre to Matsubara transform."""
374 |     dx_half = poly.dx / 2
375 | 
376 |     data_flat = poly.data.reshape(*poly.data.shape[:2], -1)
377 |     data_sc = data_flat * np.sqrt(dx_half/2)[None,:,None]
378 |     p = np.arange(poly.polyorder)
379 | 
380 |     wred = np.pi/2 * wn
381 |     phase_wi = _phase_stable(poly, wn)
382 |     t_pin = _get_tnl(p[:,None,None], wred[None,:] * dx_half[:,None]) * phase_wi
383 | 
384 |     # Perform the following, but faster:
385 |     #   result = einsum('pin,pil->nl', t_pin, data_sc)
386 |     npi = poly.polyorder * poly.nsegments
387 |     result_flat = (t_pin.reshape(npi,-1).T @ data_sc.reshape(npi,-1)).T
388 |     return result_flat.reshape(*poly.data.shape[2:], wn.size)
389 | 
390 | 
391 | class _PowerModel:
392 |     """Model from a high-frequency series expansion::
393 | 
394 |         A(iw) == sum(A[n] / (iw)**(n+1) for n in range(N))
395 | 
396 |     where ``iw == 1j * pi/2 * wn`` is a reduced imaginary frequency, i.e.,
397 |     ``wn`` is an odd/even number for fermionic/bosonic frequencies.
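
    For instance, with two moments the model reads
    ``A(iw) == A[0]/(iw) + A[1]/(iw)**2``; ``giw`` evaluates this in Horner
    form, ``(A[1]/(iw) + A[0])/(iw)``.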
398 | """ 399 | def __init__(self, moments): 400 | """Initialize model""" 401 | if moments.ndim == 1: 402 | moments = moments[:, None] 403 | self.moments = np.asarray(moments) 404 | self.nmom, self.nl = self.moments.shape 405 | 406 | @_util.ravel_argument() 407 | def giw(self, wn): 408 | """Return model Green's function for vector of frequencies""" 409 | wn = _util.check_reduced_matsubara(wn) 410 | result_dtype = np.result_type(1j, wn, self.moments) 411 | result = np.zeros((wn.size, self.nl), result_dtype) 412 | inv_iw = 1j * np.pi/2 * wn 413 | np.reciprocal(inv_iw, out=inv_iw, where=(wn != 0)) 414 | for mom in self.moments[::-1]: 415 | result += mom 416 | result *= inv_iw[:, None] 417 | return result 418 | 419 | def __getitem__(self, l): 420 | return self.__class__(self.moments[:,l]) 421 | 422 | 423 | def _derivs(ppoly, x): 424 | """Evaluate polynomial and its derivatives at specific x""" 425 | yield ppoly(x) 426 | for _ in range(ppoly.polyorder-1): 427 | ppoly = ppoly.deriv() 428 | yield ppoly(x) 429 | 430 | 431 | def _power_moments(stat, deriv_x1): 432 | """Return moments""" 433 | statsign = {'odd': -1, 'even': 1}[stat] 434 | mmax, lmax = deriv_x1.shape 435 | m = np.arange(mmax)[:,None] 436 | l = np.arange(lmax)[None,:] 437 | coeff_lm = ((-1.0)**(m+1) + statsign * (-1.0)**l) * deriv_x1 438 | return -statsign/np.sqrt(2.0) * coeff_lm 439 | 440 | 441 | def _power_model(stat, poly): 442 | deriv_x1 = np.asarray(list(_derivs(poly, x=1))) 443 | if deriv_x1.ndim == 1: 444 | deriv_x1 = deriv_x1[:,None] 445 | moments = _power_moments(stat, deriv_x1) 446 | return _PowerModel(moments) 447 | 448 | 449 | def _refine_grid(knots, alpha): 450 | """Linear refinement of grid""" 451 | result = np.linspace(knots[:-1], knots[1:], alpha, endpoint=False) 452 | return np.hstack((result.T.ravel(), knots[-1])) 453 | 454 | 455 | def _symmetrize_matsubara(x0): 456 | if not x0.size: 457 | return x0 458 | if not (x0[1:] >= x0[:-1]).all(): 459 | raise ValueError("set of Matsubara points not ordered") 460 | if not (x0[0] >= 0): 461 | raise ValueError("points must be non-negative") 462 | if x0[0] == 0: 463 | x0 = np.hstack([-x0[::-1], x0[1:]]) 464 | else: 465 | x0 = np.hstack([-x0[::-1], x0]) 466 | return x0 467 | 468 | 469 | def _compute_overlap(poly, f, rtol=2.3e-16, radix=2, max_refine_levels=40, 470 | max_refine_points=2000, points=None): 471 | base_rule = _gauss.kronrod_31_15() 472 | if points is None: 473 | knots = poly.knots 474 | else: 475 | points = np.asarray(points) 476 | knots = np.unique(np.hstack((poly.knots, points))) 477 | xstart = knots[:-1] 478 | xstop = knots[1:] 479 | 480 | f_shape = None 481 | res_value = 0 482 | res_error = 0 483 | res_magn = 0 484 | for _ in range(max_refine_levels): 485 | #print(f"Level {_}: {xstart.size} segments") 486 | if xstart.size > max_refine_points: 487 | warn("Refinement is too broad, aborting (increase rtol)") 488 | break 489 | 490 | rule = base_rule.reseat(xstart[:, None], xstop[:, None]) 491 | 492 | fx = np.array(list(map(f, rule.x.ravel()))) 493 | if f_shape is None: 494 | f_shape = fx.shape[1:] 495 | elif fx.shape[1:] != f_shape: 496 | raise ValueError("inconsistent shapes") 497 | fx = fx.reshape(rule.x.shape + (-1,)) 498 | 499 | valx = poly(rule.x).reshape(-1, *rule.x.shape, 1) * fx 500 | int21 = (valx[:, :, :, :] * rule.w[:, :, None]).sum(2) 501 | int10 = (valx[:, :, rule.vsel, :] * rule.v[:, :, None]).sum(2) 502 | intdiff = np.abs(int21 - int10) 503 | intmagn = np.abs(int10) 504 | 505 | magn = res_magn + intmagn.sum(1).max(1) 506 | relerror = intdiff.max(2) / 
magn[:, None] 507 | 508 | xconverged = (relerror <= rtol).all(0) 509 | res_value += int10[:, xconverged].sum(1) 510 | res_error += intdiff[:, xconverged].sum(1) 511 | res_magn += intmagn[:, xconverged].sum(1).max(1) 512 | if xconverged.all(): 513 | break 514 | 515 | xrefine = ~xconverged 516 | xstart = xstart[xrefine] 517 | xstop = xstop[xrefine] 518 | xedge = np.linspace(xstart, xstop, radix + 1, axis=-1) 519 | xstart = xedge[:, :-1].ravel() 520 | xstop = xedge[:, 1:].ravel() 521 | else: 522 | warn("Integration did not converge after refinement") 523 | 524 | res_shape = poly.shape + f_shape 525 | return res_value.reshape(res_shape), res_error.reshape(res_shape) 526 | -------------------------------------------------------------------------------- /src/sparse_ir/sampling.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import numpy as np 4 | from warnings import warn 5 | 6 | from . import _util 7 | 8 | 9 | class AbstractSampling: 10 | """Base class for sparse sampling. 11 | 12 | Encodes the "basis transformation" of a propagator from the truncated IR 13 | basis coefficients ``G_ir[l]`` to time/frequency sampled on sparse points 14 | ``G(x[i])`` together with its inverse, a least squares fit:: 15 | 16 | ________________ ___________________ 17 | | | evaluate | | 18 | | Basis |---------------->| Value on | 19 | | coefficients |<----------------| sampling points | 20 | |________________| fit |___________________| 21 | 22 | """ 23 | def evaluate(self, al, axis=None, *, points=None): 24 | """Evaluate the basis coefficients at sampling points. 25 | 26 | Arguments: 27 | al (array): 28 | Array where the `l`-th item along `axis` corresponds to the 29 | `l`-th basis coefficient 30 | axis (integer): 31 | Axis or dimension of `al` along which to evaluate the function. 32 | Defaults to the last, i.e., rightmost axis. 33 | points (vector): 34 | Points on which the results should be evaluated. Defaults 35 | to the sampling points for which the sampling objects was 36 | created. 37 | 38 | .. versionadded:: 1.1 39 | 40 | Return: 41 | Array where the `n`-th item along `axis` corresponds to the 42 | value on the `n`-th sampling point (or value on `point[n]`, if 43 | given.) 44 | 45 | Note: 46 | If `points` is given, a new sampling is created at each invocation, 47 | which can result in a performance hit. Consider caching sampling 48 | objects or simply using the `.u()` and `.uhat()` methods of the 49 | underlying basis. 50 | """ 51 | if points is not None: 52 | return self._for_sampling_points(points).evaluate(al, axis) 53 | 54 | return self.matrix.matmul(al, axis) 55 | 56 | def fit(self, ax, axis=None, *, points=None): 57 | """Fit the basis coefficients from the sampling points. 58 | 59 | Arguments: 60 | ax (array): 61 | Array where the `n`-th item along `axis` corresponds to the 62 | value on the `n`-th sampling point (or value on `point[n]`, if 63 | given.) 64 | axis (integer): 65 | Axis or dimension of `ax` along which to fit the function. 66 | Defaults to the last, i.e., rightmost axis. 67 | points (vector): 68 | Points on which the `ax` is given. Defaults to the sampling 69 | points for which the sampling objects was created. 70 | 71 | .. 
versionadded:: 1.1 72 | 73 | Return: 74 | Array where the `l`-th item along `axis` corresponds to the 75 | `l`-th basis coefficient 76 | 77 | Note: 78 | If `points` is given, a new sampling is created at each invocation, 79 | which can result in a performance hit. Consider caching sampling 80 | objects. 81 | """ 82 | if points is not None: 83 | return self._for_sampling_points(points).fit(ax, axis) 84 | 85 | matrix = self.matrix 86 | if self.basis.is_well_conditioned and not (matrix.cond <= 1e8): 87 | warn(f"Sampling matrix is poorly conditioned " 88 | f"(kappa = {matrix.cond:.2g})", ConditioningWarning) 89 | 90 | return matrix.lstsq(ax, axis) 91 | 92 | @property 93 | def cond(self): 94 | """Condition number of the fitting problem""" 95 | return self.matrix.cond 96 | 97 | @property 98 | def sampling_points(self): 99 | """Set of sampling points""" 100 | raise NotImplementedError() 101 | 102 | @property 103 | def matrix(self): 104 | """Evaluation matrix is decomposed form""" 105 | raise NotImplementedError() 106 | 107 | @property 108 | def basis(self): 109 | """Basis instance""" 110 | raise NotImplementedError() 111 | 112 | def _for_sampling_points(self, x): 113 | raise RuntimeError("Changing sampling points is not possible") 114 | 115 | 116 | class TauSampling(AbstractSampling): 117 | """Sparse sampling in imaginary time. 118 | 119 | Allows the transformation between the IR basis and a set of sampling points 120 | in (scaled/unscaled) imaginary time. 121 | """ 122 | def __init__(self, basis, sampling_points=None): 123 | if sampling_points is None: 124 | sampling_points = basis.default_tau_sampling_points() 125 | else: 126 | sampling_points = np.asarray(sampling_points) 127 | if sampling_points.ndim != 1: 128 | raise ValueError("sampling points must be vector") 129 | 130 | matrix = basis.u(sampling_points).T 131 | self._basis = basis 132 | self._sampling_points = sampling_points 133 | self._matrix = DecomposedMatrix(matrix) 134 | 135 | @property 136 | def basis(self): return self._basis 137 | 138 | @property 139 | def sampling_points(self): return self._sampling_points 140 | 141 | @property 142 | def matrix(self): return self._matrix 143 | 144 | @property 145 | def tau(self): 146 | """Sampling points in (reduced) imaginary time""" 147 | return self._sampling_points 148 | 149 | def _for_sampling_points(self, x): 150 | x = np.asarray(x) 151 | return TauSampling(self._basis, x) 152 | 153 | 154 | class MatsubaraSampling(AbstractSampling): 155 | """Sparse sampling in Matsubara frequencies. 156 | 157 | Allows the transformation between the IR basis and a set of sampling points 158 | in (scaled/unscaled) imaginary frequencies. 159 | 160 | By setting ``positive_only=True``, one assumes that functions to be fitted 161 | are symmetric in Matsubara frequency, i.e.:: 162 | 163 | Ghat(iv) == Ghat(-iv).conj() 164 | 165 | or equivalently, that they are purely real in imaginary time. In this 166 | case, sparse sampling is performed over non-negative frequencies only, 167 | cutting away half of the necessary sampling space. 
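
    A typical round trip, sketched here for some IR basis object ``basis``
    and data ``giv`` given on the default sampling frequencies::

        smpl = MatsubaraSampling(basis, positive_only=True)
        gl = smpl.fit(giv)         # basis coefficients (real)
        giv2 = smpl.evaluate(gl)   # back to the sampling frequencies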
168 | """ 169 | def __init__(self, basis, sampling_points=None, *, positive_only=False): 170 | if sampling_points is None: 171 | sampling_points = basis.default_matsubara_sampling_points( 172 | positive_only=positive_only) 173 | else: 174 | sampling_points = _util.check_reduced_matsubara(sampling_points) 175 | if sampling_points.ndim != 1: 176 | raise ValueError("sampling points must be vector") 177 | sampling_points = np.sort(sampling_points) 178 | if positive_only and not sampling_points[0] >= 0: 179 | raise ValueError("invalid negative sampling frequencies") 180 | 181 | matrix = basis.uhat(sampling_points).T 182 | self._basis = basis 183 | self._sampling_points = sampling_points 184 | self._positive_only = positive_only 185 | 186 | if positive_only: 187 | ssvd_result = _split_complex(matrix, sampling_points[0] == 0) 188 | self._matrix = SplitDecomposedMatrix(matrix, ssvd_result) 189 | else: 190 | self._matrix = DecomposedMatrix(matrix) 191 | 192 | @property 193 | def basis(self): return self._basis 194 | 195 | @property 196 | def sampling_points(self): return self._sampling_points 197 | 198 | @property 199 | def matrix(self): return self._matrix 200 | 201 | @property 202 | def positive_only(self): 203 | """Sampling is performed only on non-negative sampling frequencies""" 204 | return self._positive_only 205 | 206 | @property 207 | def wn(self): 208 | """Sampling points as (reduced) Matsubara frequencies""" 209 | return self._sampling_points 210 | 211 | def _for_sampling_points(self, x): 212 | x = np.asarray(x) 213 | return MatsubaraSampling(self._basis, x, 214 | positive_only=self._positive_only) 215 | 216 | 217 | class DecomposedMatrix: 218 | """Matrix in SVD decomposed form for fast and accurate fitting. 219 | 220 | Stores a matrix ``A`` together with its thin SVD form:: 221 | 222 | A == (u * s) @ vH. 223 | 224 | This allows for fast and accurate least squares fits using ``A.lstsq(x)``. 
225 | """ 226 | def __init__(self, a, svd_result=None): 227 | a = np.asarray(a) 228 | if a.ndim != 2: 229 | raise ValueError("a must be of matrix form") 230 | if svd_result is None: 231 | u, s, vH = np.linalg.svd(a, full_matrices=False) 232 | else: 233 | u, s, vH = _util.check_svd_result(svd_result, a.shape) 234 | 235 | # Remove singular values which are exactly zero 236 | where = s.astype(bool) 237 | if not where.all(): 238 | u, s, vH = u[:, where], s[where], vH[where] 239 | 240 | self._a = a 241 | self._uH = np.array(u.conj().T) 242 | self._s = s 243 | self._v = np.array(vH.conj().T) 244 | 245 | def __matmul__(self, x): 246 | """Matrix-matrix multiplication.""" 247 | return self._a @ x 248 | 249 | def matmul(self, x, axis=None): 250 | """Compute ``A @ x`` (optionally along specified axis of x)""" 251 | return _matop_along_axis(self._a.__matmul__, x, axis) 252 | 253 | def _lstsq(self, x): 254 | r = self._uH @ x 255 | r = r / (self._s[:, None] if r.ndim > 1 else self._s) 256 | return self._v @ r 257 | 258 | def lstsq(self, x, axis=None): 259 | """Return ``y`` such that ``np.linalg.norm(A @ y - x)`` is minimal""" 260 | return _matop_along_axis(self._lstsq, x, axis) 261 | 262 | def __array__(self, dtype=""): 263 | """Convert to numpy array.""" 264 | return self._a if dtype == "" else self._a.astype(dtype) 265 | 266 | @property 267 | def a(self): 268 | """Full matrix""" 269 | return self._a 270 | 271 | @property 272 | def u(self): 273 | """Left singular vectors, aranged column-wise""" 274 | return self._uH.conj().T 275 | 276 | @property 277 | def s(self): 278 | """Most significant, nonzero singular values""" 279 | return self._s 280 | 281 | @property 282 | def vH(self): 283 | """Right singular vectors, transposed""" 284 | return self._v.conj().T 285 | 286 | @property 287 | def cond(self): 288 | """Condition number of matrix""" 289 | return self._s[0] / self._s[-1] 290 | 291 | 292 | class SplitDecomposedMatrix: 293 | """Matrix in "split" SVD decomposed form for fast and accurate fitting. 294 | 295 | Stores a matrix ``A`` together with its "split SVD" form:: 296 | 297 | A == u * s @ vT 298 | 299 | where `vT` is a real matrix and `u` is a complex matrix. The "split" SVD 300 | form differs from the SVD in that the least squares fit has to be 301 | constructed as follows: 302 | 303 | fit(A, x) == vT.T / s @ (u.conj().T * x).real 304 | 305 | This again allows for fast and accurate least squares fits using 306 | ``A.lstsq(x)``. This is useful in the case where. 
307 | """ 308 | def __init__(self, a, ssvd_result): 309 | a = np.asarray(a) 310 | if a.ndim != 2: 311 | raise ValueError("a must be of matrix form") 312 | 313 | u, s, vT = _util.check_svd_result(ssvd_result, a.shape) 314 | if np.issubdtype(vT.dtype, np.complexfloating): 315 | raise ValueError("Split SVD part vT shall be a real matrix") 316 | if not np.issubdtype(u.dtype, np.complexfloating): 317 | raise ValueError("Split SVD part u shall be a complex matrix") 318 | 319 | # Remove singular values which are exactly zero 320 | where = s.astype(bool) 321 | if not where.all(): 322 | u, s, vT = u[:, where], s[where], vT[where] 323 | 324 | self._a = a 325 | self._urealT = np.array(u.T.real) 326 | self._uimagT = np.array(u.T.imag) 327 | self._s = s 328 | self._v = np.array(vT.T) 329 | 330 | def __matmul__(self, x): 331 | """Matrix-matrix multiplication.""" 332 | x = np.asarray(x) 333 | if np.issubdtype(x.dtype, np.complexfloating): 334 | warn("Expecting array of real numbers in expansion", UserWarning, 2) 335 | return self._a @ x 336 | 337 | def matmul(self, x, axis=None): 338 | """Compute ``A @ x`` (optionally along specified axis of x)""" 339 | return _matop_along_axis(self._a.__matmul__, x, axis) 340 | 341 | def _lstsq(self, x): 342 | r = self._urealT @ x.real 343 | r += self._uimagT @ x.imag 344 | r = r / (self._s[:, None] if r.ndim > 1 else self._s) 345 | return self._v @ r 346 | 347 | def lstsq(self, x, axis=None): 348 | """Return ``y`` such that ``np.linalg.norm(A @ y - x)`` is minimal""" 349 | return _matop_along_axis(self._lstsq, x, axis) 350 | 351 | def __array__(self, dtype=""): 352 | """Convert to numpy array.""" 353 | return self._a if dtype == "" else self._a.astype(dtype) 354 | 355 | @property 356 | def a(self): 357 | """Full matrix""" 358 | return self._a 359 | 360 | @property 361 | def u(self): 362 | """Split left singular vectors, aranged column-wise""" 363 | return (self._urealT + 1j * self._uimagT).T 364 | 365 | @property 366 | def s(self): 367 | """Most significant, nonzero singular values""" 368 | return self._s 369 | 370 | @property 371 | def vH(self): 372 | """Right singular vectors, transposed""" 373 | return self._v.conj().T 374 | 375 | @property 376 | def cond(self): 377 | """Condition number of matrix""" 378 | return self._s[0] / self._s[-1] 379 | 380 | 381 | class ConditioningWarning(RuntimeWarning): 382 | """Warns about a poorly conditioned problem. 383 | 384 | This warning is issued if the library detects a poorly conditioned fitting 385 | problem. This essentially means there is a high degree of ambiguity in how 386 | to choose the solution. One must therefore expect to lose significant 387 | precision in the parameter values. 
388 | """ 389 | pass 390 | 391 | 392 | def _matop_along_axis(op, x, axis=None): 393 | if axis is None: 394 | return op(x) 395 | 396 | x = np.asarray(x) 397 | target_axis = max(x.ndim - 2, 0) 398 | x = np.moveaxis(x, axis, target_axis) 399 | r = op(x) 400 | return np.moveaxis(r, target_axis, axis) 401 | 402 | 403 | def _split_complex(mat, has_zero=False, svd_algo=np.linalg.svd): 404 | mat = np.asarray(mat) 405 | n, _l = mat.shape 406 | if not np.issubdtype(mat.dtype, np.complexfloating): 407 | raise ValueError("mat must be complex matrix") 408 | 409 | # split real and imaginary part into separate matrices 410 | offset_imag = 1 if has_zero else 0 411 | rmat = np.vstack((mat.real, mat[offset_imag:].imag)) 412 | 413 | # perform real-valued SVD 414 | ur, s, vT = svd_algo(rmat, full_matrices=False) 415 | 416 | # undo the split of the resulting vT vector 417 | u = np.empty((n, s.size), mat.dtype) 418 | u.real = ur[:n] 419 | u[:offset_imag].imag = 0 420 | u[offset_imag:].imag = ur[n:] 421 | return u, s, vT 422 | -------------------------------------------------------------------------------- /src/sparse_ir/svd.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | from warnings import warn 4 | import numpy as np 5 | import scipy.linalg.interpolative as intp_decomp 6 | 7 | try: 8 | from xprec import ddouble as _ddouble, finfo 9 | import xprec.linalg as _xprec_linalg 10 | 11 | MAX_DTYPE = _ddouble 12 | MAX_EPS = 5e-32 13 | except ImportError: 14 | _ddouble = None 15 | _xprec_linalg = None 16 | 17 | MAX_DTYPE = np.double 18 | MAX_EPS = np.finfo(MAX_DTYPE).eps 19 | finfo = np.finfo 20 | 21 | try: 22 | from scipy.linalg.lapack import dgejsv as _lapack_dgejsv 23 | except ImportError: 24 | _lapack_dgejsv = None 25 | 26 | 27 | def compute(a_matrix, n_sv_hint=None, strategy='fast'): 28 | """Compute thin/truncated singular value decomposition 29 | 30 | Computes the thin/truncated singular value decomposition of a matrix ``A`` 31 | into ``U``, ``s``, ``V``: 32 | 33 | A == (U * s) @ V.T 34 | 35 | Depending on the strategy, only as few as ``n_sv_hint`` most significant 36 | singular values may be returned, but applications should not rely on this 37 | behvaiour. The ``strategy`` parameter can be ``fast`` (RRQR/t-SVD), 38 | ``default`` (full SVD) or ``accurate`` (Jacobi rotation SVD). 39 | """ 40 | a_matrix = np.asarray(a_matrix) 41 | m, n = a_matrix.shape 42 | if n_sv_hint is None: 43 | n_sv_hint = min(m, n) 44 | n_sv_hint = min(m, n, n_sv_hint) 45 | 46 | if _ddouble is not None and a_matrix.dtype == _ddouble: 47 | u, s, v = _ddouble_svd_trunc(a_matrix) 48 | elif strategy == 'fast': 49 | u, s, v = _idsvd(a_matrix, n_sv=n_sv_hint) 50 | elif strategy == 'default': 51 | # Usual (simple) SVD 52 | u, s, vh = np.linalg.svd(a_matrix, full_matrices=False) 53 | v = vh.T.conj() 54 | elif strategy == 'accurate': 55 | # Most accurate SVD 56 | if _lapack_dgejsv is None: 57 | warn("dgejsv (accurate SVD) is not available. Falling back to\n" 58 | "default SVD. 
Expect slightly lower precision.\n" 59 | "Use xprec or scipy >= 1.5 to fix the issue.") 60 | return compute(a_matrix, n_sv_hint, strategy='default') 61 | u, s, v = _dgejsv(a_matrix, mode='F') 62 | else: 63 | raise ValueError("invalid strategy:" + str(strategy)) 64 | 65 | return u, s, v 66 | 67 | 68 | def _idsvd(a, n_sv): 69 | # Use interpolative decomposition, since it scales favorably to a full 70 | # SVD when we are only interested in a small subset of singular values. 71 | # NOTE: this returns the right singular vectors, not their conjugate! 72 | intp_decomp.seed(4711) 73 | return intp_decomp.svd(a, n_sv) 74 | 75 | 76 | def _dgejsv(a, mode='A'): 77 | """Compute SVD using the (more accurate) Jacobi method""" 78 | # GEJSV can only handle tall matrices 79 | m, n = a.shape 80 | if m < n: 81 | u, s, v = _dgejsv(a.T, mode) 82 | return v, s, u 83 | 84 | mode = mode.upper() 85 | joba = dict(zip("CEFGAR", range(6)))[mode] 86 | s, u, v, _stat, istat, info = _lapack_dgejsv(a, joba) 87 | if info < 0: 88 | raise ValueError("LAPACK error - invalid parameter") 89 | if istat[2] != 0: 90 | warn("a contained denormalized floats - possible loss of accuracy", 91 | UserWarning, 2) 92 | if info > 0: 93 | warn("SVD did not converge", UserWarning, 2) 94 | return u, s, v 95 | 96 | 97 | def _ddouble_svd_trunc(a): 98 | """Truncated SVD with double double precision""" 99 | if _xprec_linalg is None: 100 | raise RuntimeError("require xprec package for this precision") 101 | u, s, vh = _xprec_linalg.svd_trunc(a) 102 | return u, s, vh.T 103 | -------------------------------------------------------------------------------- /src/sparse_ir/sve.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | from warnings import warn 4 | import numpy as np 5 | 6 | from . import _gauss 7 | from . import poly 8 | from . import svd 9 | from . import kernel 10 | 11 | HAVE_XPREC = svd._ddouble is not None 12 | 13 | 14 | def compute(K, eps=None, cutoff=None, n_sv=None, n_gauss=None, dtype=float, 15 | work_dtype=None, sve_strat=None, svd_strat=None): 16 | """Perform truncated singular value expansion of a kernel. 17 | 18 | Perform a truncated singular value expansion (SVE) of an integral 19 | kernel ``K : [xmin, xmax] x [ymin, ymax] -> R``:: 20 | 21 | K(x, y) == sum(s[l] * u[l](x) * v[l](y) for l in (0, 1, 2, ...)), 22 | 23 | where ``s[l]`` are the singular values, which are ordered in non-increasing 24 | fashion, ``u[l](x)`` are the left singular functions, which form an 25 | orthonormal system on ``[xmin, xmax]``, and ``v[l](y)`` are the right 26 | singular functions, which form an orthonormal system on ``[ymin, ymax]``. 27 | 28 | The SVE is mapped onto the singular value decomposition (SVD) of a matrix 29 | by expanding the kernel in piecewise Legendre polynomials (by default by 30 | using a collocation). 31 | 32 | Arguments: 33 | K (kernel.AbstractKernel): 34 | Integral kernel to take SVE from 35 | eps (float): 36 | Accuracy target for the basis: attempt to have singular values down 37 | to a relative magnitude of ``eps``, and have each singular value 38 | and singular vector be accurate to ``eps``. A ``work_dtype`` with 39 | a machine epsilon of ``eps**2`` or lower is required to satisfy 40 | this. Defaults to ``2.2e-16`` if xprec is available, and ``1e-8`` 41 | otherwise. 42 | cutoff (float): 43 | Relative cutoff for the singular values. 
A ``work_dtype`` with
44 |             machine epsilon of ``cutoff`` is required to satisfy this.
45 |             Defaults to a small multiple of the machine epsilon.
46 | 
47 |             Note that ``cutoff`` and ``eps`` serve distinct purposes. ``cutoff``
48 |             represents the accuracy to which the kernel is reproduced, whereas
49 |             ``eps`` is the accuracy to which the singular values and vectors
50 |             are guaranteed.
51 |         n_sv (int):
52 |             Maximum basis size. If given, only at most the ``n_sv`` most
53 |             significant singular values and associated singular functions are
54 |             returned.
55 |         n_gauss (int):
56 |             Order of Legendre polynomials. Defaults to kernel hinted value.
57 |         dtype (np.dtype):
58 |             Data type of the result.
59 |         work_dtype (np.dtype):
60 |             Working data type. Defaults to a data type with machine epsilon of
61 |             at most ``eps**2`` and at most ``cutoff``, or otherwise to the most
62 |             accurate data type available.
63 |         sve_strat (AbstractSVE):
64 |             SVE to SVD translation strategy. Defaults to ``SamplingSVE``,
65 |             optionally wrapped inside of a ``CentrosymmSVE`` if the kernel
66 |             is centrosymmetric.
67 |         svd_strat ('fast' or 'default' or 'accurate'):
68 |             SVD solver. Defaults to the fast (ID/RRQR) based solution
69 |             when accuracy goals are moderate, and to the more accurate
70 |             Jacobi-based algorithm otherwise.
71 | 
72 |     Returns:
73 |         An ``SVEResult`` containing the truncated singular value expansion.
74 |     """
75 |     safe_eps, work_dtype, svd_strat = _safe_eps(eps, work_dtype, svd_strat)
76 |     if sve_strat is None:
77 |         sve_strat = CentrosymmSVE if K.is_centrosymmetric else SamplingSVE
78 |     if cutoff is None:
79 |         cutoff = 2 * svd.finfo(work_dtype).eps
80 |     sve = sve_strat(K, safe_eps, n_gauss=n_gauss, dtype=work_dtype)
81 |     u, s, v = zip(*(svd.compute(matrix, sve.nsvals_hint, svd_strat)
82 |                     for matrix in sve.matrices))
83 |     u, s, v = _truncate(u, s, v, cutoff, n_sv)
84 |     return sve.postprocess(u, s, v, dtype)
85 | 
86 | 
87 | class AbstractSVE:
88 |     """Truncated singular value expansion (SVE) of an integral kernel.
89 | 
90 |     Given an integral kernel `K`, this provides methods for computing its
91 |     truncated singular value expansion (SVE), given by::
92 | 
93 |         K(x, y) == sum(s[l] * u[l](x) * v[l](y) for l in range(L)),
94 | 
95 |     where `L` is the truncation, `u[l](x)` is the `l`-th left singular
96 |     function, `s[l]` is the `l`-th singular value, and `v[l](y)` is the `l`-th
97 |     right singular function. The left and right singular functions form
98 |     orthonormal systems on their respective spaces.
99 | 
100 |     Computing the SVE involves introducing two sets of basis functions on the
101 |     `x` and `y` axes, translating the SVE into one or more matrices, then
102 |     computing the singular value decomposition of those matrices, and finally
103 |     postprocessing the data.
104 |     """
105 |     @property
106 |     def matrices(self):
107 |         """SVD problems underlying the SVE."""
108 |         raise NotImplementedError()
109 | 
110 |     def postprocess(self, u, s, v, dtype=None):
111 |         """Constructs the SVE result from the SVD"""
112 |         raise NotImplementedError()
113 | 
114 | 
115 | class SVEResult:
116 |     """Result of singular value expansion"""
117 |     def __init__(self, u, s, v, K, eps=None):
118 |         self.u = u
119 |         self.s = s
120 |         self.v = v
121 | 
122 |         # In addition to its SVE, we remember the type of kernel and also the
123 |         # accuracy to which the SVE was computed.
124 |         self.K = K
125 |         self.eps = eps
126 | 
127 |     def part(self, eps=None, max_size=None):
128 |         if eps is None:
129 |             eps = self.eps
130 |         cut = (self.s >= eps * self.s[0]).sum()
131 |         if max_size is not None and max_size < cut:
132 |             cut = max_size
133 |         if cut == self.s.size:
134 |             return self.u, self.s, self.v
135 |         else:
136 |             return self.u[:cut], self.s[:cut], self.v[:cut]
137 | 
138 |     def __iter__(self):
139 |         return iter((self.u, self.s, self.v))
140 | 
141 | 
142 | class SamplingSVE(AbstractSVE):
143 |     """SVE to SVD translation by sampling technique [1].
144 | 
145 |     Maps the singular value expansion (SVE) of a kernel ``K`` onto the singular
146 |     value decomposition of a matrix ``A``. This is achieved by choosing two
147 |     sets of Gauss quadrature rules: ``(x, wx)`` and ``(y, wy)`` and
148 |     approximating the integrals in the SVE equations by finite sums. This
149 |     implies that the singular values of the SVE are well-approximated by the
150 |     singular values of the following matrix::
151 | 
152 |         A[i, j] = sqrt(wx[i]) * K(x[i], y[j]) * sqrt(wy[j])
153 | 
154 |     and the values of the singular functions at the Gauss sampling points can
155 |     be reconstructed from the singular vectors ``u`` and ``v`` as follows::
156 | 
157 |         u[l,i] ≈ sqrt(wx[i]) u[l](x[i])
158 |         v[l,j] ≈ sqrt(wy[j]) v[l](y[j])
159 | 
160 |     [1] P. Hansen, Discrete Inverse Problems, Ch. 3.1
161 |     """
162 |     def __init__(self, K, eps, *, n_gauss=None, dtype=float):
163 |         self.K = K
164 |         sve_hints = K.sve_hints(eps)
165 |         if n_gauss is None:
166 |             n_gauss = sve_hints.ngauss
167 | 
168 |         self.eps = eps
169 |         self.n_gauss = n_gauss
170 |         self.nsvals_hint = sve_hints.nsvals
171 |         self._rule = _gauss.legendre(n_gauss, dtype)
172 |         self._segs_x = sve_hints.segments_x.astype(dtype)
173 |         self._segs_y = sve_hints.segments_y.astype(dtype)
174 |         self._gauss_x = self._rule.piecewise(self._segs_x)
175 |         self._gauss_y = self._rule.piecewise(self._segs_y)
176 |         self._sqrtw_x = np.sqrt(self._gauss_x.w)
177 |         self._sqrtw_y = np.sqrt(self._gauss_y.w)
178 | 
179 |     @property
180 |     def matrices(self):
181 |         result = kernel.matrix_from_gauss(self.K, self._gauss_x, self._gauss_y)
182 |         result *= self._sqrtw_x[:, None]
183 |         result *= self._sqrtw_y[None, :]
184 |         return result,
185 | 
186 |     def postprocess(self, u, s, v, dtype=None):
187 |         u, = u
188 |         s, = s
189 |         v, = v
190 |         if dtype is None:
191 |             dtype = np.result_type(u, s, v)
192 | 
193 |         s = s.astype(dtype)
194 |         u_x = u / self._sqrtw_x[:,None]
195 |         v_y = v / self._sqrtw_y[:,None]
196 | 
197 |         u_x = u_x.reshape(self._segs_x.size - 1, self.n_gauss, s.size)
198 |         v_y = v_y.reshape(self._segs_y.size - 1, self.n_gauss, s.size)
199 | 
200 |         cmat = _gauss.legendre_collocation(self._rule)
201 |         # lx,ixs -> ils -> lis
202 |         u_data = (cmat @ u_x).transpose(1, 0, 2)
203 |         v_data = (cmat @ v_y).transpose(1, 0, 2)
204 | 
205 |         dsegs_x = self._segs_x[1:] - self._segs_x[:-1]
206 |         dsegs_y = self._segs_y[1:] - self._segs_y[:-1]
207 |         u_data *= np.sqrt(.5 * dsegs_x)[None,:,None]
208 |         v_data *= np.sqrt(.5 * dsegs_y)[None,:,None]
209 | 
210 |         # Construct polynomial
211 |         ulx = poly.PiecewiseLegendrePoly(
212 |             u_data.astype(dtype), self._segs_x.astype(dtype))
213 |         vly = poly.PiecewiseLegendrePoly(
214 |             v_data.astype(dtype), self._segs_y.astype(dtype))
215 |         _canonicalize(ulx, vly)
216 |         return SVEResult(ulx, s, vly, self.K, self.eps)
217 | 
218 | 
219 | class CentrosymmSVE(AbstractSVE):
220 |     """SVE of centrosymmetric kernel in block-diagonal (even/odd) basis.
221 | 
222 |     For a centrosymmetric kernel ``K``, i.e., a kernel satisfying:
223 |     ``K(x, y) == K(-x, -y)``, one can make the following ansatz for the
224 |     singular functions::
225 | 
226 |         u[l](x) = ured[l](x) + sign[l] * ured[l](-x)
227 |         v[l](y) = vred[l](y) + sign[l] * vred[l](-y)
228 | 
229 |     where ``sign[l]`` is either +1 or -1. This means that the singular value
230 |     expansion can be block-diagonalized into an even and an odd part by
231 |     (anti-)symmetrizing the kernel::
232 | 
233 |         Keven = K(x, y) + K(x, -y)
234 |         Kodd  = K(x, y) - K(x, -y)
235 | 
236 |     The l-th basis function, restricted to the positive interval, is then
237 |     the singular function of one of these kernels. If the kernel generates a
238 |     Chebyshev system [1], then even and odd basis functions alternate.
239 | 
240 |     [1]: S. Karlin, Total Positivity (1968).
241 |     """
242 |     def __init__(self, K, eps, *, InnerSVE=None, **inner_args):
243 |         if InnerSVE is None:
244 |             InnerSVE = SamplingSVE
245 |         self.K = K
246 |         self.eps = eps
247 | 
248 |         # Inner kernels for even and odd functions
249 |         self.even = InnerSVE(K.get_symmetrized(+1), eps, **inner_args)
250 |         self.odd = InnerSVE(K.get_symmetrized(-1), eps, **inner_args)
251 | 
252 |         # Now extract the hints
253 |         self.nsvals_hint = max(self.even.nsvals_hint, self.odd.nsvals_hint)
254 | 
255 |     @property
256 |     def matrices(self):
257 |         m, = self.even.matrices
258 |         yield m
259 |         m, = self.odd.matrices
260 |         yield m
261 | 
262 |     def postprocess(self, u, s, v, dtype):
263 |         u_even, s_even, v_even = self.even.postprocess(u[:1], s[:1], v[:1], dtype)
264 |         u_odd, s_odd, v_odd = self.odd.postprocess(u[1:], s[1:], v[1:], dtype)
265 | 
266 |         # Merge two sets - data is [legendre, segment, l]
267 |         u_data = np.concatenate([u_even.data, u_odd.data], axis=2)
268 |         v_data = np.concatenate([v_even.data, v_odd.data], axis=2)
269 |         s = np.concatenate([s_even, s_odd])
270 |         signs = np.concatenate([np.ones(s_even.size), -np.ones(s_odd.size)])
271 | 
272 |         # Sort: for the totally positive kernels defined in this module this
273 |         # is, strictly speaking, not necessary, since we know that the even
274 |         # and odd functions intersperse.
275 |         sort = s.argsort()[::-1]
276 |         u_data = u_data[:, :, sort]
277 |         v_data = v_data[:, :, sort]
278 |         s = s[sort]
279 |         signs = signs[sort]
280 | 
281 |         # Extend to the negative side
282 |         inv_sqrt2 = 1/np.sqrt(np.array(2, dtype=u_data.dtype))
283 |         u_data *= inv_sqrt2
284 |         v_data *= inv_sqrt2
285 |         poly_flip_x = ((-1)**np.arange(u_data.shape[0]))[:, None, None]
286 |         u_neg = u_data[:, ::-1, :] * poly_flip_x * signs
287 |         v_neg = v_data[:, ::-1, :] * poly_flip_x * signs
288 |         u_data = np.concatenate([u_neg, u_data], axis=1)
289 |         v_data = np.concatenate([v_neg, v_data], axis=1)
290 | 
291 |         # TODO: this relies on specific symmetrization behaviour ...
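        # (namely, that the segments hinted for the full kernel coincide with
        # the mirror extension of the segments used for the reduced kernels,
        # so that the data layout built above matches full_hints below)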
292 | full_hints = self.K.sve_hints(self.eps) 293 | u = poly.PiecewiseLegendrePoly(u_data, full_hints.segments_x, symm=signs) 294 | v = poly.PiecewiseLegendrePoly(v_data, full_hints.segments_y, symm=signs) 295 | return SVEResult(u, s, v, self.K, self.eps) 296 | 297 | 298 | def _safe_eps(eps_required, work_dtype, svd_strat): 299 | # First, choose the working dtype based on the eps required 300 | if work_dtype is None: 301 | if eps_required is None or eps_required < 1e-8: 302 | work_dtype = svd.MAX_DTYPE 303 | else: 304 | work_dtype = np.float64 305 | else: 306 | work_dtype = np.dtype(work_dtype) 307 | 308 | # Next, work out the actual epsilon 309 | if work_dtype == np.float64: 310 | # This is technically a bit too low (the true value is about 1.5e-8), 311 | # but it's not too far off and easier to remember for the user. 312 | safe_eps = 1e-8 313 | else: 314 | safe_eps = np.sqrt(svd.finfo(work_dtype).eps) 315 | 316 | # Work out the SVD strategy to be used. If the user sets this, we 317 | # assume they know what they are doing and do not warn if they compute 318 | # the basis. 319 | warn_acc = False 320 | if svd_strat is None: 321 | if eps_required is not None and eps_required < safe_eps: 322 | svd_strat = 'accurate' 323 | warn_acc = True 324 | else: 325 | svd_strat = 'fast' 326 | 327 | if warn_acc: 328 | msg = (f"\n" 329 | f"Requested accuracy is {eps_required:.2g}, which is below the\n" 330 | f"accuracy {safe_eps:.2g} for the work data type {work_dtype}.\n" 331 | f"Expect singular values and basis functions for large l to\n" 332 | f"have lower precision than the cutoff.\n") 333 | if not HAVE_XPREC: 334 | msg += "Install the xprec package to gain more precision.\n" 335 | warn(msg, UserWarning, 3) 336 | 337 | return safe_eps, work_dtype, svd_strat 338 | 339 | 340 | def _canonicalize(ulx, vly): 341 | """Canonicalize basis. 342 | 343 | Each SVD (u_l, v_l) pair is unique only up to a global phase, which may 344 | differ from implementation to implementation and also platform. We 345 | fix that gauge by demanding u_l(1) > 0. This ensures a diffeomorphic 346 | connection to the Legendre polynomials for lambda_ -> 0. 347 | """ 348 | gauge = np.sign(ulx(1)) 349 | ulx.data[None, None, :] *= 1/gauge 350 | vly.data[None, None, :] *= gauge 351 | 352 | 353 | def _truncate(u, s, v, rtol=0, lmax=None): 354 | """Truncate singular value expansion. 355 | 356 | Arguments: 357 | 358 | - ``u``, ``s``, ``v``: Thin singular value expansion 359 | - ``rtol`` : If given, only singular values satisfying 360 | ``s[l]/s[0] > rtol`` are retained. 361 | - ``lmax`` : If given, at most the ``lmax`` most significant singular 362 | values are retained. 363 | """ 364 | if lmax is not None and (lmax < 0 or int(lmax) != lmax): 365 | raise ValueError("invalid value of maximum number of singular values") 366 | if not (0 <= rtol <= 1): 367 | raise ValueError("invalid relative tolerance") 368 | 369 | sall = np.hstack(s) 370 | 371 | # Determine singular value cutoff. Note that by selecting a cutoff even 372 | # in the case of lmax, we make sure to never remove parts of a degenerate 373 | # singular value space, rather, we reduce the size of the basis. 
374 |     ssort = np.sort(sall)
375 |     cutoff = rtol * ssort[-1]
376 |     if lmax is not None and lmax < sall.size:
377 |         cutoff = max(cutoff, ssort[sall.size - lmax - 1])
378 | 
379 |     # Determine how many singular values survive in each group
380 |     scount = [(si > cutoff).sum() for si in s]
381 | 
382 |     u_cut = [ui[:, :counti] for (ui, counti) in zip(u, scount)]
383 |     s_cut = [si[:counti] for (si, counti) in zip(s, scount)]
384 |     v_cut = [vi[:, :counti] for (vi, counti) in zip(v, scount)]
385 |     return u_cut, s_cut, v_cut
386 | 
--------------------------------------------------------------------------------
/test/conftest.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020-2021 Markus Wallerberger and others
2 | # SPDX-License-Identifier: MIT
3 | #
4 | # This file is available from EVERY test in the directory. This is why
5 | # we use it to compute the bases ONCE.
6 | import pytest
7 | import sparse_ir
8 | 
9 | 
10 | @pytest.fixture(scope="package")
11 | def sve_logistic():
12 |     """SVEs of the logistic kernel for Lambda = 10, 42, and 10_000"""
13 |     print("Precomputing SVEs for logistic kernel ...")
14 |     return {
15 |         10: sparse_ir.compute_sve(sparse_ir.LogisticKernel(10)),
16 |         42: sparse_ir.compute_sve(sparse_ir.LogisticKernel(42)),
17 |         10_000: sparse_ir.compute_sve(sparse_ir.LogisticKernel(10_000))
18 |     }
19 | 
20 | 
21 | @pytest.fixture(scope="package")
22 | def sve_reg_bose():
23 |     """SVEs of the regularized Bose kernel for Lambda = 10 and 10_000"""
24 |     print("Precomputing SVEs for regularized Bose kernel ...")
25 |     return {
26 |         10: sparse_ir.compute_sve(sparse_ir.RegularizedBoseKernel(10)),
27 |         10_000: sparse_ir.compute_sve(sparse_ir.RegularizedBoseKernel(10_000))
28 |     }
--------------------------------------------------------------------------------
/test/test_augment.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
2 | # SPDX-License-Identifier: MIT
3 | import numpy as np
4 | import sparse_ir
5 | from sparse_ir import poly
6 | from sparse_ir import augment
7 | 
8 | import pytest
9 | 
10 | 
11 | def test_augmented_bosonic_basis():
12 |     """Augmented bosonic basis"""
13 |     wmax = 2
14 |     beta = 1000
15 |     basis = sparse_ir.FiniteTempBasis("B", beta, wmax, eps=1e-6)
16 |     basis_comp = augment.AugmentedBasis(basis, augment.TauConst, augment.TauLinear)
17 | 
18 |     # G(tau) = c - e^{-tau*pole}/(1 - e^{-beta*pole})
19 |     pole = 1.0
20 |     const = 1e-2
21 |     tau_smpl = sparse_ir.TauSampling(basis_comp)
22 |     assert tau_smpl.sampling_points.size == basis_comp.size
23 |     gtau = const + basis.u(tau_smpl.tau).T @ (-basis.s * basis.v(pole))
24 |     magn = np.abs(gtau).max()
25 | 
26 |     # This illustrates that "naive" fitting is a problem if the fitting matrix
27 |     # is not well-conditioned.
28 |     gl_fit_bad = np.linalg.pinv(tau_smpl.matrix) @ gtau
29 |     gtau_reconst_bad = tau_smpl.evaluate(gl_fit_bad)
30 |     assert not np.allclose(gtau_reconst_bad, gtau, atol=1e-13 * magn, rtol=0)
31 |     np.testing.assert_allclose(gtau_reconst_bad, gtau,
32 |                                atol=5e-16 * tau_smpl.cond * magn, rtol=0)
33 | 
34 |     # Now do the fit properly
35 |     gl_fit = tau_smpl.fit(gtau)
36 |     gtau_reconst = tau_smpl.evaluate(gl_fit)
37 |     np.testing.assert_allclose(gtau_reconst, gtau, atol=1e-14 * magn, rtol=0)
38 | 
39 | 
40 | @pytest.mark.parametrize("stat", ["F", "B"])
41 | def test_vertex_basis(stat):
42 |     """Vertex basis"""
43 |     wmax = 2
44 |     beta = 1000
45 |     basis = sparse_ir.FiniteTempBasis(stat, beta, wmax, eps=1e-6)
46 |     basis_comp = augment.AugmentedBasis(basis, augment.MatsubaraConst)
47 |     assert basis_comp.uhat is not None
48 | 
49 |     # G(iv) = c + 1/(iv-pole)
50 |     pole = 1.0
51 |     c = 1.0
52 |     matsu_smpl = sparse_ir.MatsubaraSampling(basis_comp)
53 |     giv = c + 1/(1J*matsu_smpl.sampling_points * np.pi/beta - pole)
54 |     gl = matsu_smpl.fit(giv)
55 | 
56 |     giv_reconst = matsu_smpl.evaluate(gl)
57 | 
58 |     np.testing.assert_allclose(giv, giv_reconst,
59 |                                atol=np.abs(giv).max() * 1e-7, rtol=0)
--------------------------------------------------------------------------------
/test/test_basis_set.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020-2021 Markus Wallerberger and others
2 | # SPDX-License-Identifier: MIT
3 | import numpy as np
4 | 
5 | from sparse_ir import FiniteTempBasisSet,\
6 |     TauSampling, MatsubaraSampling, finite_temp_bases
7 | import pytest
8 | 
9 | def test_consistency(sve_logistic):
10 |     beta = 2
11 |     wmax = 5
12 |     eps = 1e-5
13 | 
14 |     sve_result = sve_logistic[beta * wmax]
15 |     basis_f, basis_b = finite_temp_bases(beta, wmax, eps, sve_result)
16 |     smpl_tau_f = TauSampling(basis_f)
17 |     smpl_tau_b = TauSampling(basis_b)
18 |     smpl_wn_f = MatsubaraSampling(basis_f)
19 |     smpl_wn_b = MatsubaraSampling(basis_b)
20 | 
21 |     bs = FiniteTempBasisSet(beta, wmax, eps, sve_result=sve_result)
22 |     np.testing.assert_array_equal(smpl_tau_f.sampling_points, smpl_tau_b.sampling_points)
23 |     np.testing.assert_array_equal(bs.smpl_tau_f.matrix.a, smpl_tau_f.matrix.a)
24 |     np.testing.assert_array_equal(bs.smpl_tau_b.matrix.a, smpl_tau_b.matrix.a)
25 | 
26 |     np.testing.assert_array_equal(bs.smpl_wn_f.matrix.a, smpl_wn_f.matrix.a)
27 |     np.testing.assert_array_equal(bs.smpl_wn_b.matrix.a, smpl_wn_b.matrix.a)
--------------------------------------------------------------------------------
/test/test_compare.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
2 | # SPDX-License-Identifier: MIT
3 | import numpy as np
4 | import pytest
5 | 
6 | from sparse_ir import adapter
7 | 
8 | try:
9 |     import irbasis
10 | except ImportError:
11 |     pytest.skip("no irbasis library for comparison", allow_module_level=True)
12 |     raise
13 | 
14 | COMPARE_PARAMS = [
15 |     ('F', 10),
16 |     ('F', 10_000),
17 |     ('B', 10),
18 |     ('B', 10_000),
19 | ]
20 | 
21 | # Return values of fixture functions are passed as parameters to test
22 | # functions if the parameter has the same name as the fixture function.
23 | # scope="module" ensures that fixtures are cached.
24 | 
25 | @pytest.fixture(scope="module")
26 | def adapters(sve_logistic, sve_reg_bose):
27 |     table = {'F': sve_logistic, 'B': sve_reg_bose}
28 |     return {(stat, lambda_): adapter.Basis(stat, lambda_, table[stat][lambda_])
29 |             for (stat, lambda_) in COMPARE_PARAMS}
30 | 
31 | 
32 | @pytest.fixture(scope="module")
33 | def references():
34 |     return {params: irbasis.load(*params) for params in COMPARE_PARAMS}
35 | 
36 | 
37 | @pytest.mark.parametrize("stat,lambda_", COMPARE_PARAMS)
38 | def test_svals(stat, lambda_, adapters, references):
39 |     adapt = adapters[stat, lambda_]
40 |     ref = references[stat, lambda_]
41 |     shared = slice(min(adapt.dim(), ref.dim()))
42 |     eps = np.finfo(float).eps
43 | 
44 |     assert adapt.statistics == ref.statistics == stat
45 |     assert adapt.Lambda == ref.Lambda == lambda_
46 |     assert adapt.dim() > 10
47 | 
48 |     np.testing.assert_allclose(adapt.sl(shared), ref.sl(shared),
49 |                                atol=10 * ref.sl(0) * eps, rtol=0)
50 | 
51 | @pytest.mark.parametrize("stat,lambda_", COMPARE_PARAMS)
52 | def test_vly(stat, lambda_, adapters, references):
53 |     adapt = adapters[stat, lambda_]
54 |     ref = references[stat, lambda_]
55 |     shared_dim = min(adapt.dim(), ref.dim())
56 | 
57 |     y = [-1., -.5, -.1, -.01, -.001, -.0001, 0, .0001, .001, .01, .1, .5, 1.]
58 |     for li in [0, 1, 2, shared_dim//2, shared_dim//2 + 1, shared_dim-1]:
59 |         vly_adapt = adapt.vly(li, y)
60 |         vly_ref = ref.vly(li, y)
61 |         tol = 1e-10 * np.abs(vly_ref).max() * ref.sl(0) / ref.sl(li)
62 |         np.testing.assert_allclose(vly_adapt, vly_ref, atol=tol, rtol=0)
63 | 
64 | 
65 | @pytest.mark.parametrize("stat,lambda_", COMPARE_PARAMS)
66 | def test_ulx2(stat, lambda_, adapters, references):
67 |     adapt = adapters[stat, lambda_]
68 |     ref = references[stat, lambda_]
69 |     shared_dim = min(adapt.dim(), ref.dim())
70 | 
71 |     x = [-1., -.9999, -.999, -.99, -.9, -.5, 0., .5, .9, .99, .999, .9999, 1.]
72 |     for li in [0, 1, 2, shared_dim//2, shared_dim//2 + 1, shared_dim-1]:
73 |         ulx_adapt = adapt.ulx(li, x)
74 |         ulx_ref = ref.ulx(li, x)
75 |         tol = 1e-12 * np.abs(ulx_ref).max() * ref.sl(0) / ref.sl(li)
76 |         np.testing.assert_allclose(ulx_adapt, ulx_ref, atol=tol, rtol=0)
77 | 
78 | 
79 | @pytest.mark.parametrize("stat,lambda_", COMPARE_PARAMS)
80 | def test_ulx(stat, lambda_, adapters, references):
81 |     adapt = adapters[stat, lambda_]
82 |     ref = references[stat, lambda_]
83 |     shared_dim = min(adapt.dim(), ref.dim())
84 | 
85 |     n = [-20, -2, -1, 0, 1, 2, 20, 100, 300, 1000]
86 |     for li in [0, 1, 2, shared_dim//2, shared_dim//2 + 1, shared_dim-1]:
87 |         ulxhat_adapt = adapt.compute_unl(n, li)
88 |         ulxhat_ref = ref.compute_unl(n, li).ravel()
89 |         tol = 1e-13 * np.abs(ulxhat_ref).max() * ref.sl(0) / ref.sl(li)
90 |         np.testing.assert_allclose(ulxhat_adapt, ulxhat_ref, atol=tol, rtol=0)
91 | 
92 | 
93 | @pytest.mark.parametrize("stat,lambda_", COMPARE_PARAMS)
94 | def test_matsubara_sampling(stat, lambda_, adapters, references):
95 |     adapt = adapters[stat, lambda_]
96 |     ref = references[stat, lambda_]
97 |     shared_dim = min(adapt.dim(), ref.dim())
98 |     zeta = {'F': 1, 'B': 0}[stat]
99 | 
100 |     l = shared_dim - 1
101 | 
102 |     sp_adapt = adapt.sampling_points_matsubara(whichl=shared_dim-1)
103 |     u_adapt = adapt.compute_unl(sp_adapt, shared_dim-1)
104 |     u_adapt_real = u_adapt.real if l % 2 == zeta else u_adapt.imag
105 | 
106 |     sp_ref = ref.sampling_points_matsubara(whichl=shared_dim-1)
107 |     u_ref = ref.compute_unl(sp_ref, shared_dim-1)
108 |     u_ref_real = u_ref.real if l % 2 == zeta else u_ref.imag
109 | 
110 |     num_sign_changes_adapt = np.sum(u_adapt_real[:-1] * u_adapt_real[1:] < 0)
111 |     num_sign_changes_ref = np.sum(u_ref_real[:-1] * u_ref_real[1:] < 0)
112 |     assert num_sign_changes_adapt == num_sign_changes_ref
113 | 
114 | 
115 | @pytest.mark.parametrize("stat,lambda_", COMPARE_PARAMS)
116 | def test_sampling_x(stat, lambda_, adapters, references):
117 |     adapt = adapters[stat, lambda_]
118 |     ref = references[stat, lambda_]
119 |     shared_dim = min(adapt.dim(), ref.dim())
120 |     sp_adapt = adapt.sampling_points_x(whichl=shared_dim-1)
121 |     sp_ref = ref.sampling_points_x(whichl=shared_dim-1)
122 |     np.testing.assert_allclose(sp_adapt, sp_ref, rtol=0, atol=1e-8)
123 | 
124 | 
125 | @pytest.mark.parametrize("stat,lambda_", COMPARE_PARAMS)
126 | def test_sampling_y(stat, lambda_, adapters, references):
127 |     adapt = adapters[stat, lambda_]
128 |     ref = references[stat, lambda_]
129 |     shared_dim = min(adapt.dim(), ref.dim())
130 |     sp_adapt = adapt.sampling_points_y(whichl=shared_dim-1)
131 |     sp_ref = ref.sampling_points_y(whichl=shared_dim-1)
132 |     np.testing.assert_allclose(sp_adapt, sp_ref, rtol=0, atol=1e-8)
--------------------------------------------------------------------------------
/test/test_dlr.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
2 | # SPDX-License-Identifier: MIT
3 | import sparse_ir
4 | from sparse_ir.dlr import DiscreteLehmannRepresentation
5 | from sparse_ir.sampling import MatsubaraSampling, TauSampling
6 | import numpy as np
7 | import pytest
8 | 
9 | 
10 | """
11 | Model:
12 |     G(iv) = sum_p c_p U^{SPR}(iv, ω_p),
13 | where
14 | Fermion:
15 |     U^{SPR}(iv, ω_p) = 1/(iv - ω_p)
16 | 
17 | Boson:
18 |     U^{SPR}(iv, ω_p) = w_p/(iv - ω_p)
19 |     with w_p = tanh(0.5*β*ω_p)
20 | """
21 | @pytest.mark.parametrize("stat", ["F", "B"])
22 | def test_compression(sve_logistic, stat):
23
| beta = 10_000 24 | wmax = 1 25 | eps = 1e-12 26 | basis = sparse_ir.FiniteTempBasis(stat, beta, wmax, eps=eps, 27 | sve_result=sve_logistic[beta*wmax]) 28 | dlr = DiscreteLehmannRepresentation(basis) 29 | 30 | np.random.seed(4711) 31 | 32 | num_poles = 10 33 | poles = wmax * (2*np.random.rand(num_poles) - 1) 34 | coeffs = 2*np.random.rand(num_poles) - 1 35 | assert np.abs(poles).max() <= wmax 36 | 37 | Gl = DiscreteLehmannRepresentation(basis, poles).to_IR(coeffs) 38 | 39 | g_dlr = dlr.from_IR(Gl) 40 | 41 | # Comparison on Matsubara frequencies 42 | smpl = MatsubaraSampling(basis) 43 | smpl_for_dlr = MatsubaraSampling(dlr, smpl.sampling_points) 44 | 45 | giv = smpl_for_dlr.evaluate(g_dlr) 46 | 47 | giv_ref = smpl.evaluate(Gl, axis=0) 48 | 49 | np.testing.assert_allclose(giv, giv_ref, atol=300*eps, rtol=0) 50 | 51 | # Comparison on tau 52 | smpl_tau = TauSampling(basis) 53 | gtau = smpl_tau.evaluate(Gl) 54 | 55 | smpl_tau_for_dlr = TauSampling(dlr) 56 | gtau2 = smpl_tau_for_dlr.evaluate(g_dlr) 57 | 58 | np.testing.assert_allclose(gtau, gtau2, atol=300*eps, rtol=0) 59 | 60 | 61 | def test_boson(sve_logistic): 62 | beta = 2 63 | wmax = 21 64 | eps = 1e-7 65 | basis_b = sparse_ir.FiniteTempBasis("B", beta, wmax, eps=eps, 66 | sve_result=sve_logistic[beta * wmax]) 67 | 68 | # G(iw) = sum_p coeff_p U^{SPR}(iw, omega_p) 69 | coeff = np.array([1.1, 2.0]) 70 | omega_p = np.array([2.2, -1.0]) 71 | 72 | rhol_pole = np.einsum('lp,p->l', basis_b.v(omega_p), coeff) 73 | gl_pole = - basis_b.s * rhol_pole 74 | 75 | sp = DiscreteLehmannRepresentation(basis_b, omega_p) 76 | gl_pole2 = sp.to_IR(coeff) 77 | 78 | np.testing.assert_allclose(gl_pole, gl_pole2, atol=300*eps, rtol=0) 79 | -------------------------------------------------------------------------------- /test/test_gauss.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import numpy as np 4 | import numpy.polynomial.legendre as np_legendre 5 | 6 | from sparse_ir import _gauss 7 | 8 | 9 | def test_collocate(): 10 | r = _gauss.legendre(20) 11 | cmat = _gauss.legendre_collocation(r) 12 | emat = np_legendre.legvander(r.x, r.x.size-1) 13 | np.testing.assert_allclose(emat.dot(cmat), np.eye(20), atol=1e-13, rtol=0) 14 | 15 | 16 | def _gauss_validate(rule): 17 | if not (rule.a <= rule.b): 18 | raise ValueError("a,b must be a valid interval") 19 | if not (rule.x <= rule.b).all(): 20 | raise ValueError("x must be smaller than b") 21 | if not (rule.x >= rule.a).all(): 22 | raise ValueError("x must be larger than a") 23 | if not (rule.x[:-1] < rule.x[1:]).all(): 24 | raise ValueError("x must be well-ordered") 25 | if rule.x.shape != rule.w.shape: 26 | raise ValueError("shapes are inconsistent") 27 | 28 | np.testing.assert_allclose(rule.x_forward, rule.x - rule.a) 29 | np.testing.assert_allclose(rule.x_backward, rule.b - rule.x) 30 | 31 | def test_gauss_leg(): 32 | rule = _gauss.legendre(200) 33 | _gauss_validate(rule) 34 | x, w = np.polynomial.legendre.leggauss(200) 35 | np.testing.assert_allclose(rule.x, x) 36 | np.testing.assert_allclose(rule.w, w) 37 | 38 | 39 | def test_piecewise(): 40 | edges = [-4, -1, 1, 3] 41 | rule = _gauss.legendre(20).piecewise(edges) 42 | _gauss_validate(rule) 43 | -------------------------------------------------------------------------------- /test/test_kernel.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 
Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import numpy as np 4 | import pytest 5 | 6 | from sparse_ir import kernel 7 | from sparse_ir import _gauss 8 | 9 | KERNELS = [ 10 | kernel.LogisticKernel(9), 11 | kernel.RegularizedBoseKernel(8), 12 | kernel.LogisticKernel(120_000), 13 | kernel.RegularizedBoseKernel(127_500), 14 | kernel.LogisticKernel(40_000).get_symmetrized(-1), 15 | kernel.RegularizedBoseKernel(35_000).get_symmetrized(-1), 16 | ] 17 | 18 | 19 | @pytest.mark.parametrize("K", KERNELS) 20 | def test_accuracy(K): 21 | dtype = np.float32 22 | dtype_x = np.float64 23 | 24 | rule = _gauss.legendre(10, dtype) 25 | hints = K.sve_hints(2.2e-16) 26 | gauss_x = rule.piecewise(hints.segments_x) 27 | gauss_y = rule.piecewise(hints.segments_y) 28 | eps = np.finfo(dtype).eps 29 | tiny = np.finfo(dtype).tiny / eps 30 | 31 | result = kernel.matrix_from_gauss(K, gauss_x, gauss_y) 32 | result_x = kernel.matrix_from_gauss( 33 | K, gauss_x.astype(dtype_x), gauss_y.astype(dtype_x)) 34 | magn = np.abs(result_x).max() 35 | np.testing.assert_allclose(result, result_x, atol = 2 * magn * eps, rtol=0, 36 | err_msg="absolute precision too poor") 37 | 38 | with np.errstate(invalid='ignore'): 39 | reldiff = np.where(np.abs(result_x) < tiny, 1, result / result_x) 40 | np.testing.assert_allclose(reldiff, 1, atol=100 * eps, rtol=0, 41 | err_msg="relative precision too poor") 42 | 43 | @pytest.mark.parametrize("lambda_", [10, 42, 10_000]) 44 | def test_singularity(lambda_): 45 | x = np.random.rand(1000) * 2 - 1 46 | K = kernel.RegularizedBoseKernel(lambda_) 47 | np.testing.assert_allclose(K(x, [0.0]), 1 / lambda_) 48 | -------------------------------------------------------------------------------- /test/test_poly.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | from argparse import ArgumentError 4 | from _pytest.mark import param 5 | import numpy as np 6 | 7 | import sparse_ir 8 | from sparse_ir import poly 9 | from scipy.integrate import quad 10 | 11 | import pytest 12 | 13 | 14 | def test_shape(sve_logistic): 15 | u, s, v = sve_logistic[42].part() 16 | l = s.size 17 | assert u.shape == (l,) 18 | 19 | assert u[3].shape == () 20 | assert u[2:5].shape == (3,) 21 | 22 | 23 | def test_slice(sve_logistic): 24 | sve_result = sve_logistic[42] 25 | 26 | basis = sparse_ir.FiniteTempBasis('F', 4.2, 10, sve_result=sve_result) 27 | assert basis[:4].size == 4 28 | 29 | 30 | @pytest.mark.parametrize("fn", ["u", "v"]) 31 | def test_broadcast_uv(sve_logistic, fn): 32 | sve_result = sve_logistic[42] 33 | basis = sparse_ir.FiniteTempBasis('F', 4.2, 10, sve_result=sve_result) 34 | 35 | f = getattr(basis, fn) 36 | assert_eq = np.testing.assert_array_equal 37 | 38 | l = [1, 2, 4] 39 | x = [0.5, 0.3, 1.0, 2.0] 40 | 41 | # Broadcast over x 42 | assert_eq(f[1](x), [f[1](xi) for xi in x]) 43 | 44 | # Broadcast over l 45 | assert_eq(f[l](x[0]), [f[li](x[0]) for li in l]) 46 | 47 | # Broadcast over both l, x 48 | assert_eq(f[l](x), np.reshape([f[li](xi) for li in l for xi in x], (3, 4))) 49 | 50 | # Tensorial 51 | assert_eq(f[l](np.reshape(x, (2, 2))), f[l](x).reshape(3, 2, 2)) 52 | 53 | 54 | def test_broadcast_uhat(sve_logistic): 55 | sve_result = sve_logistic[42] 56 | basis = sparse_ir.FiniteTempBasis('B', 4.2, 10, sve_result=sve_result) 57 | 58 | f = basis.uhat 59 | def assert_eq(x, y): np.testing.assert_allclose(x, y, rtol=0, 
atol=1e-15) 60 | 61 | l = [1, 2, 4] 62 | x = [-2, 8, 4, 6] 63 | 64 | # Broadcast over x 65 | assert_eq(f[1](x), [f[1](xi) for xi in x]) 66 | 67 | # Broadcast over l 68 | assert_eq(f[l](x[0]), [f[li](x[0]) for li in l]) 69 | 70 | # Broadcast over both l, x 71 | assert_eq(f[l](x), np.reshape([f[li](xi) for li in l for xi in x], (3, 4))) 72 | 73 | # Tensorial 74 | assert_eq(f[l](np.reshape(x, (2, 2))), f[l](x).reshape(3, 2, 2)) 75 | 76 | 77 | def test_violate(sve_logistic): 78 | u, s, v = sve_logistic[42].part() 79 | 80 | with pytest.raises(ValueError): 81 | u(1.5) 82 | with pytest.raises(ValueError): 83 | v(-3.0) 84 | 85 | 86 | def test_eval(sve_logistic): 87 | u, s, v = sve_logistic[42].part() 88 | l = s.size 89 | 90 | # evaluate 91 | np.testing.assert_array_equal( 92 | u(0.4), [u[i](0.4) for i in range(l)]) 93 | np.testing.assert_array_equal( 94 | u([0.4, -0.2]), 95 | [[u[i](x) for x in (0.4, -0.2)] for i in range(l)]) 96 | 97 | 98 | def test_broadcast(sve_logistic): 99 | u, s, v = sve_logistic[42].part() 100 | 101 | x = [0.3, 0.5] 102 | l = [2, 7] 103 | np.testing.assert_array_equal( 104 | u.value(l, x), [u[ll](xx) for (ll, xx) in zip(l, x)]) 105 | 106 | 107 | def test_matrix_hat(sve_logistic): 108 | u, s, v = sve_logistic[42].part() 109 | uhat = poly.PiecewiseLegendreFT(u, "odd") 110 | 111 | n = np.array([1, 3, 5, -1, -3, 5]) 112 | result = uhat(n.reshape(3, 2)) 113 | result_iter = uhat(n).reshape(-1, 3, 2) 114 | assert result.shape == result_iter.shape 115 | np.testing.assert_array_equal(result, result_iter) 116 | 117 | 118 | @pytest.mark.parametrize("lambda_, atol", [(42, 1e-13), (1E+4, 1e-13)]) 119 | def test_overlap(sve_logistic, lambda_, atol): 120 | u, s, v = sve_logistic[lambda_].part() 121 | 122 | # Keep only even number of polynomials 123 | u, s, v = u[:2*(s.size//2)], s[:2*(s.size//2)], v[:2*(s.size//2)] 124 | 125 | np.testing.assert_allclose(u[0].overlap(u[0]), 1, rtol=0, atol=atol) 126 | 127 | ref = (np.arange(s.size) == 0).astype(float) 128 | np.testing.assert_allclose(u.overlap(u[0]), ref, rtol=0, atol=atol) 129 | 130 | 131 | @pytest.mark.parametrize("lambda_, atol", [(42, 1e-13), (1E+4, 1e-13)]) 132 | def test_overlap_break_points(sve_logistic, lambda_, atol): 133 | u, s, v = sve_logistic[lambda_].part() 134 | 135 | D = 0.5 * v.xmax 136 | rhow = lambda omega: np.where(abs(omega)<=D, 1, 0) 137 | rhol = v.overlap(rhow, points=[-D, D]) 138 | rhol_ref = [quad(v[l], -D, D)[0] for l in range(v.size)] 139 | 140 | np.testing.assert_allclose(rhol, rhol_ref, rtol=0, atol=1e-12*np.abs(rhol_ref).max()) 141 | 142 | 143 | def test_eval_unique(sve_logistic): 144 | u, s, v = sve_logistic[42].part() 145 | uhat = poly.PiecewiseLegendreFT(u, "odd") 146 | 147 | # evaluate 148 | res1 = uhat(np.array([1, 3, 3, 1])) 149 | idx = np.array([0, 1, 1, 0]) 150 | res2 = uhat(np.array([1,3]))[:,idx] 151 | np.testing.assert_array_equal(res1, res2) 152 | -------------------------------------------------------------------------------- /test/test_sampling.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others 2 | # SPDX-License-Identifier: MIT 3 | import numpy as np 4 | import pytest 5 | 6 | import sparse_ir 7 | from sparse_ir import sampling 8 | from sparse_ir.basis import FiniteTempBasis 9 | 10 | 11 | def test_decomp(): 12 | rng = np.random.RandomState(4711) 13 | A = rng.randn(49, 39) 14 | 15 | Ad = sampling.DecomposedMatrix(A) 16 | norm_A = Ad.s[0] / Ad.s[-1] 17 | np.testing.assert_allclose(A, 
np.asarray(Ad), atol=1e-15 * norm_A, rtol=0) 18 | 19 | x = rng.randn(39) 20 | np.testing.assert_allclose(A @ x, Ad @ x, atol=1e-14 * norm_A, rtol=0) 21 | 22 | x = rng.randn(39, 3) 23 | np.testing.assert_allclose(A @ x, Ad @ x, atol=1e-14 * norm_A, rtol=0) 24 | 25 | y = rng.randn(49) 26 | np.testing.assert_allclose(np.linalg.lstsq(A, y, rcond=None)[0], 27 | Ad.lstsq(y), atol=1e-14 * norm_A, rtol=0) 28 | 29 | y = rng.randn(49, 2) 30 | np.testing.assert_allclose(np.linalg.lstsq(A, y, rcond=None)[0], 31 | Ad.lstsq(y), atol=1e-14 * norm_A, rtol=0) 32 | 33 | 34 | def test_axis(): 35 | rng = np.random.RandomState(4712) 36 | A = rng.randn(17, 21) 37 | 38 | Ad = sampling.DecomposedMatrix(A) 39 | norm_A = Ad.s[0] / Ad.s[-1] 40 | 41 | x = rng.randn(2, 21, 4, 7) 42 | ref = np.tensordot(A, x, (-1,1)).transpose((1,0,2,3)) 43 | np.testing.assert_allclose( 44 | Ad.matmul(x, axis=1), ref, 45 | atol=1e-13 * norm_A, rtol=0) 46 | 47 | 48 | def test_axis0(): 49 | rng = np.random.RandomState(4712) 50 | A = rng.randn(17, 21) 51 | 52 | Ad = sampling.DecomposedMatrix(A) 53 | norm_A = Ad.s[0] / Ad.s[-1] 54 | 55 | x = rng.randn(21, 2) 56 | 57 | np.testing.assert_allclose( 58 | Ad.matmul(x, axis=0), A@x, 59 | atol=1e-13 * norm_A, rtol=0) 60 | 61 | np.testing.assert_allclose( 62 | Ad.matmul(x), A@x, 63 | atol=1e-13 * norm_A, rtol=0) 64 | 65 | 66 | @pytest.mark.parametrize("stat, lambda_", [('B', 42), ('F', 42)]) 67 | def test_tau_noise(sve_logistic, stat, lambda_): 68 | basis = sparse_ir.FiniteTempBasis(stat, 1, lambda_, 69 | sve_result=sve_logistic[lambda_]) 70 | smpl = sparse_ir.TauSampling(basis) 71 | rng = np.random.RandomState(4711) 72 | 73 | rhol = basis.v([-.999, -.01, .5]) @ [0.8, -.2, 0.5] 74 | Gl = basis.s * rhol 75 | Gl_magn = np.linalg.norm(Gl) 76 | Gtau = smpl.evaluate(Gl) 77 | 78 | noise = 1e-5 79 | Gtau_n = Gtau + noise * np.linalg.norm(Gtau) * rng.randn(*Gtau.shape) 80 | Gl_n = smpl.fit(Gtau_n) 81 | 82 | np.testing.assert_allclose(Gl, Gl_n, atol=12 * noise * Gl_magn, rtol=0) 83 | 84 | 85 | @pytest.mark.parametrize("stat, lambda_", [('B', 42), ('F', 42)]) 86 | @pytest.mark.parametrize("positive_only", [False, True]) 87 | def test_wn_noise(sve_logistic, stat, lambda_, positive_only): 88 | basis = sparse_ir.FiniteTempBasis(stat, 1, lambda_, 89 | sve_result=sve_logistic[lambda_]) 90 | smpl = sparse_ir.MatsubaraSampling(basis, positive_only=positive_only) 91 | rng = np.random.RandomState(4711) 92 | 93 | rhol = basis.v([-.999, -.01, .5]) @ [0.8, -.2, 0.5] 94 | Gl = basis.s * rhol 95 | Gl_magn = np.linalg.norm(Gl) 96 | Giw = smpl.evaluate(Gl) 97 | 98 | noise = 1e-5 99 | Giw_n = Giw + noise * np.linalg.norm(Giw) * rng.randn(*Giw.shape) 100 | Gl_n = smpl.fit(Giw_n) 101 | np.testing.assert_allclose(Gl, Gl_n, 102 | atol=12 * np.sqrt(1 + positive_only) * noise * Gl_magn, rtol=0) 103 | 104 | 105 | @pytest.mark.parametrize("stat, lambda_", [('F', 42)]) 106 | @pytest.mark.parametrize("positive_only", [False, True]) 107 | def test_wn_eval_other(sve_logistic, stat, lambda_, positive_only): 108 | basis = sparse_ir.FiniteTempBasis(stat, 1, lambda_, 109 | sve_result=sve_logistic[lambda_]) 110 | smpl = sparse_ir.MatsubaraSampling(basis, positive_only=positive_only) 111 | 112 | n2 = [1, 3, 7] 113 | smpl2 = sparse_ir.MatsubaraSampling(basis, sampling_points=n2) 114 | 115 | rhol = basis.v([+.998, -.01, .5]) @ [0.8, -.2, 0.5] 116 | Gl = basis.s * rhol 117 | Gl_magn = np.linalg.norm(Gl) 118 | np.testing.assert_allclose(smpl.evaluate(Gl, points=n2), smpl2.evaluate(Gl), 119 | rtol=1e-15 * Gl_magn) 120 | 121 | 122 | 
@pytest.mark.parametrize("stat, lambda_", [('F', 42)])
123 | def test_tau_eval_other(sve_logistic, stat, lambda_):
124 |     basis = sparse_ir.FiniteTempBasis(stat, 1, lambda_,
125 |                                       sve_result=sve_logistic[lambda_])
126 |     smpl = sparse_ir.TauSampling(basis)
127 | 
128 |     n2 = (0.1, 0.4)
129 |     smpl2 = sparse_ir.TauSampling(basis, sampling_points=n2)
130 | 
131 |     rhol = basis.v([+.998, -.01, .5]) @ [0.8, -.2, 0.5]
132 |     Gl = basis.s * rhol
133 |     Gl_magn = np.linalg.norm(Gl)
134 |     np.testing.assert_allclose(smpl.evaluate(Gl, points=n2), smpl2.evaluate(Gl),
135 |                                rtol=1e-15 * Gl_magn)
--------------------------------------------------------------------------------
/test/test_scipost_sample_code.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
2 | # SPDX-License-Identifier: MIT
3 | 
4 | 
5 | # Sample codes in the SciPost review paper
6 | 
7 | def test_sample2():
8 |     # Compute IR basis for fermions with \beta = 100 and \omega_max = 10
9 |     import sparse_ir
10 |     import numpy as np
11 | 
12 |     lambda_ = 1000
13 |     beta = 100
14 |     wmax = lambda_/beta
15 |     eps = 1e-8  # cut-off value for singular values
16 |     b = sparse_ir.FiniteTempBasis('F', beta, wmax, eps=eps)
17 | 
18 |     x = y = 0.1
19 |     tau = 0.5 * beta * (x+1)
20 |     omega = wmax * y
21 | 
22 |     # All singular values
23 |     print("singular values: ", b.s)
24 |     print("U_0(0.1)", b.u[0](tau))
25 |     print("V_0(0.1)", b.v[0](omega))
26 | 
27 |     print("n-th derivative of U_l(tau) and V_l(omega)")
28 |     for n in range(1,3):
29 |         u_n = b.u.deriv(n)
30 |         v_n = b.v.deriv(n)
31 |         print(" n= ", n, u_n[0](tau))
32 |         print(" n= ", n, v_n[0](omega))
33 | 
34 |     # Compute u_{ln} as a matrix for the first
35 |     # 10 non-negative fermionic Matsubara frequencies
36 |     # Fermionic/bosonic frequencies are denoted by odd/even integers.
37 |     hatF_t = b.uhat(2*np.arange(10)+1)
38 |     print(hatF_t.shape)
39 | 
40 | def test_sample3():
41 |     import sparse_ir
42 |     import numpy as np
43 |     from numpy.fft import fftn, ifftn
44 | 
45 |     beta = 1e+3
46 |     lambda_ = 1e+5
47 | 
48 |     wmax = lambda_/beta
49 |     eps = 1e-15
50 |     print("wmax", wmax)
51 | 
52 |     b = sparse_ir.FiniteTempBasis('F', beta , wmax, eps=eps)
53 |     print("Number of basis functions", b.size)
54 | 
55 |     # Sparse sampling in tau
56 |     smpl_tau = sparse_ir.TauSampling(b)
57 | 
58 |     # Sparse sampling in Matsubara frequencies
59 |     smpl_matsu = sparse_ir.MatsubaraSampling(b)
60 | 
61 |     # Parameters
62 |     nk_lin = 64
63 |     U, kps = 2.0, np.array([nk_lin, nk_lin])
64 |     nw = smpl_matsu.sampling_points.size
65 |     ntau = smpl_tau.sampling_points.size
66 | 
67 |     # Generate k mesh and non-interacting band energies
68 |     nk = np.prod(kps)
69 |     kgrid = [2*np.pi*np.arange(kp)/kp for kp in kps]
70 |     k1, k2 = np.meshgrid(*kgrid, indexing='ij')
71 |     ek = -2*(np.cos(k1) + np.cos(k2))
72 |     iw = 1j*np.pi*smpl_matsu.sampling_points/beta
73 | 
74 |     # G(iw, k): (nw, nk)
75 |     gkf = 1.0 / (iw[:,None] - ek.ravel()[None,:])
76 | 
77 |     # G(l, k): (L, nk)
78 |     gkl = smpl_matsu.fit(gkf)
79 | 
80 |     # G(tau, k): (ntau, nk)
81 |     gkt = smpl_tau.evaluate(gkl)
82 | 
83 |     # G(tau, r): (ntau, nk)
84 |     grt = np.fft.fftn(gkt.reshape(ntau, *kps), axes=(1,2)).\
85 |         reshape(ntau, nk)
86 | 
87 |     # Sigma(tau, r): (ntau, nk)
88 |     srt = U*U*grt*grt*grt[::-1,:]
89 | 
90 |     # Sigma(l, r): (L, nk)
91 |     srl = smpl_tau.fit(srt)
92 | 
93 |     # Sigma(iw, r): (nw, nk)
94 |     srf = smpl_matsu.evaluate(srl)
95 | 
96 |     # Sigma(iw, k): (nw, kps[0], kps[1])
97 |     srf = srf.reshape(nw, *kps)
98 |     skf = ifftn(srf, axes=(1,2))/nk**2
99 | 
--------------------------------------------------------------------------------
/test/test_sve.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
2 | # SPDX-License-Identifier: MIT
3 | import numpy as np
4 | import sparse_ir
5 | 
6 | import pytest
7 | 
8 | 
9 | def _check_smooth(u, s, uscale, fudge_factor):
10 |     eps = np.finfo(s.dtype).eps
11 |     x = u.knots[1:-1]
12 | 
13 |     jump = np.abs(u(x + eps) - u(x - eps))
14 |     compare = np.abs(u(x + 3 * eps) - u(x + eps))
15 |     compare = np.maximum(compare, uscale * eps)
16 | 
17 |     # loss of precision
18 |     compare *= fudge_factor * (s[0] / s)[:, None]
19 |     try:
20 |         np.testing.assert_array_less(jump, compare)
21 |     except AssertionError:
22 |         print((jump > compare).nonzero())
23 |         raise
24 | 
25 | 
26 | @pytest.mark.parametrize("lambda_", [10, 42, 10_000])
27 | def test_smooth(sve_logistic, lambda_):
28 |     basis = sparse_ir.FiniteTempBasis('F', 1, lambda_,
29 |                                       sve_result=sve_logistic[lambda_])
30 |     _check_smooth(basis.u, basis.s, 2*basis.u(1).max(), 24)
31 |     _check_smooth(basis.v, basis.s, 50, 20)
32 | 
33 | 
34 | @pytest.mark.parametrize("lambda_", [10, 42, 10_000])
35 | def test_num_roots_u(sve_logistic, lambda_):
36 |     basis = sparse_ir.FiniteTempBasis('F', 1, lambda_,
37 |                                       sve_result=sve_logistic[lambda_])
38 |     for i in range(basis.u.size):
39 |         ui_roots = basis.u[i].roots()
40 |         assert ui_roots.size == i
41 | 
42 | 
43 | @pytest.mark.parametrize("stat", ['F', 'B'])
44 | @pytest.mark.parametrize("lambda_", [10, 42, 10_000])
45 | def test_num_roots_uhat(sve_logistic, stat, lambda_):
46 |     basis = sparse_ir.FiniteTempBasis(stat, 1, lambda_,
47 |
sve_result=sve_logistic[lambda_])
48 |     for i in [0, 1, 7, 10]:
49 |         x0 = basis.uhat[i].extrema()
50 |         assert i + 1 <= x0.size <= i + 2
51 | 
52 | 
53 | @pytest.mark.parametrize("stat", ['F', 'B'])
54 | @pytest.mark.parametrize("lambda_", [10, 42, 10_000])
55 | def test_accuracy(sve_logistic, stat, lambda_):
56 |     basis = sparse_ir.FiniteTempBasis(stat, 4, lambda_,
57 |                                       sve_result=sve_logistic[lambda_])
58 | 
59 |     assert 0 < basis.accuracy <= basis.significance[-1]
60 |     assert basis.significance[0] == 1
61 |     assert basis.accuracy <= basis.s[-1] / basis.s[0]
--------------------------------------------------------------------------------
/test/test_whitespace.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020-2022 Markus Wallerberger, Hiroshi Shinaoka, and others
2 | # SPDX-License-Identifier: MIT
3 | import os
4 | 
5 | HEREPATH = os.path.abspath(os.path.dirname(__file__))
6 | ROOTDIR = os.path.abspath(os.path.join(HEREPATH, os.path.pardir))
7 | SRCDIR = os.path.join(ROOTDIR, "src", "sparse_ir")
8 | DOCDIR = os.path.join(ROOTDIR, "doc")
9 | 
10 | 
11 | def check_whitespace(files):
12 |     errors = []
13 |     blank = 0
14 |     lineno = 0
15 |     line = ""
16 |     def add_error(fmt, *params):
17 |         errors.append((fname, lineno, line, fmt.format(*params)))
18 | 
19 |     for fname in files:
20 |         with open(fname, "r") as file:
21 |             line = ""
22 |             blank = 0
23 |             for lineno, line in enumerate(file, start=1):
24 |                 if line[-1:] != '\n':
25 |                     add_error("file must end in a newline")
26 |                 line = line[:-1]
27 |                 if line:
28 |                     blank = 0
29 |                 else:
30 |                     blank += 1
31 |                 if line[-1:] == '\r':
32 |                     add_error("file must only have unix line endings")
33 |                 if line[-1:] == ' ':
34 |                     add_error("line ends in whitespace")
35 |                 if '\t' in line:
36 |                     add_error("line contains tab characters")
37 |                 if len(line) > 90:
38 |                     add_error("line is too long: {:d} chars", len(line))
39 |             # end of file
40 |             if blank != 0:
41 |                 add_error("file has {:d} superfluous blank lines", blank)
42 | 
43 |     msg = ""
44 |     for fname, lineno, line, lmsg in errors:
45 |         msg += "{}:{}: {}\n".format(fname.name, lineno, lmsg)
46 |     if msg:
47 |         raise ValueError("Whitespace errors\n" + msg)
48 | 
49 | 
50 | def all_files(path, ext):
51 |     for entry in os.scandir(path):
52 |         if entry.is_file() and entry.name.endswith(ext):
53 |             yield entry
54 | 
55 | 
56 | def test_ws_testdir():
57 |     check_whitespace(all_files(HEREPATH, ".py"))
58 | 
59 | 
60 | def test_ws_srcdir():
61 |     check_whitespace(all_files(SRCDIR, ".py"))
62 | 
63 | 
64 | def test_ws_setup():
65 |     check_whitespace(all_files(ROOTDIR, ".py"))
66 | 
67 | 
68 | def test_ws_doc():
69 |     check_whitespace(all_files(DOCDIR, ".rst"))
70 | 
--------------------------------------------------------------------------------
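
The tests above keep repeating one pattern: expand a propagator in IR coefficients, evaluate it on sparse sampling points, and fit back. As a quick orientation, here is a minimal sketch of that round trip. It is illustrative only and not a file of this repository; it uses nothing beyond the public API exercised in the tests (FiniteTempBasis, TauSampling, MatsubaraSampling, and basis.s/basis.v), and the single-pole model and tolerances are assumptions chosen for demonstration.

```python
# Illustrative round-trip sketch (not part of the repository).
import numpy as np
import sparse_ir

beta, wmax = 100.0, 10.0
basis = sparse_ir.FiniteTempBasis('F', beta, wmax, eps=1e-8)

# IR coefficients of a single pole at omega = 1: G_l = -s_l * V_l(omega),
# the same convention used in test_augment.py and test_dlr.py above.
gl = -basis.s * basis.v(1.0)

# tau round trip: evaluate on the sparse tau grid, then fit back.
smpl_tau = sparse_ir.TauSampling(basis)
gl_tau = smpl_tau.fit(smpl_tau.evaluate(gl))

# Matsubara round trip, analogously (the fitted coefficients come back
# complex, with negligible imaginary part).
smpl_wn = sparse_ir.MatsubaraSampling(basis)
gl_wn = smpl_wn.fit(smpl_wn.evaluate(gl))

# Loose tolerances (an assumption): the round trip is limited roughly by
# machine precision times the conditioning of the sampling matrices.
np.testing.assert_allclose(gl_tau, gl, atol=1e-10 * np.abs(gl).max(), rtol=0)
np.testing.assert_allclose(gl_wn, gl, atol=1e-10 * np.abs(gl).max(), rtol=0)
```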