├── .gitattributes
├── .gitignore
├── .travis.yml
├── CMakeLists.txt
├── LICENSE.TXT
├── README.md
├── c++
└── irbasis.hpp
├── database
├── irbasis.h5
└── make_h5.py
├── doc
├── unl.pdf
└── unl.tex
├── python
├── CMakeLists.txt
├── __init__.py
├── irbasis.py
├── requirements.txt
├── setup.cfg
└── setup.py
├── sample
├── api.cpp
├── api.py
├── compile.sh
├── computing_gl.py
├── giwn.py
├── how_to_use_from_Julia.ipynb
├── run_all_python_scripts.sh
├── singular_values.ipynb
├── step_by_step_examples.cpp
├── step_by_step_examples.ipynb
├── uv.py
└── zeros.ipynb
├── script
└── gauss_legendre.py
├── test
├── CMakeLists.txt
├── c++
│ ├── CMakeLists.txt
│ ├── dummy.cpp
│ ├── gtest-all.cc
│ ├── gtest.h
│ ├── gtest_main.cc
│ ├── hdf5.cpp
│ ├── hdf5_test.h5
│ ├── interpolation.cpp
│ ├── mk_hdf5_test_h5.py
│ └── multi_array.cpp
└── python
│ ├── .gitignore
│ ├── CMakeLists.txt
│ ├── check_ulx_vly.py
│ ├── check_unl.py
│ ├── sparse_sampling.py
│ └── utility.py
└── version
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.py text eol=lf
2 | *.hpp text eol=lf
3 | *.cpp text eol=lf
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.gitignore.io/api/macos,python
2 |
3 | ### macOS ###
4 | *.DS_Store
5 | .AppleDouble
6 | .LSOverride
7 |
8 | # Icon must end with two \r
9 | Icon
10 |
11 | # Thumbnails
12 | ._*
13 |
14 | # Files that might appear in the root of a volume
15 | .DocumentRevisions-V100
16 | .fseventsd
17 | .Spotlight-V100
18 | .TemporaryItems
19 | .Trashes
20 | .VolumeIcon.icns
21 | .com.apple.timemachine.donotpresent
22 |
23 | # Directories potentially created on remote AFP share
24 | .AppleDB
25 | .AppleDesktop
26 | Network Trash Folder
27 | Temporary Items
28 | .apdisk
29 |
30 | ### Python ###
31 | # Byte-compiled / optimized / DLL files
32 | __pycache__/
33 | *.py[cod]
34 | *$py.class
35 |
36 | # C extensions
37 | *.so
38 |
39 | # Distribution / packaging
40 | .Python
41 | build/
42 | develop-eggs/
43 | dist/
44 | downloads/
45 | eggs/
46 | .eggs/
47 | lib/
48 | lib64/
49 | parts/
50 | sdist/
51 | var/
52 | wheels/
53 | *.egg-info/
54 | .installed.cfg
55 | *.egg
56 |
57 | # PyInstaller
58 | # Usually these files are written by a python script from a template
59 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
60 | *.manifest
61 | *.spec
62 |
63 | # Installer logs
64 | pip-log.txt
65 | pip-delete-this-directory.txt
66 |
67 | # Unit test / coverage reports
68 | htmlcov/
69 | .tox/
70 | .coverage
71 | .coverage.*
72 | .cache
73 | .pytest_cache/
74 | nosetests.xml
75 | coverage.xml
76 | *.cover
77 | .hypothesis/
78 |
79 | # Translations
80 | *.mo
81 | *.pot
82 |
83 | # Flask stuff:
84 | instance/
85 | .webassets-cache
86 |
87 | # Scrapy stuff:
88 | .scrapy
89 |
90 | # Sphinx documentation
91 | docs/_build/
92 |
93 | # PyBuilder
94 | target/
95 |
96 | # Jupyter Notebook
97 | .ipynb_checkpoints
98 |
99 | # pyenv
100 | .python-version
101 |
102 | # celery beat schedule file
103 | celerybeat-schedule.*
104 |
105 | # SageMath parsed files
106 | *.sage.py
107 |
108 | # Environments
109 | .env
110 | .venv
111 | env/
112 | venv/
113 | ENV/
114 | env.bak/
115 | venv.bak/
116 |
117 | # Spyder project settings
118 | .spyderproject
119 | .spyproject
120 |
121 | # Rope project settings
122 | .ropeproject
123 |
124 | # mkdocs documentation
125 | /site
126 |
127 | # mypy
128 | .mypy_cache/
129 |
130 | .idea
131 |
132 | # End of https://www.gitignore.io/api/macos,python
133 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | sudo: required
3 |
4 | python:
5 | - 3.4
6 | - 3.5
7 | - 3.6
8 | - 3.7
9 | - 3.8
10 |
11 | branches:
12 | only:
13 | - master
14 | - devel
15 | - travis # To debug .travis.yml
16 |
17 | addons:
18 | apt:
19 | packages:
20 | - libhdf5-serial-dev
21 | - libboost-dev
22 |
23 | install:
24 | - pip install -r python/requirements.txt -U
25 |
26 | script:
27 | # Stop on first error
28 | - set -e
29 |
30 | # Build, test irbasis
31 | - export CTEST_OUTPUT_ON_FAILURE=1
32 | - cd $TRAVIS_BUILD_DIR/..
33 | - mkdir build
34 | - cd build
35 | - |
36 | cmake ../irbasis \
37 | -DCMAKE_BUILD_TYPE=Debug
38 | - make
39 | - make test
40 | - python setup.py bdist_wheel
41 | - cd dist
42 | # run sample scripts
43 | - pip install irbasis-*.whl
44 | - pip install scipy matplotlib
45 | - cd $TRAVIS_BUILD_DIR/sample
46 | - bash run_all_python_scripts.sh
47 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required (VERSION 2.8.12)
2 |
3 | # use only CXX compiler
4 | project (irbasis CXX C)
5 |
6 | # Print build type
7 | if(NOT CMAKE_BUILD_TYPE)
8 | message("Using default build type: Release")
9 | set(CMAKE_BUILD_TYPE Release CACHE STRING "" FORCE)
10 | endif()
11 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
12 |
13 | enable_testing(test)
14 | add_subdirectory(test)
15 |
16 | add_subdirectory(python)
17 |
--------------------------------------------------------------------------------
/LICENSE.TXT:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2018 Kazuyoshi Yoshimi, Hiroshi Shinaoka, Chikano Naoya, Junya Otsuki.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Build Status](https://travis-ci.org/SpM-lab/irbasis.svg?branch=master)](https://travis-ci.org/SpM-lab/irbasis)
2 |
3 | irbasis
4 | ======
5 | Open-source database and software for intermediate-representation basis functions of imaginary-time Green's function and Python and C++ libraries
6 |
7 | Detailed instructions are available [online](https://github.com/SpM-lab/irbasis/wiki).
8 | Please also check [our citation policy](https://github.com/SpM-lab/irbasis/wiki/Citation-policy).
9 |
10 | Below we will briefly describe this software.
11 |
12 | # Table of Contents
13 | - [License](#license)
14 | - [Structure](#structure)
15 | - [Installation](#installation)
16 | - [Usage](#usage)
17 |
18 | ## License
19 | This software is released under the MIT License, see LICENSE.TXT.
20 |
21 | ## Structure
22 | We briefly describe files constituting this software below.
23 |
24 | * c++/irbasis.hpp
C++ library
25 | * python/irbasis.py
Python library
26 | * database/irbasis.h5
26 | Database file (Lambda = 10, 10^2, 10^3, 10^4, 10^5, 10^6, 10^7).
27 | * sample/
Directory including samples in C++ and Python
28 | * test/
Unit tests in C++ and Python
29 |
30 | ## Installation
31 | ### Python
32 |
33 | You need to install only a few standard scientific libraries (such as numpy, h5py) shown in [our PyPI project page](https://pypi.org/project/irbasis/).
34 | If you install irbasis through pip, pip will take care of these dependencies properly.
35 |
36 | We strongly recommend installing the irbasis library using the standard Python package system.
37 | This package contains the data file (irbasis.h5) as well.
38 | ```
39 | python -mpip install -U pip
40 | python -mpip install -U irbasis
41 | ```
42 |
43 | Alternatively, we can put [irbasis.py](https://github.com/SpM-lab/irbasis/blob/master/python/irbasis.py) and [irbasis.h5](https://github.com/SpM-lab/irbasis/blob/master/database/irbasis.h5) into your working directory.
44 | You can load irbasis and use the full functionality.
45 |
46 | If you want to run [sample Python scripts](Samples),
47 | please also install additional Python packages (scipy, matplotlib) using the following command.
48 |
49 | ```
50 | pip install scipy matplotlib
51 | ```
52 |
53 |
54 | ### C++
55 |
56 | You need a C++03-compatible compiler.
57 | The use of the C++ irbasis library requires only the HDF5 C library (not C++).
58 |
59 | The C++ library consists of a single header file.
60 | All what you need to do is to include [irbasis.hpp](https://github.com/SpM-lab/irbasis/blob/master/c++/irbasis.hpp) in your C++ project.
61 | The data file [irbasis.h5](https://github.com/SpM-lab/irbasis/blob/master/database/irbasis.h5) will be read at runtime.
62 | Please do not forget to link your executable to the HDF5 C library.
63 |
64 |
65 | ## Usage
66 | In the following, we demonstrate how to use the irbasis database.
67 | The irbasis database is available in Python and C++.
68 | irbasis can calculate the IR basis functions, their Fourier transforms, their derivatives, and the corresponding singular values.
69 |
70 | **In the following, we assume that you have installed the irbasis Python library via pip.**
71 | If not, please modify the sample script files appropriately to specify the location of a database file (see a comment in api.py).
72 |
73 | **Some of sample Python scripts depend on scipy and matplotlib.**
74 |
75 | For other examples, please refer to our online document.
76 |
77 | ### Python
78 | You can download [api.py](https://github.com/SpM-lab/irbasis/blob/master/sample/api.py)
79 | and save it to your working directory.
80 | Then, please run the following command.
81 |
82 | ```shell
83 | python api.py
84 | ```
85 |
86 | You can study also our step-by-step examples in [a jupyter notebook](https://github.com/SpM-lab/irbasis/blob/master/sample/step_by_step_examples.ipynb).
87 |
88 | ### C++
89 | You can download [api.cpp](https://github.com/SpM-lab/irbasis/blob/master/sample/api.cpp) and [step\_by\_step\_examples.cpp](https://github.com/SpM-lab/irbasis/blob/master/sample/step_by_step_examples.cpp) to your working directory.
90 | After copying irbasis.hpp into the same directory,
91 | you can build the sample program as follows (see [compile.sh](https://github.com/SpM-lab/irbasis/blob/master/sample/compile.sh)).
92 |
93 | ```shell
94 | g++ api.cpp -o api -I /usr/local/include -L /usr/local/lib -lhdf5 -DNDEBUG -O3
95 | g++ step_by_step_examples.cpp -o step_by_step_examples -I /usr/local/include -L /usr/local/lib -lhdf5 -DNDEBUG -O3
96 | ```
97 |
98 | Here, we assume that the header file and the library file of the HDF5 C library are installed into "/usr/local/include" and "/usr/local/lib", respectively.
99 | When running the executable, irbasis.h5 must exist in your working directory.
100 |
--------------------------------------------------------------------------------
/c++/irbasis.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 | #include
12 | #include
13 | #include
14 | #include
15 | #include
16 | #include
17 |
18 | #include
19 | #include
20 | #include
21 |
22 | //debug
23 | //#include
24 | //
25 | //#ifdef IRBASIS_USE_EIGEN3
26 | //#include
27 | //#endif
28 |
29 | namespace irbasis {
30 | namespace mp = boost::multiprecision;
31 | typedef mp::number > mpf;
32 |
33 | namespace internal {
34 |
35 | // Simple implementation without meta programming...
36 | template
37 | class multi_array {
38 |
39 | template
40 | friend class multi_array;
41 |
42 | public:
43 | multi_array() : owner_(true), p_data_(), num_elements_(0) {
44 | for (int i = 0; i < DIM; ++i) {
45 | extents_[i] = 0;
46 | }
47 | }
48 |
49 | multi_array(const multi_array& other) {
50 | owner_ = true;
51 | p_data_ = new T[other.num_elements_];
52 | for (auto i=0; iowner_) {
85 | delete[] p_data_;
86 | }
87 | }
88 |
89 | multi_array &operator=(const multi_array &other) {
90 | if (!this->owner_) {
91 | throw std::logic_error("Error: assignment to a view is not supported.");
92 | }
93 |
94 | for (int i = 0; i < DIM; ++i) {
95 | this->extents_[i] = other.extents_[i];
96 | }
97 | this->num_elements_ = other.num_elements_;
98 |
99 | // allocate memoery and copy data
100 | if (this->p_data_ != NULL) {
101 | delete[] this->p_data_;
102 | this->p_data_ = NULL;
103 | }
104 | this->p_data_ = new T[this->num_elements_];
105 |
106 | for (int i = 0; i < this->num_elements_; ++i) {
107 | *(this->p_data_ + i) = *(other.p_data_ + i);
108 | }
109 |
110 | this->owner_ = true;
111 | return *this;
112 | }
113 |
114 | std::size_t extent(int i) const {
115 | assert(i >= 0);
116 | assert(i < DIM);
117 | return extents_[i];
118 | }
119 |
120 | const std::size_t* extents() const {
121 | return &(extents_[0]);
122 | }
123 |
124 | void resize(const std::size_t *const dims) {
125 | if (!owner_) {
126 | throw std::runtime_error("resize is not permitted for a view");
127 | }
128 |
129 | std::size_t tot_size = std::accumulate(dims, dims + DIM, 1, std::multiplies());
130 | delete[] p_data_;
131 | p_data_ = new T[tot_size];
132 | num_elements_ = tot_size;
133 | for (int i = 0; i < DIM; ++i) {
134 | extents_[i] = dims[i];
135 | }
136 | }
137 |
138 | void fill(T value) {
139 | if (!owner_) {
140 | throw std::runtime_error("resize is not permitted for a view");
141 | }
142 | for (int i = 0; i < this->num_elements_; ++i) {
143 | *(this->p_data_ + i) = value;
144 | }
145 | }
146 |
147 | multi_array make_view(std::size_t most_left_index) const {
148 | multi_array view;
149 | view.owner_ = false;
150 | std::size_t new_size = 1;
151 | for (int i = 0; i < DIM; ++i) {
152 | if (this->extents_[i] > 100000) {
153 | throw std::runtime_error("Invalid shape!");
154 | }
155 | }
156 | for (int i = 0; i < DIM - 1; ++i) {
157 | view.extents_[i] = this->extents_[i + 1];
158 | new_size *= view.extents_[i];
159 | }
160 | view.num_elements_ = new_size;
161 | view.p_data_ = p_data_ + most_left_index * new_size;
162 |
163 | return view;
164 | }
165 |
166 | multi_array make_matrix_view(std::size_t size1, std::size_t size2) const {
167 | multi_array view;
168 | view.owner_ = false;
169 | view.extents_[0] = size1;
170 | view.extents_[1] = size2;
171 | view.num_elements_ = num_elements_;
172 | view.p_data_ = p_data_;
173 |
174 | return view;
175 | }
176 |
177 | std::size_t num_elements() const {
178 | return num_elements_;
179 | }
180 |
181 | bool is_view() const {
182 | return !owner_;
183 | }
184 |
185 | T *origin() const {
186 | return p_data_;
187 | }
188 |
189 | inline T &operator()(int i) {
190 | assert(DIM == 1);
191 | int idx = i;
192 | assert(idx >= 0 && idx < num_elements());
193 | return *(p_data_ + idx);
194 | }
195 |
196 | inline const T &operator()(int i) const {
197 | assert(DIM == 1);
198 | int idx = i;
199 | assert(idx >= 0 && idx < num_elements());
200 | return *(p_data_ + idx);
201 | }
202 |
203 | inline T &operator()(int i, int j) {
204 | assert(DIM == 2);
205 | int idx = extents_[1] * i + j;
206 | assert(idx >= 0 && idx < num_elements());
207 | return *(p_data_ + idx);
208 | }
209 |
210 | inline const T &operator()(int i, int j) const {
211 | assert(DIM == 2);
212 | int idx = extents_[1] * i + j;
213 | assert(idx >= 0 && idx < num_elements());
214 | return *(p_data_ + idx);
215 | }
216 |
217 | inline T &operator()(int i, int j, int k) {
218 | assert(DIM == 3);
219 | int idx = (i * extents_[1] + j) * extents_[2] + k;
220 | assert(idx >= 0 && idx < num_elements());
221 | return *(p_data_ + idx);
222 | }
223 |
224 | inline const T &operator()(int i, int j, int k) const {
225 | assert(DIM == 3);
226 | int idx = (i * extents_[1] + j) * extents_[2] + k;
227 | assert(idx >= 0 && idx < num_elements());
228 | return *(p_data_ + idx);
229 | }
230 |
231 | private:
232 | bool owner_;
233 | T *p_data_;
234 | std::size_t num_elements_;
235 | std::size_t extents_[DIM];
236 | };
237 |
238 | template
239 | void multiply(const multi_array &A, const multi_array &B, multi_array &AB) {
240 | std::size_t N1 = A.extent(0);
241 | std::size_t N2 = A.extent(1);
242 | std::size_t N3 = B.extent(1);
243 |
244 | assert(B.extent(0) == N2);
245 |
246 | if (AB.extent(0) != N1 || AB.extent(1) != N3) {
247 | std::size_t dims[2];
248 | dims[0] = N1;
249 | dims[1] = N3;
250 | AB.resize(dims);
251 | }
252 |
253 | AB.fill(0);
254 | for (int i = 0; i < N1; ++i) {
255 | for (int k = 0; k < N2; ++k) {
256 | for (int j = 0; j < N3; ++j) {
257 | AB(i, j) += A(i, k) * B(k, j);
258 | }
259 | }
260 | }
261 |
262 | }
263 |
264 | // https://www.physics.ohio-state.edu/~wilkins/computing/HDF/hdf5tutorial/examples/C/h5_rdwt.c
265 | // https://support.hdfgroup.org/ftp/HDF5/current/src/unpacked/examples/h5_read.c
266 | template hid_t get_native_type();
267 |
268 | template<>
269 | inline
270 | hid_t
271 | get_native_type() {
272 | return H5T_NATIVE_DOUBLE;
273 | }
274 |
275 | template<>
276 | inline
277 | hid_t
278 | get_native_type() {
279 | return H5T_NATIVE_INT;
280 | }
281 |
282 | typedef struct {
283 | double re; /*real part*/
284 | double im; /*imaginary part*/
285 | } complex_t;
286 |
287 | template<>
288 | inline
289 | hid_t
290 | get_native_type >() {
291 | hid_t complex_id = H5Tcreate(H5T_COMPOUND, sizeof(complex_t));
292 | H5Tinsert(complex_id, "r", HOFFSET(complex_t, re), H5T_NATIVE_DOUBLE);
293 | H5Tinsert(complex_id, "i", HOFFSET(complex_t, im), H5T_NATIVE_DOUBLE);
294 | return complex_id;
295 | }
296 |
297 | // read a scalar
298 | template
299 | T hdf5_read_scalar(hid_t &file, const std::string &name) {
300 | hid_t dataset = H5Dopen2(file, name.c_str(), H5P_DEFAULT);
301 | if (dataset < 0) {
302 | throw std::runtime_error("Failed to load dataset" + name);
303 | }
304 | T data;
305 | H5Dread(dataset, get_native_type(), H5S_ALL, H5S_ALL, H5P_DEFAULT, &data);
306 | H5Dclose(dataset);
307 | return data;
308 | }
309 |
310 | // read array of double
311 | template
312 | void hdf5_read_double_array(hid_t &file, const std::string &name, std::vector &extents,
313 | std::vector &data) {
314 | hid_t dataset = H5Dopen2(file, name.c_str(), H5P_DEFAULT);
315 | if (dataset < 0) {
316 | throw std::runtime_error("Failed to load dataset" + name);
317 | }
318 | hid_t space = H5Dget_space(dataset);
319 | std::vector dims(DIM);
320 | int n_dims = H5Sget_simple_extent_dims(space, &dims[0], NULL);
321 | assert(n_dims == DIM);
322 | std::size_t tot_size = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies());
323 | data.resize(tot_size);
324 | extents.resize(DIM);
325 | for (int i = 0; i < DIM; ++i) {
326 | extents[i] = static_cast(dims[i]);
327 | }
328 | H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data[0]);
329 | H5Dclose(dataset);
330 | }
331 |
332 | template
333 | void hdf5_read_mpf_array(hid_t &file, const std::string &name, std::vector &extents,
334 | std::vector &data) {
335 | std::vector data_tmp;
336 | hdf5_read_double_array(file, name, extents, data_tmp);
337 | data.resize(data_tmp.size());
338 | for (int i = 0; i< data.size(); ++i) {
339 | data[i] = data_tmp[i];
340 | }
341 |
342 | hdf5_read_double_array(file, name+"_corr", extents, data_tmp);
343 | for (int i = 0; i< data.size(); ++i) {
344 | data[i] += data_tmp[i];
345 | }
346 |
347 | }
348 |
349 | // read a multi_array
350 | template
351 | multi_array load_multi_array(hid_t &file, const std::string &name) {
352 | hid_t dataset = H5Dopen2(file, name.c_str(), H5P_DEFAULT);
353 | if (dataset < 0) {
354 | throw std::runtime_error("Faild to open a dataset.");
355 | }
356 | hid_t space = H5Dget_space(dataset);
357 | std::vector dims(DIM);
358 | int n_dims = H5Sget_simple_extent_dims(space, &dims[0], NULL);
359 | assert(n_dims == DIM);
360 | std::size_t
361 | tot_size = static_cast(std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()));
362 | std::vector extents(DIM);
363 | for (int i = 0; i < DIM; ++i) {
364 | extents[i] = static_cast(dims[i]);
365 | }
366 | multi_array a;
367 | a.resize(&extents[0]);
368 | H5Dread(dataset, get_native_type(), H5S_ALL, H5S_ALL, H5P_DEFAULT, a.origin());
369 | H5Dread(dataset, get_native_type(), H5S_ALL, H5S_ALL, H5P_DEFAULT, a.origin());
370 | H5Dclose(dataset);
371 | return a;
372 | }
373 |
374 | template
375 | multi_array load_mpf_multi_array(hid_t &file, const std::string &name) {
376 | multi_array data = load_multi_array(file, name);
377 | multi_array data_corr = load_multi_array(file, name + "_corr");
378 |
379 | multi_array result;
380 | result.resize(data.extents());
381 |
382 | for (int i=0; i §ion_edges, double x) {
391 | std::size_t idx = std::upper_bound(
392 | section_edges.origin(),
393 | section_edges.origin() + section_edges.num_elements(),
394 | x) - section_edges.origin() - 1;
395 | auto r = std::min(idx, section_edges.num_elements() - 2);
396 | assert(r >= 0);
397 | assert(r < section_edges.extent(0));
398 | return r;
399 | }
400 |
401 | inline
402 | void compute_legendre(double x, std::vector &val) {
403 | for (int l = 0; l < val.size(); l++) {
404 | if (l == 0) {
405 | val[l] = 1;
406 | } else if (l == 1) {
407 | val[l] = x;
408 | } else {
409 | val[l] = ((2 * l - 1) * x * val[l - 1] - (l - 1) * val[l - 2]) / l;
410 | }
411 | }
412 | }
413 |
414 |
415 | inline
416 | int even_odd_sign(const int l) {
417 | return (l % 2 == 0 ? 1 : -1);
418 | }
419 |
420 | inline
421 | multi_array, 2>
422 | compute_unl_tail(std::vector &w_vec,
423 | const std::string &statistics,
424 | const multi_array &derive_x1,
425 | const int n) {
426 | // n : target number to calculate
427 | int sign = statistics == "B" ? 1 : -1;
428 | std::size_t n_iw = w_vec.size();
429 | std::size_t Nl = derive_x1.extent(0);
430 | std::size_t num_deriv = derive_x1.extent(1);
431 | multi_array, 2> result(n_iw, Nl);
432 | result.fill(std::complex(0.0, 0.0));
433 | if (n > 0)
434 | num_deriv -= n;
435 | multi_array, 2> coeffs_nm(n_iw, num_deriv);
436 | coeffs_nm.fill(std::complex(0.0, 0.0));
437 |
438 | //coeffs_nm
439 | for (int i_iw = 0; i_iw < n_iw; i_iw++) {
440 | if (statistics == "B" && w_vec[i_iw] == 0) {
441 | continue;
442 | }
443 | std::complex fact = std::complex(0.0, 1.0 / w_vec[i_iw]);
444 | coeffs_nm(i_iw, 0) = fact;
445 | for (int m = 1; m < num_deriv; m++) {
446 | coeffs_nm(i_iw, m) = fact * coeffs_nm(i_iw, m - 1);
447 | }
448 | }
449 |
450 | //coeffs_lm ()
451 | multi_array, 2> coeffs_lm(Nl, num_deriv);
452 | coeffs_lm.fill(std::complex(0.0, 0.0));
453 | for (int l = 0; l < Nl; l++) {
454 | for (int m = 0; m < num_deriv; m++) {
455 | coeffs_lm(l, m) = (1.0 - sign * even_odd_sign(l + m)) * derive_x1(l, m);
456 | }
457 | }
458 |
459 | for (int i = 0; i < n_iw; i++) {
460 | for (int k = 0; k < Nl; k++) {
461 | for (int j = 0; j < num_deriv; j++) {
462 | result(i, k) += coeffs_nm(i, j) * coeffs_lm(k, j);
463 | }
464 | result(i, k) *= -sign / sqrt(2.0);
465 | }
466 | }
467 | return result;
468 | }
469 |
470 | struct func {
471 | void load_from_h5(hid_t file, const std::string &prefix) {
472 | data = internal::load_multi_array(file, prefix + std::string("/data"));
473 | np = internal::hdf5_read_scalar(file, prefix + std::string("/np"));
474 | ns = internal::hdf5_read_scalar(file, prefix + std::string("/ns"));
475 | nl = data.extent(0);
476 | section_edges = internal::load_mpf_multi_array<1>(file, prefix + std::string("/section_edges"));
477 |
478 | std::size_t extents[3];
479 | extents[0] = ns;
480 | extents[1] = np;
481 | extents[2] = nl;
482 | data_for_vec.resize(&extents[0]);
483 | for (int l = 0; l < nl; ++l) {
484 | for (int s = 0; s < ns; ++s) {
485 | for (int p = 0; p < np; ++p) {
486 | data_for_vec(s, p, l) = data(l, s, p);
487 | }
488 | }
489 | }
490 | }
491 | multi_array section_edges;
492 | multi_array data; //(nl, ns, np)
493 | multi_array data_for_vec; //(ns, np, nl). Just a copy of data.
494 | int np;
495 | int ns;
496 | int nl;
497 | };
498 |
499 | struct ref {
500 | multi_array data;
501 | multi_array max;
502 | };
503 |
504 | }
505 |
506 | class basis {
507 | public:
508 | basis() {}
509 |
510 | basis(
511 | const std::string &file_name,
512 | const std::string &prefix = ""
513 | ) {
514 | hid_t file = H5Fopen(file_name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
515 |
516 | if (file < 0) {
517 | throw std::runtime_error("Failed to open " + file_name + "!");
518 | }
519 |
520 | //read info
521 | Lambda_ = internal::hdf5_read_scalar(file, prefix + std::string("/info/Lambda"));
522 | dim_ = internal::hdf5_read_scalar(file, prefix + std::string("/info/dim"));
523 | statistics_ = internal::hdf5_read_scalar(file, prefix + std::string("/info/statistics")) == 0 ? "B" : "F";
524 |
525 | //read sl
526 | sl_ = internal::load_multi_array(file, prefix + std::string("/sl"));
527 |
528 | //read ulx
529 | ulx_.load_from_h5(file, prefix + "/ulx");
530 |
531 | //read ref_ulx
532 | ref_ulx_.data = internal::load_multi_array(file, prefix + std::string("/ulx/ref/data"));
533 | ref_ulx_.max = internal::load_multi_array(file, prefix + std::string("/ulx/ref/max"));
534 |
535 | //read vly
536 | vly_.load_from_h5(file, prefix + "/vly");
537 |
538 | //read ref_vly
539 | ref_vly_.data = internal::load_multi_array(file, prefix + std::string("/vly/ref/data"));
540 | ref_vly_.max = internal::load_multi_array(file, prefix + std::string("/vly/ref/max"));
541 |
542 | H5Fclose(file);
543 |
544 | std::size_t np = ulx_.np;
545 | {
546 | std::vector coeffs(np);
547 | for (int p=0; p=0; m-=2) {
557 | deriv_mat_(m, l) = (1/coeffs[m]) * (2*m + 1) * coeffs[l];
558 | }
559 | }
560 | }
561 |
562 | norm_coeff_.resize(np);
563 | for (int p=0; p= 0 && l < dim());
590 | return sl_(l);
591 | }
592 |
593 | double ulx(int l, double x) const {
594 | using namespace internal;
595 |
596 | if (std::abs(x) > 1) {
597 | throw std::runtime_error("x must be in [-1,1]!");
598 | }
599 |
600 | if (x >= 0) {
601 | return eval(x, ulx_.data.make_view(l), ulx_.section_edges);
602 | } else {
603 | return eval(-x, ulx_.data.make_view(l), ulx_.section_edges) * even_odd_sign(l);
604 | }
605 | }
606 |
607 | std::vector > check_ulx() const {
608 | double ulx_max = ref_ulx_.max(2);
609 | std::vector > ref_data(ref_ulx_.data.extent(0));
610 | int count = 0;
611 | for (int i = 0; i < ref_ulx_.data.extent(0); i++) {
612 | if (ref_ulx_.data(i, 2) == 0) {
613 | ref_data[i].push_back(ref_ulx_.data(i, 0));
614 | ref_data[i].push_back(ref_ulx_.data(i, 1));
615 | ref_data[i].push_back(
616 | fabs(ulx(ref_ulx_.data(i, 0) - 1, ref_ulx_.data(i, 1)) - ref_ulx_.data(i, 3)) / ulx_max);
617 | count++;
618 | }
619 | }
620 | ref_data.resize(count);
621 | return ref_data;
622 | }
623 |
624 | std::vector > check_vly() const {
625 | double vly_max = ref_vly_.max(2);
626 | std::vector > ref_data(ref_vly_.data.extent(0));
627 | int count = 0;
628 | for (int i = 0; i < ref_vly_.data.extent(0); i++) {
629 | if (ref_vly_.data(i, 2) == 0) {
630 | ref_data[i].push_back(ref_vly_.data(i, 0));
631 | ref_data[i].push_back(ref_vly_.data(i, 1));
632 | ref_data[i].push_back(
633 | fabs(vly(ref_vly_.data(i, 0) - 1, ref_vly_.data(i, 1)) - ref_vly_.data(i, 3)) / vly_max);
634 | count++;
635 | }
636 | }
637 | ref_data.resize(count);
638 | return ref_data;
639 | }
640 |
641 | double d_ulx(int l, double x, std::size_t order) const {
642 | using namespace internal;
643 |
644 | if (std::abs(x) > 1) {
645 | throw std::runtime_error("x must be in [-1,1]!");
646 | }
647 |
648 | if (x >= 0) {
649 | return eval_derivative(x, order, ulx_.data.make_view(l), ulx_.section_edges);
650 | } else {
651 | return eval_derivative(-x, order, ulx_.data.make_view(l), ulx_.section_edges) * even_odd_sign(l + order);
652 | }
653 | }
654 |
655 | double vly(int l, double y) const {
656 | using namespace internal;
657 |
658 | if (std::abs(y) > 1) {
659 | throw std::runtime_error("y must be in [-1,1]!");
660 | }
661 | if (y >= 0) {
662 | return eval(y, vly_.data.make_view(l), vly_.section_edges);
663 | } else {
664 | return eval(-y, vly_.data.make_view(l), vly_.section_edges) * even_odd_sign(l);
665 | }
666 | }
667 |
668 | double d_vly(int l, double y, std::size_t order) const {
669 | using namespace internal;
670 |
671 | if (std::abs(y) > 1) {
672 | throw std::runtime_error("y must be in [-1,1]!");
673 | }
674 | if (y >= 0) {
675 | return eval_derivative(y, order, vly_.data.make_view(l), vly_.section_edges);
676 | } else {
677 | return eval_derivative(-y, order, vly_.data.make_view(l), vly_.section_edges) * even_odd_sign(l + order);
678 | }
679 | }
680 |
681 | double get_ref_ulx(std::size_t order) const {
682 | double ref_data;
683 | for (int i = 0; i < ref_ulx_.data.extent(0); i++) {
684 | if (ref_ulx_.data(i, 2) == order) {
685 | ref_data = ref_ulx_.data(i, 3);
686 | }
687 | }
688 | return ref_data;
689 | }
690 |
691 | int num_sections_x() const {
692 | return ulx_.data.extent(1);
693 | }
694 |
695 | double section_edge_x(std::size_t index) const {
696 | assert(index >= 0 && index <= num_sections_x());
697 | return ulx_.section_edges(index).convert_to();
698 | }
699 |
700 | int num_sections_y() const {
701 | return vly_.data.extent(1);
702 | }
703 |
704 | std::vector > > compute_unl(long long n) const {
705 | std::vector n_vec;
706 | n_vec.push_back(n);
707 | return compute_unl(n_vec);
708 | }
709 |
710 | std::vector > > compute_unl(const std::vector &n) const {
711 | using namespace internal;
712 |
713 | typedef std::complex dcomplex;
714 | typedef std::complex mcomplex; // may be illegal to instantiate std::complex with mpf?
715 |
716 | mpf mpi = boost::math::constants::pi();
717 |
718 | int num_n = n.size();
719 |
720 | std::vector o_vec(n.size());
721 | if (this->statistics_ == "F") {
722 | for (int i = 0; i < num_n; i++) {
723 | o_vec[i] = (mpf(2) * n[i] + 1);
724 | }
725 | } else {
726 | for (int i = 0; i < num_n; i++) {
727 | o_vec[i] = (mpf(2) * n[i]);
728 | }
729 | }
730 |
731 | //w_vec = 0.5 * pi * o_vec
732 | std::vector w_vec(o_vec);
733 | std::transform(w_vec.begin(), w_vec.end(), w_vec.begin(), std::bind1st(std::multiplies(), mpi/2));
734 | std::vector w_vec_f(num_n);
735 | for (int i=0; i();
737 | }
738 |
739 | std::size_t num_deriv = this->ulx_.data.extent(2);
740 |
741 | //Compute tail
742 | std::vector > replaced_with_tail(num_n, std::vector(this->dim_, 0));
743 | multi_array deriv_x1(this->dim_, num_deriv);
744 | deriv_x1.fill(0.0);
745 | std::vector d_ulx_result;
746 | for (int l = 0; l < dim_; ++l) {
747 | for (int p = 0; p < num_deriv; ++p) {
748 | deriv_x1(l, p) = d_ulx(l, 1.0, p);
749 | }
750 | }
751 |
752 | multi_array, 2> unl_tail = compute_unl_tail(w_vec_f, statistics_, deriv_x1, -1);
753 | multi_array, 2> unl_tail_without_last_two = compute_unl_tail(w_vec_f, statistics_, deriv_x1, 2);
754 |
755 | for (int i = 0; i < num_n; i++) {
756 | if (statistics_ == "B" && n[i] == 0)
757 | continue;
758 | for (int l = 0; l < dim_; ++l) {
759 | if (std::abs((unl_tail(i, l) - unl_tail_without_last_two(i, l)) / unl_tail(i, l)) < 1e-10) {
760 | replaced_with_tail[i][l] = 1;
761 | }
762 | }
763 | }
764 |
765 | multi_array,2> tilde_unl = compute_tilde_unl_fast(w_vec);
766 |
767 | std::vector > > result_vec(num_n, std::vector >(dim_, 0));
768 | int sign_shift = statistics_ == "F" ? 1 : 0;
769 | for (int l = 0; l < dim_; ++l) {
770 | if ((l + sign_shift)%2 == 1) {
771 | for (int i=0; i(
773 | 0, 2*tilde_unl(i, l).imag().convert_to()
774 | );
775 | }
776 | } else {
777 | for (int i=0; i();
779 | }
780 | }
781 | }
782 |
783 | //Overwrite by tail
784 | for (int i = 0; i < num_n; i++) {
785 | for (int l = 0; l < dim_; l++) {
786 | if (replaced_with_tail[i][l] == 1) {
787 | result_vec[i][l] = unl_tail(i, l);
788 | }
789 | }
790 | }
791 |
792 | return result_vec;
793 | }
794 |
795 | double section_edge_y(std::size_t index) const {
796 | assert(index >= 0 && index <= num_sections_y());
797 | return vly_.section_edges(index).convert_to();
798 | }
799 |
800 |
801 | public://debug
802 | double Lambda_;
803 | int dim_;
804 | std::string statistics_;
805 | internal::multi_array sl_;
806 | internal::func ulx_;
807 | internal::func vly_;
808 | internal::ref ref_ulx_;
809 | internal::ref ref_vly_;
810 | internal::multi_array deriv_mat_;
811 | std::vector norm_coeff_;
812 |
813 | public://debug
814 | // Evaluate the value of function at given x
815 | double eval(double x, const internal::multi_array &data, const internal::multi_array §ion_edges) const {
816 | if (x < section_edges(0) || x > section_edges(section_edges.extent(0)-1)) {
817 | throw std::runtime_error("Invalid x!");
818 | }
819 | std::size_t section_idx = find_section(section_edges, x);
820 | return eval_impl(x, section_edges(section_idx), section_edges(section_idx+1), data.make_view(section_idx));
821 | };
822 |
823 | double eval_impl(double x, mpf x_s, mpf x_sp, const internal::multi_array &coeffs) const {
824 | mpf dx = x_sp - x_s;
825 | mpf tilde_x = (2*x - x_sp - x_s)/dx;
826 |
827 | std::vector leg_vals(coeffs.extent(0));
828 | internal::compute_legendre(tilde_x.convert_to(), leg_vals);
829 | double eval_result = 0.0;
830 | for (int p=0; p());
834 | }
835 |
836 | inline
837 | internal::multi_array
838 | differentiate_coeff(const internal::multi_array &coeffs, std::size_t order) const {
839 | const std::size_t np = coeffs.num_elements();
840 |
841 | internal::multi_array tmp2(np, 1);
842 | internal::multi_array tmp(np, 1);
843 | for (int i=0; i coeffs_deriv(np);
851 | for (int p=0; p &data,
860 | const internal::multi_array §ion_edges,
861 | int section = -1) const {
862 | using namespace internal;
863 | std::size_t section_idx = section >= 0 ? section : find_section(section_edges, x);
864 |
865 | multi_array coeffs_deriv = differentiate_coeff(data.make_view(section_idx), order);
866 | double dx = static_cast(section_edges(section_idx+1) - section_edges(section_idx)).convert_to();
867 | return eval_impl(x, section_edges(section_idx), section_edges(section_idx+1), coeffs_deriv) * std::pow(2/dx, order);
868 | }
869 |
870 | internal::multi_array,2> compute_tilde_unl_fast(const std::vector& w_vec) const {
871 | const int num_n = w_vec.size();
872 | const int np = ulx_.np;
873 |
874 | typedef std::complex mcomplex;
875 | typedef std::complex dcomplex;
876 |
877 | internal::multi_array tilde_unl(num_n, dim_);
878 | tilde_unl.fill(mcomplex(0,0));
879 |
880 | internal::multi_array tmp_lp(dim_, np);
881 | internal::multi_array tmp_np(num_n, np);
882 | std::vector exp_n(num_n);
883 |
884 | for (int s=0; s())/2;
894 | for (int l=0; l(c.convert_to(), s.convert_to());
907 | }
908 | for (int n=0; n(dx * w_vec[n]/2).convert_to();
910 | dcomplex phase_p(1, 0);
911 | for (int p = 0; p < np; ++p) {
912 | if (w_tmp >= 0) {
913 | tmp_np(n, p) = 2.0 * phase_p * boost::math::sph_bessel(p, w_tmp) * exp_n[n];
914 | } else {
915 | tmp_np(n, p) = std::conj(2.0 * phase_p * boost::math::sph_bessel(p, -w_tmp)) * exp_n[n];
916 | }
917 | phase_p *= dcomplex(0, 1);
918 | }
919 | }
920 |
921 | for (int n=0; n eps:
71 | #print(a,b)
72 | half_point = 0.5*(a+b)
73 | if ulx(half_point) * u_a > 0:
74 | a = half_point
75 | else:
76 | b = half_point
77 | zeros.append(0.5*(a+b))
78 | return numpy.array(zeros)
79 |
80 | def _get_max_abs_value(self, l, basis, type):
81 | Nl = l
82 | if type == "ulx":
83 | func_l = (lambda x : basis.ulx(Nl-1,x))
84 | func_l_derivative = (lambda x : basis.ulx_derivative(Nl-1,x,1))
85 | elif type == "vly":
86 | func_l = (lambda x : basis.vly(Nl-1,x))
87 | func_l_derivative = (lambda x : basis.vly_derivative(Nl-1,x,1))
88 | else:
89 | return None
90 | zeros_data=self._find_zeros(func_l_derivative)
91 | values_zeros = numpy.array( [ abs(func_l(_x)) for _x in zeros_data] )
92 | max_index = numpy.argmax(values_zeros)
93 | max_point=zeros_data[max_index]
94 | if abs(func_l(1.0)) > values_zeros[max_index]:
95 | max_point = 1.0
96 | elif abs(func_l(-1.0)) > values_zeros[max_index]:
97 | max_point = -1.0
98 | return (int(l), max_point, abs(func_l(max_point)))
99 |
100 | def save_ref_values(self, basis):
101 | Nl = self._dim
102 | Lambda = self._Lambda
103 | dir = self._prefix_name
104 |
105 | if Nl % 2 == 1 : Nl-=1
106 | #Get ulx data
107 | points=self._get_max_abs_value(Nl, basis, "ulx")
108 | edges = numpy.array([basis.section_edge_ulx(s) for s in range(basis.num_sections_ulx()+1)])
109 | Region=numpy.append(numpy.linspace(edges[0], edges[1], 10),\
110 | numpy.linspace(edges[basis.num_sections_ulx()-1], edges[basis.num_sections_ulx()], 10))
111 | ulx_data = numpy.array( [ (int(Nl), _x, 0, basis.ulx(Nl-1, _x)) for _x in Region] )
112 | for _order in range(1, 3):
113 | ulx_data = numpy.append(ulx_data, numpy.array( [ (int(Nl), 1.0, _order, basis.ulx_derivative(Nl-1, 1.0, _order))]), axis=0 )
114 | self._write_data(dir+"/ulx/ref/max", data=points)
115 | self._write_data(dir + "/ulx/ref/data", data=ulx_data)
116 |
117 | #Get vly data
118 | points=self._get_max_abs_value(Nl, basis, "vly")
119 | edges = numpy.array([basis.section_edge_vly(s) for s in range(basis.num_sections_vly()+1)])
120 | Region = numpy.append(numpy.linspace(edges[0], edges[1], 10),\
121 | numpy.linspace(edges[basis.num_sections_vly()-1], edges[basis.num_sections_vly()], 10))
122 | vly_data = numpy.array( [ (int(Nl), _y, 0, basis.vly(Nl-1, _y)) for _y in Region] )
123 | for _order in range(1, 3):
124 | numpy.append(vly_data, numpy.array([(int(Nl), 1.0, _order, basis.vly_derivative(Nl-1, 1.0, _order))]), axis=0)
125 | self._write_data(dir+"/vly/ref/max", data=points)
126 | self._write_data(dir+"/vly/ref/data", data=vly_data)
127 |
if __name__ == '__main__':

    parser = argparse.ArgumentParser(
        prog='save.py',
        description='Output results to hdf5 file.',
        epilog='end',
        add_help=True,
    )

    parser.add_argument('-o', '--output', action='store', dest='outputfile',
                        default='irbasis.h5',
                        type=str, choices=None,
                        help=('Path to output hdf5 file.'),
                        metavar=None)
    parser.add_argument('-i', '--input', action='store', dest='inputfile',
                        type=str, choices=None,
                        required=True,
                        help=('Path to input file.'),
                        metavar=None)
    parser.add_argument('-l', '--lambda', action='store', dest='lambda',
                        required=True,
                        type=float, choices=None,
                        help=('Value of lambda.'),
                        metavar=None)
    parser.add_argument('-p', '--prefix', action='store', dest='prefix',
                        type=str, choices=None,
                        default='/',
                        help=('Data will be stored in this HF5 group.'),
                        metavar=None)

    args = parser.parse_args()

    # Bail out early if the input basis file is missing.
    if not os.path.exists(args.inputfile):
        print("Input file does not exist.")
        exit(-1)
    b = irlib.loadtxt(args.inputfile)

    h5file = h5py.File(args.outputfile, "a")
    irset = BasisSet(h5file, args.prefix)
    nl = b.dim()

    # Basic metadata and singular values.
    irset.set_info(b.Lambda(), nl, b.get_statistics_str())
    irset.set_sl(numpy.array([b.sl(i) for i in range(0, nl)]))

    # Piecewise-polynomial coefficients of u_l(x).
    ns = b.num_sections_ulx()
    n_local_poly = b.num_local_poly_ulx()
    coeff = numpy.array(
        [[[b.coeff_ulx(l, s, p) for p in range(n_local_poly)]
          for s in range(ns)] for l in range(nl)], dtype=float)
    section_edge_ulx = numpy.array([b.section_edge_ulx(i) for i in range(ns + 1)])
    irset.set_func("ulx", coeff, n_local_poly, ns, section_edge_ulx)

    # Piecewise-polynomial coefficients of v_l(y).
    ns = b.num_sections_vly()
    n_local_poly = b.num_local_poly_vly()
    coeff = numpy.array(
        [[[b.coeff_vly(l, s, p) for p in range(n_local_poly)]
          for s in range(ns)] for l in range(nl)], dtype=float)
    section_edge_vly = numpy.array([b.section_edge_vly(i) for i in range(ns + 1)])
    irset.set_func("vly", coeff, n_local_poly, ns, section_edge_vly)

    irset.save_ref_values(b)

    h5file.flush()
    h5file.close()
200 |
--------------------------------------------------------------------------------
/doc/unl.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpM-lab/irbasis/c38e044887a1fdb6f0b5495ba4cebd6d9f724d68/doc/unl.pdf
--------------------------------------------------------------------------------
/doc/unl.tex:
--------------------------------------------------------------------------------
1 | \documentclass[disablejfam,12pt]{article}
2 | \usepackage{graphicx}
3 | \usepackage{color}
4 | \pagestyle{empty}
5 | \setlength{\textheight}{240mm}
6 | \setlength{\textwidth}{160mm}
7 | \setlength{\columnsep}{5mm}
8 | \setlength{\topmargin}{-30mm}
9 | \setlength{\oddsidemargin}{0mm}
10 | \setlength{\evensidemargin}{0mm}
11 | \usepackage{amssymb}
12 | \usepackage{amsmath}
13 | \usepackage{braket}
14 |
15 | \newcommand{\mathi}{\ensuremath{\mathrm{i}}}
16 | \newcommand{\mi}{\ensuremath{\mathrm{i}}}
17 | \newcommand{\barT}{\ensuremath{\bar{T}}}
18 |
19 | \def\deltaB{\delta_{\alpha, \mathrm{B}}}
20 | \def\deltaF{\delta_{\alpha, \mathrm{F}}}
21 |
22 | \author{Hiroshi SHINAOKA}
23 |
24 | \begin{document}
25 | \title{Matsubara representation}
26 | \maketitle
27 | \thispagestyle{empty}
28 | \begin{align}
29 | u_{nl}^\alpha &\equiv \frac{1}{\sqrt{2}} \int_{-1}^{1} d x~e^{\mathi \pi \{n+(1/2)\deltaF\}(x+1)} u^\alpha_l(x),\label{eq:unl}\\
30 | &= \tilde{u}_{nl} + (-1)^{\deltaF+ l}\tilde{u}_{nl}^*, \\
31 | \tilde{u}_{nl}^\alpha &\equiv \frac{1}{\sqrt{2}} \int_{0}^{1} d x~e^{\mathi \pi \{n+(1/2)\deltaF\}(x+1)} u^\alpha_l(x),\\
32 | &= \sum_s \frac{\sqrt{\Delta x_s}}{2} \sqrt{p+\frac{1}{2}}e^{i\omega((x_{s+1}+x_s)/2+1)} \int_{-1}^1 dxP_p(x)e^{i\omega \Delta x_s/2 x}.\\
33 | \int_{-1}^1 dx P_l(x)e^{iax} &= 2i^l j_l(a),\\
34 | \omega &\equiv \pi \{n+(1/2)\deltaF\}.
35 | \end{align}
36 |
37 | \end{document}
38 |
--------------------------------------------------------------------------------
/python/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Copy files into the build directory
2 | configure_file(${CMAKE_SOURCE_DIR}/python/irbasis.py ${CMAKE_BINARY_DIR}/irbasis/irbasis.py COPYONLY)
3 | configure_file(${CMAKE_SOURCE_DIR}/python/__init__.py ${CMAKE_BINARY_DIR}/irbasis/__init__.py COPYONLY)
4 | configure_file(${CMAKE_SOURCE_DIR}/version ${CMAKE_BINARY_DIR}/irbasis/version COPYONLY)
5 | configure_file(${CMAKE_SOURCE_DIR}/python/setup.py ${CMAKE_BINARY_DIR}/setup.py COPYONLY)
6 | configure_file(${CMAKE_SOURCE_DIR}/python/setup.cfg ${CMAKE_BINARY_DIR}/setup.cfg COPYONLY)
7 | configure_file(${CMAKE_SOURCE_DIR}/database/irbasis.h5 ${CMAKE_BINARY_DIR}/irbasis/irbasis.h5 COPYONLY)
8 | configure_file(${CMAKE_SOURCE_DIR}/README.md ${CMAKE_BINARY_DIR}/README.md COPYONLY)
9 |
--------------------------------------------------------------------------------
/python/__init__.py:
--------------------------------------------------------------------------------
1 | from .irbasis import load, basis, sampling_points_matsubara, __version__
2 |
--------------------------------------------------------------------------------
/python/irbasis.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 | import numpy
5 | import h5py
6 | from numpy.polynomial.legendre import legval, legder
7 | import scipy.special
8 |
9 | # Get version string
10 | here = os.path.abspath(os.path.dirname(__file__))
11 | with open(os.path.join(here, 'version'), 'r', encoding='ascii') as f:
12 | __version__ = f.read().strip()
13 |
14 | def _check_type(obj, *types):
15 | if not isinstance(obj, types):
16 | raise RuntimeError(
17 | "Passed the argument is type of %s, but expected one of %s"
18 | % (type(obj), str(types)))
19 |
20 |
def load(statistics, Lambda, h5file=""):
    """Load a pre-computed IR basis from an HDF5 database.

    Parameters
    ----------
    statistics : str
        "F" for fermions, "B" for bosons.
    Lambda : float or float-like
        Dimensionless cutoff parameter of the basis.
    h5file : str, optional
        Path to the database file.  If empty, the ``irbasis.h5`` file
        shipped next to this module is used.

    Returns
    -------
    basis
        The loaded basis object.

    Raises
    ------
    RuntimeError
        If the database contains no entry for the requested parameters.
    """
    assert statistics == "F" or statistics == "B"

    Lambda = float(Lambda)

    if h5file == "":
        name = os.path.dirname(os.path.abspath(__file__))
        file_name = os.path.normpath(os.path.join(name, './irbasis.h5'))
    else:
        file_name = h5file

    prefix = "basis_f-mp-Lambda"+str(Lambda) if statistics == 'F' else "basis_b-mp-Lambda"+str(Lambda)

    # Fail fast with a clear error before handing the file to basis().
    with h5py.File(file_name, 'r') as f:
        if prefix not in f:
            raise RuntimeError("No data available!")

    return basis(file_name, prefix)
39 |
40 |
41 | class _PiecewiseLegendrePoly:
42 | """Piecewise Legendre polynomial.
43 |
44 | Models a function on the interval `[-1, 1]` as a set of segments on the
45 | intervals `S[i] = [a[i], a[i+1]]`, where on each interval the function
46 | is expanded in scaled Legendre polynomials.
47 | """
48 | def __init__(self, data, knots, dx):
49 | """Piecewise Legendre polynomial"""
50 | data = numpy.array(data)
51 | knots = numpy.array(knots)
52 | polyorder, nsegments = data.shape[:2]
53 | if knots.shape != (nsegments+1,):
54 | raise ValueError("Invalid knots array")
55 | if (numpy.diff(knots) < 0).any():
56 | raise ValueError("Knots must be monotonically increasing")
57 | if not numpy.allclose(dx, knots[1:] - knots[:-1]):
58 | raise ValueError("dx must work with knots")
59 |
60 | self.nsegments = nsegments
61 | self.polyorder = polyorder
62 | self.xmin = knots[0]
63 | self.xmax = knots[-1]
64 |
65 | self.knots = knots
66 | self.dx = dx
67 | self.data = data
68 | self._xm = .5 * (knots[1:] + knots[:-1])
69 | self._inv_xs = 2/dx
70 | self._norm = numpy.sqrt(self._inv_xs)
71 |
72 | def _split(self, x):
73 | """Split segment"""
74 | if (x < self.xmin).any() or (x > self.xmax).any():
75 | raise ValueError("x must be in [%g, %g]" % (self.xmin, self.xmax))
76 |
77 | i = self.knots.searchsorted(x, 'right').clip(None, self.nsegments)
78 | i -= 1
79 | xtilde = x - self._xm[i]
80 | xtilde *= self._inv_xs[i]
81 | return i, xtilde
82 |
83 | def __call__(self, x, l=None):
84 | """Evaluate polynomial at position x"""
85 | i, xtilde = self._split(numpy.asarray(x))
86 |
87 | if l is None:
88 | # Evaluate for all values of l. xtilde and data array must be
89 | # broadcast'able against each other, so we append a dimension here
90 | xtilde = xtilde[(slice(None),) * xtilde.ndim + (None,)]
91 | data = self.data[:,i,:]
92 | else:
93 | numpy.broadcast(xtilde, l)
94 | data = self.data[:,i,l]
95 |
96 | res = legval(xtilde, data, tensor=False)
97 | res *= self._norm[i]
98 | return res
99 |
100 | def deriv(self, n=1):
101 | """Get polynomial for the n'th derivative"""
102 | ddata = legder(self.data, n)
103 | scale = self._inv_xs ** n
104 | ddata *= scale[None, :, None]
105 | return _PiecewiseLegendrePoly(ddata, self.knots, self.dx)
106 |
107 |
108 | def _preprocess_irdata(data, knots, knots_corr=None):
109 | """Perform preprocessing of IR data"""
110 | data = numpy.array(data)
111 | dim, nsegments, polyorder = data.shape
112 | if knots_corr is None:
113 | knots_corr = numpy.zeros_like(knots)
114 |
115 | # First, the basis is given by *normalized* Legendre function,
116 | # so we have to undo the normalization here:
117 | norm = numpy.sqrt(numpy.arange(polyorder) + 0.5)
118 | data *= norm
119 |
120 | # The functions are stored for [0,1] only, since they are
121 | # either even or odd for even or odd orders, respectively. We
122 | # undo this here, because it simplifies the logic.
123 | mdata = data[:,::-1].copy()
124 | mdata[1::2,:,0::2] *= -1
125 | mdata[0::2,:,1::2] *= -1
126 | data = numpy.concatenate((mdata, data), axis=1)
127 | knots = numpy.concatenate((-knots[::-1], knots[1:]), axis=0)
128 | knots_corr = numpy.concatenate((-knots_corr[::-1], knots_corr[1:]), axis=0)
129 | dx = (knots[1:] - knots[:-1]) + (knots_corr[1:] - knots_corr[:-1])
130 |
131 | # Transpose following numpy polynomial convention
132 | data = data.transpose(2,1,0)
133 | return data, knots, dx
134 |
135 |
class basis(object):
    """Intermediate-representation (IR) basis loaded from an HDF5 database.

    Provides the singular values s_l, the basis functions u_l(x) and
    v_l(y) (with derivatives), and the transformation matrix u_nl to
    Matsubara frequencies.
    """

    def __init__(self, file_name, prefix=""):
        """Read basis data from HDF5 group *prefix* of file *file_name*."""
        with h5py.File(file_name, 'r') as f:
            self._Lambda = f[prefix+'/info/Lambda'][()]
            self._dim = f[prefix+'/info/dim'][()]
            self._statistics = 'B' if f[prefix+'/info/statistics'][()] == 0 else 'F'

            self._sl = f[prefix+'/sl'][()]

            ulx_data = f[prefix+'/ulx/data'][()]  # (l, section, p)
            ulx_section_edges = f[prefix+'/ulx/section_edges'][()]
            ulx_section_edges_corr = f[prefix+'/ulx/section_edges_corr'][()]
            assert ulx_data.shape[0] == self._dim
            assert ulx_data.shape[1] == f[prefix+'/ulx/ns'][()]
            assert ulx_data.shape[2] == f[prefix+'/ulx/np'][()]

            vly_data = f[prefix+'/vly/data'][()]
            vly_section_edges = f[prefix+'/vly/section_edges'][()]
            assert vly_data.shape[0] == self._dim
            assert vly_data.shape[1] == f[prefix+'/vly/ns'][()]
            assert vly_data.shape[2] == f[prefix+'/vly/np'][()]

            # Reference data:
            # XXX: shall we move this to the tests?
            self._ulx_ref_max = f[prefix+'/ulx/ref/max'][()]
            self._ulx_ref_data = f[prefix+'/ulx/ref/data'][()]
            self._vly_ref_max = f[prefix+'/vly/ref/max'][()]
            self._vly_ref_data = f[prefix+'/vly/ref/data'][()]

            assert f[prefix+'/ulx/np'][()] == f[prefix+'/vly/np'][()]

            np = f[prefix+'/vly/np'][()]
            self._np = np

        # All datasets have been materialized as numpy arrays above, so the
        # piecewise polynomials can be built after the file is closed.
        self._ulx_ppoly = _PiecewiseLegendrePoly(
            *_preprocess_irdata(ulx_data, ulx_section_edges, ulx_section_edges_corr))
        self._vly_ppoly = _PiecewiseLegendrePoly(
            *_preprocess_irdata(vly_data, vly_section_edges))

        # High-frequency (power-series) model used for the asymptotic tail
        # of u_nl, built from the derivatives of u_l at x = 1.
        deriv_x1 = numpy.asarray(list(_derivs(self._ulx_ppoly, x=1)))
        moments = _power_moments(self._statistics, deriv_x1)
        self._ulw_model = _PowerModel(self._statistics, moments)

    @property
    def Lambda(self):
        """
        Dimensionless parameter of IR basis

        Returns
        -------
        Lambda : float
        """
        return self._Lambda

    @property
    def statistics(self):
        """
        Statistics

        Returns
        -------
        statistics : string
            "F" for fermions, "B" for bosons
        """
        return self._statistics

    def dim(self):
        """
        Return dimension of basis

        Returns
        -------
        dim : int
        """
        return self._dim

    def sl(self, l=None):
        """
        Return the singular value for the l-th basis function

        Parameters
        ----------
        l : int, int-like array or None
            index of the singular values/basis functions. If None, return all.

        Returns
        -------
        sl : float
            singular value
        """
        if l is None: l = Ellipsis

        return self._sl[l]

    def ulx(self, l, x):
        """
        Return value of basis function for x

        Parameters
        ----------
        l : int, int-like array or None
            index of basis functions. If None, return array with all l
        x : float or float-like array
            dimensionless parameter x (-1 <= x <= 1)

        Returns
        -------
        ulx : float
            value of basis function u_l(x)
        """
        return self._ulx_ppoly(x, l)

    def d_ulx(self, l, x, order, section=None):
        """
        Return (higher-order) derivatives of u_l(x)

        Parameters
        ----------
        l : int, int-like array or None
            index of basis functions. If None, return array with all l
        x : float or float-like array
            dimensionless parameter x
        order : int
            order of derivative (>=0). 1 for the first derivative.
        section : int
            unused; kept for backward compatibility (the section is
            determined automatically from x).

        Returns
        -------
        d_ulx : float
            (higher-order) derivative of u_l(x)

        """
        return self._ulx_ppoly.deriv(order)(x, l)

    def vly(self, l, y):
        """
        Return value of basis function for y

        Parameters
        ----------
        l : int, int-like array or None
            index of basis functions. If None, return array with all l
        y : float or float-like array
            dimensionless parameter y (-1 <= y <= 1)

        Returns
        -------
        vly : float
            value of basis function v_l(y)
        """
        return self._vly_ppoly(y, l)

    def d_vly(self, l, y, order):
        """
        Return (higher-order) derivatives of v_l(y)

        Parameters
        ----------
        l : int, int-like array or None
            index of basis functions. If None, return array with all l
        y : float or float-like array
            dimensionless parameter y
        order : int
            order of derivative (>=0). 1 for the first derivative.

        Returns
        -------
        d_vly : float
            (higher-order) derivative of v_l(y)

        """
        return self._vly_ppoly.deriv(order)(y, l)

    def compute_unl(self, n, whichl=None):
        """
        Compute transformation matrix from IR to Matsubara frequencies

        Parameters
        ----------
        n : int or 1D ndarray of integers
            Indices of Matsubara frequncies

        whichl : vector of integers or None
            Indices of the l values

        Returns
        -------
        unl : 2D array of complex
            The shape is (niw, nl) where niw is the dimension of the input "n"
            and nl is the dimension of the basis

        """
        n = numpy.asarray(n)
        if not numpy.issubdtype(n.dtype, numpy.integer):
            # BUG FIX: the exception was constructed but never raised, so
            # non-integer input silently fell through.
            raise RuntimeError("n must be integer")
        if whichl is None:
            whichl = slice(None)
        else:
            whichl = numpy.ravel(whichl)

        zeta = 1 if self._statistics == 'F' else 0
        wn_flat = 2 * n.ravel() + zeta

        # The radius of convergence of the asymptotic expansion is Lambda/2,
        # so for significantly larger frequencies we use the asymptotics,
        # since it has lower relative error.
        cond_inner = numpy.abs(wn_flat[:, None]) < 40 * self._Lambda
        result_inner = _compute_unl(self._ulx_ppoly, wn_flat, whichl)
        result_asymp = self._ulw_model.giw(wn_flat)[:, whichl]
        result_flat = numpy.where(cond_inner, result_inner, result_asymp)
        return result_flat.reshape(n.shape + result_flat.shape[-1:])

    def num_sections_x(self):
        """
        Number of sections of piecewise polynomial representation of u_l(x)

        Returns
        -------
        num_sections_x : int
        """
        return self._ulx_ppoly.nsegments

    @property
    def section_edges_x(self):
        """
        End points of sections for u_l(x)

        Returns
        -------
        section_edges_x : 1D ndarray of float
        """
        return self._ulx_ppoly.knots

    def num_sections_y(self):
        """
        Number of sections of piecewise polynomial representation of v_l(y)

        Returns
        -------
        num_sections_y : int
        """
        return self._vly_ppoly.nsegments

    @property
    def section_edges_y(self):
        """
        End points of sections for v_l(y)

        Returns
        -------
        section_edges_y : 1D ndarray of float
        """
        return self._vly_ppoly.knots

    def sampling_points_x(self, whichl):
        """
        Computes "optimal" sampling points in x space for given basis

        Parameters
        ----------
        whichl: int
            Index of reference basis function "l"

        Returns
        -------
        sampling_points: 1D array of float
            sampling points in x space
        """
        # Delegates to the module-level function of the same name.
        return sampling_points_x(self, whichl)

    def sampling_points_y(self, whichl):
        """
        Computes "optimal" sampling points in y space for given basis

        Parameters
        ----------
        whichl: int
            Index of reference basis function "l"

        Returns
        -------
        sampling_points: 1D array of float
            sampling points in y space
        """
        # Delegates to the module-level function of the same name.
        return sampling_points_y(self, whichl)

    def sampling_points_matsubara(self, whichl):
        """
        Computes "optimal" sampling points in Matsubara domain for given basis

        Parameters
        ----------
        whichl: int
            Index of reference basis function "l"

        Returns
        -------
        sampling_points: 1D array of int
            sampling points in Matsubara domain

        """
        # Delegates to the module-level function of the same name.
        return sampling_points_matsubara(self, whichl)

    def _check_ulx(self):
        # Relative deviation of u_l(x) from the stored reference values
        # (rows with derivative order 0 only), normalized by the maximum.
        ulx_max = self._ulx_ref_max[2]
        ulx_ref = numpy.array([ (_data[0], _data[1], abs(self.ulx(int(_data[0]-1), _data[1])-_data[3])/ulx_max ) for _data in self._ulx_ref_data[self._ulx_ref_data[:,2]==0]])
        return(ulx_ref)

    def _get_d_ulx_ref(self):
        # Raw reference data for u_l(x) and its derivatives.
        return self._ulx_ref_data

    def _check_vly(self):
        # Relative deviation of v_l(y) from the stored reference values
        # (rows with derivative order 0 only), normalized by the maximum.
        vly_max = self._vly_ref_max[2]
        vly_ref = numpy.array([ (_data[0], _data[1], abs(self.vly(int(_data[0]-1), _data[1])-_data[3])/vly_max ) for _data in self._vly_ref_data[ self._vly_ref_data[:,2]==0]])
        return(vly_ref)

    def _get_d_vly_ref(self):
        # Raw reference data for v_l(y) and its derivatives.
        return self._vly_ref_data
470 |
471 |
472 | class _PowerModel:
473 | """Model from a high-frequency series expansion:
474 |
475 | A(iw) = sum(A[n] / (iw)**(n+1) for n in range(1, N))
476 |
477 | where `iw == 1j * pi/2 * wn` is a reduced imaginary frequency, i.e.,
478 | `wn` is an odd/even number for fermionic/bosonic frequencies.
479 | """
480 | def __init__(self, statistics, moments):
481 | """Initialize model"""
482 | self.zeta = {'F': 1, 'B': 0}[statistics]
483 | self.moments = numpy.asarray(moments)
484 | self.nmom, self.nl = self.moments.shape
485 |
486 | @staticmethod
487 | def _inv_iw_safe(wn, result_dtype):
488 | """Return inverse of frequency or zero if freqency is zero"""
489 | result = numpy.zeros(wn.shape, result_dtype)
490 | wn_nonzero = wn != 0
491 | result[wn_nonzero] = 1/(1j * numpy.pi/2 * wn[wn_nonzero])
492 | return result
493 |
494 | def _giw_ravel(self, wn):
495 | """Return model Green's function for vector of frequencies"""
496 | result_dtype = numpy.result_type(1j, wn, self.moments)
497 | result = numpy.zeros((wn.size, self.nl), result_dtype)
498 | inv_iw = self._inv_iw_safe(wn, result_dtype)[:,None]
499 | for mom in self.moments[::-1]:
500 | result += mom
501 | result *= inv_iw
502 | return result
503 |
504 | def giw(self, wn):
505 | """Return model Green's function for reduced frequencies"""
506 | wn = numpy.array(wn)
507 | if (wn % 2 != self.zeta).any():
508 | raise ValueError("expecting 'reduced' frequencies")
509 |
510 | return self._giw_ravel(wn.ravel()).reshape(wn.shape + (self.nl,))
511 |
512 |
513 | def _derivs(ppoly, x):
514 | """Evaluate polynomial and its derivatives at specific x"""
515 | yield ppoly(x)
516 | for _ in range(ppoly.polyorder-1):
517 | ppoly = ppoly.deriv()
518 | yield ppoly(x)
519 |
520 |
521 | def _power_moments(stat, deriv_x1):
522 | """Return moments"""
523 | statsign = {'F': -1, 'B': 1}[stat]
524 | mmax, lmax = deriv_x1.shape
525 | m = numpy.arange(mmax)[:,None]
526 | l = numpy.arange(lmax)[None,:]
527 | coeff_lm = ((-1.0)**(m+1) + statsign * (-1.0)**l) * deriv_x1
528 | return -statsign/numpy.sqrt(2.0) * coeff_lm
529 |
530 |
531 | def _imag_power(n):
532 | """Imaginary unit raised to an integer power without numerical error"""
533 | n = numpy.asarray(n)
534 | if not numpy.issubdtype(n.dtype, numpy.integer):
535 | raise ValueError("expecting set of integers here")
536 | cycle = numpy.array([1, 0+1j, -1, 0-1j], complex)
537 | return cycle[n % 4]
538 |
539 |
def _get_tnl(l, w):
    r"""Fourier integral of the l-th Legendre polynomial:

        T_l(w) = \int_{-1}^1 dx \exp(iwx) P_l(x)
    """
    # For w >= 0 the integral equals 2 i^l j_l(w); negative w follows from
    # T_l(-w) = T_l(w)^* since the integrand conjugates under w -> -w.
    phase = _imag_power(l)
    pos_branch = phase * scipy.special.spherical_jn(l, w)
    neg_branch = (phase * scipy.special.spherical_jn(l, -w)).conj()
    return 2 * numpy.where(w >= 0, pos_branch, neg_branch)
551 |
552 |
553 | def _shift_xmid(knots, dx):
554 | r"""Return midpoint relative to the nearest integer plus a shift
555 |
556 | Return the midpoints `xmid` of the segments, as pair `(diff, shift)`,
557 | where shift is in `(0,1,-1)` and `diff` is a float such that
558 | `xmid == shift + diff` to floating point accuracy.
559 | """
560 | dx_half = dx / 2
561 | xmid_m1 = dx.cumsum() - dx_half
562 | xmid_p1 = -dx[::-1].cumsum()[::-1] + dx_half
563 | xmid_0 = knots[1:] - dx_half
564 |
565 | shift = numpy.round(xmid_0).astype(int)
566 | diff = numpy.choose(shift+1, (xmid_m1, xmid_0, xmid_p1))
567 | return diff, shift
568 |
569 |
def _phase_stable(poly, wn):
    """Phase factor for the piecewise Legendre to Matsubara transform.

    Compute the following phase factor in a stable way:

        numpy.exp(1j * numpy.pi/2 * wn[:,None] * poly.dx.cumsum()[None,:])

    A naive evaluation loses precision near x = +-1: multiplying by `wn`
    adds roughly wn//4 extra turns around the unit circle, and reducing the
    argument back into [-pi, pi) before taking sine/cosine discards digits
    of dx.  Splitting each midpoint into a nearest integer `shift` plus a
    small `diff` keeps those digits: the integer part contributes an exact
    power of i, and only the small remainder goes through exp().
    """
    diff, shift = _shift_xmid(poly.knots, poly.dx)
    small_part = numpy.exp(1j * numpy.pi/2 * wn[None, :] * diff[:, None])
    exact_part = _imag_power((shift[:, None] + 1) * wn[None, :])
    return exact_part * small_part
590 |
591 |
def _compute_unl(poly, wn, whichl):
    """Compute piecewise Legendre to Matsubara transform.

    `poly` is a _PiecewiseLegendrePoly (assumed to cover [-1, 1] with the
    second half of its segments on [0, 1] — TODO confirm against
    _preprocess_irdata), `wn` a 1D integer array of reduced frequencies of
    a single parity, and `whichl` an index or slice into the basis axis.
    Returns the complex matrix u_{nl}, shape (len(wn), #selected l).
    """
    # Integrate over the positive-x half only; the negative half is
    # recovered from the parity of u_l at the end.
    posonly = slice(poly.nsegments//2, None)
    dx_half = poly.dx[posonly] / 2
    # Per-segment Jacobian factor sqrt(dx/4) folded into the coefficients.
    data_sc = poly.data[:,posonly,whichl] * numpy.sqrt(dx_half/2)[None,:,None]
    p = numpy.arange(poly.polyorder)

    # wred = pi/2 * wn enters both the Legendre Fourier integral (_get_tnl)
    # and the per-segment phase factor (_phase_stable).
    wred = numpy.pi/2 * wn
    phase_wi = _phase_stable(poly, wn)[posonly]
    t_pin = _get_tnl(p[:,None,None], wred[None,:] * dx_half[:,None]) * phase_wi

    # Perform the following, but faster:
    #   resulth = einsum('pin,pil->nl', t_pin, data_sc)
    npi = poly.polyorder * poly.nsegments // 2
    resulth = t_pin.reshape(npi,-1).T.dot(data_sc.reshape(npi,-1))

    # We have integrated over the positive half only, so we double up here:
    # for l of the same parity as zeta the result is purely real, otherwise
    # purely imaginary (cf. the where() below), so the matching part doubles.
    zeta = wn[0] % 2
    l = numpy.arange(poly.data.shape[-1])[whichl]
    return numpy.where(l % 2 != zeta, 2j * resulth.imag, 2 * resulth.real)
612 |
613 | #
614 | # The functions below are for sparse sampling
615 | #
616 |
617 | def _funique(x, tol=2e-16):
618 | """Removes duplicates from an 1D array within tolerance"""
619 | x = numpy.sort(x)
620 | unique = numpy.ediff1d(x, to_end=2*tol) > tol
621 | x = x[unique]
622 | return x
623 |
624 |
625 | def _find_roots(ulx):
626 | """Find all roots in (-1, 1) using double exponential mesh + bisection"""
627 | Nx = 10000
628 | eps = 1e-14
629 | tvec = numpy.linspace(-3, 3, Nx) # 3 is a very safe option.
630 | xvec = numpy.tanh(0.5 * numpy.pi * numpy.sinh(tvec))
631 |
632 | zeros = []
633 | for i in range(Nx - 1):
634 | if ulx(xvec[i]) * ulx(xvec[i + 1]) < 0:
635 | a = xvec[i + 1]
636 | b = xvec[i]
637 | u_a = ulx(a)
638 | while a - b > eps:
639 | half_point = 0.5 * (a + b)
640 | if ulx(half_point) * u_a > 0:
641 | a = half_point
642 | else:
643 | b = half_point
644 | zeros.append(0.5 * (a + b))
645 | return numpy.array(zeros)
646 |
647 |
def sampling_points_x(b, whichl):
    """
    Computes "optimal" sampling points in x space for given basis

    Parameters
    ----------
    b :
        basis object
    whichl: int
        Index of reference basis function "l"

    Returns
    -------
    sampling_points: 1D array of float
        sampling points in x space
    """
    _check_type(b, basis)

    # Midpoints between adjacent roots of u_l(x), with the endpoints -1
    # and 1 acting as the outermost bracket.
    edges = numpy.hstack((-1.0, _find_roots(lambda x: b.ulx(whichl, x)), 1.0))
    return 0.5 * (edges[:-1] + edges[1:])
669 |
670 |
def sampling_points_y(b, whichl):
    """
    Computes "optimal" sampling points in y space for given basis

    Parameters
    ----------
    b :
        basis object
    whichl: int
        Index of reference basis function "l"

    Returns
    -------
    sampling_points: 1D array of float
        sampling points in y space
    """
    _check_type(b, basis)

    # Roots are searched on the positive half of the y interval and then
    # mirrored -- presumably exploiting the parity of v_l; confirm.
    roots_positive_half = 0.5 * _find_roots(lambda y: b.vly(whichl, (y + 1)/2)) + 0.5
    if whichl % 2 == 0:
        roots_ex = numpy.sort(numpy.hstack([-1, -roots_positive_half, roots_positive_half, 1]))
    else:
        # Odd l: a root at y = 0 is added explicitly.
        roots_ex = numpy.sort(numpy.hstack([-1, -roots_positive_half, 0, roots_positive_half, 1]))

    # Midpoints between consecutive (extended) roots.
    return 0.5 * (roots_ex[:-1] + roots_ex[1:])
696 |
697 | def _start_guesses(n=1000):
698 | "Construct points on a logarithmically extended linear interval"
699 | x1 = numpy.arange(n)
700 | x2 = numpy.array(numpy.exp(numpy.linspace(numpy.log(n), numpy.log(1E+8), n)), dtype=int)
701 | x = numpy.unique(numpy.hstack((x1, x2)))
702 | return x
703 |
704 |
def _get_unl_real(basis_xy, x, l):
    """Return basis function l on the Matsubara axis as a real array.

    u_{nl} is purely real or purely imaginary depending on the parity of l
    relative to the statistics; the non-vanishing component is returned.

    Parameters
    ----------
    basis_xy :
        basis object providing compute_unl() and statistics
    x : array of int
        Matsubara indices passed to compute_unl
    l : int
        basis function index

    Returns
    -------
    real array: the real (or imaginary) part of u_{nl}
    """
    # Fix: the original allocated `result = numpy.zeros(unl.shape, float)`
    # and never used it; the dead allocation is removed.
    unl = basis_xy.compute_unl(x, l)

    # zeta = 1 for fermions, 0 for bosons
    zeta = 1 if basis_xy.statistics == 'F' else 0
    if l % 2 == zeta:
        assert numpy.allclose(unl.imag, 0)
        return unl.real
    else:
        assert numpy.allclose(unl.real, 0)
        return unl.imag
718 |
719 |
720 | def _sampling_points(fn):
721 | "Given a discretized 1D function, return the location of the extrema"
722 | fn = numpy.asarray(fn)
723 | fn_abs = numpy.abs(fn)
724 | sign_flip = fn[1:] * fn[:-1] < 0
725 | sign_flip_bounds = numpy.hstack((0, sign_flip.nonzero()[0] + 1, fn.size))
726 | points = []
727 | for segment in map(slice, sign_flip_bounds[:-1], sign_flip_bounds[1:]):
728 | points.append(fn_abs[segment].argmax() + segment.start)
729 | return numpy.asarray(points)
730 |
731 |
732 | def _full_interval(sample, stat):
733 | if stat == 'F':
734 | return numpy.hstack((-sample[::-1]-1, sample))
735 | else:
736 | # If we have a bosonic basis and even order (odd maximum), we have a
737 | # root at zero. We have to artifically add that zero back, otherwise
738 | # the condition number will blow up.
739 | if sample[0] == 0:
740 | sample = sample[1:]
741 | return numpy.hstack((-sample[::-1], 0, sample))
742 |
743 |
def _get_mats_sampling(basis_xy, lmax=None):
    "Generate Matsubara sampling points from extrema of basis functions"
    if lmax is None:
        lmax = basis_xy.dim() - 1

    # Evaluate the reference basis function on a dense grid of Matsubara
    # indices and take the extrema between its sign changes.
    guesses = _start_guesses()
    u_vals = _get_unl_real(basis_xy, guesses, lmax)
    extrema = guesses[_sampling_points(u_vals)]
    # Mirror the positive-side extrema onto the full axis.
    return _full_interval(extrema, basis_xy.statistics)
755 |
756 |
def sampling_points_matsubara(b, whichl):
    """
    Computes "optimal" sampling points in Matsubara domain for given basis

    Parameters
    ----------
    b :
        basis object
    whichl: int
        Index of reference basis function "l"

    Returns
    -------
    sampling_points: 1D array of int
        sampling points in Matsubara domain

    Raises
    ------
    RuntimeError
        If the statistics of b is not one of 'F', 'B', 'barB', or if
        whichl is out of range.
    """
    _check_type(b, basis)

    stat = b.statistics

    # Validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if stat not in ('F', 'B', 'barB'):
        raise RuntimeError("Unknown statistics: " + str(stat))

    if whichl > b.dim()-1:
        raise RuntimeError("Too large whichl")

    return _get_mats_sampling(b, whichl)
784 |
--------------------------------------------------------------------------------
/python/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | future
4 | h5py
5 |
--------------------------------------------------------------------------------
/python/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | universal=1
3 |
--------------------------------------------------------------------------------
/python/setup.py:
--------------------------------------------------------------------------------
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path

# Directory containing this setup.py (the python/ package root).
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Get version string
# NOTE(review): read() keeps any trailing newline; setuptools normalizes
# version strings, but confirm the 'version' file has no extra whitespace.
with open(path.join(here, 'irbasis', 'version'), encoding='ascii') as f:
    version = f.read()

setup(
    name='irbasis',

    version=version,

    description='Python libraries for irbasis',

    long_description=long_description,

    long_description_content_type='text/markdown',

    url='https://github.com/SpM-lab/irbasis',

    author='Kazuyoshi Yoshimi, Hiroshi Shinaoka, Chikano Naoya, Junya Otsuki, Markus Wallerberger',

    author_email='h.shinaoka@gmail.com',

    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering',

        # Pick your license as you wish
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],

    keywords='quantum many-body theory',

    packages = find_packages(exclude=['contrib', 'docs', 'tests']),

    install_requires=['numpy', 'scipy', 'h5py', 'future'],

    # Ship the basis data file and the version file inside the wheel.
    package_data={
        'irbasis': ['irbasis.h5', 'version'],
    },
)
68 |
--------------------------------------------------------------------------------
/sample/api.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include "irbasis.hpp"
4 |
5 | int main() {
6 | double Lambda = 1000.0;
7 | double x = 1.0;
8 | double y = 1.0;
9 | int l = 0;
10 | irbasis::basis b = irbasis::load("F", Lambda, "./irbasis.h5");
11 |
12 | //Dimensions of basis
13 | std::cout << b.dim() << std::endl;
14 |
15 | //u_l(x = 1) and v_l(y = 1)
16 | std::cout << b.ulx(l,x) << std::endl;
17 | std::cout << b.vly(l,y) << std::endl;
18 |
19 | //Singular value s_0
20 | std::cout << b.sl(l) << std::endl;
21 |
22 | //The k-th derivative of ulx and vly
23 | for (int k=1; k < 4; ++k) {
24 | std::cout << b.d_ulx(l,k,x) << std::endl;
25 | std::cout << b.d_vly(l,k,y) << std::endl;
26 | }
27 |
28 | //Fourier taransform of ulx
29 | std::vector vec;
30 | for (int n=0; n<1000; ++n) {
31 | vec.push_back(n);
32 | }
33 |
34 | //unl will be a two-dimensional array of size (vec.size(), b.dim)).
35 | std::vector > > unl = b.compute_unl(vec);
36 | std::cout << "Dimensions of unl " << unl.size() << " " << unl[0].size() << std::endl;
37 | }
38 |
--------------------------------------------------------------------------------
/sample/api.py:
--------------------------------------------------------------------------------
from __future__ import print_function
import numpy
import irbasis

# By default, Lambda = 10, 100, 1000, 10000 are available.
Lambda = 1000.0

# Fermionic
# If you have not installed the irbasis package via pip,
# you must specify the location of a data file as follows.
#   irbasis.load('F', Lambda, './irbasis.h5')
basis = irbasis.load('F', Lambda)

l = 0
print("l =",l,",Lambda =", Lambda)

# Evaluate basis functions at the right edge of their intervals.
x = 1
y = 1

# Dimensions of basis
print("Dim ", basis.dim())

# u_0(x = 1) and v_0(y = 1)
print("ulx ", basis.ulx(l,x))
print("vly ", basis.vly(l,y))

# "Broadcasting rule" applies to ulx(), vly(), d_ulx(), d_vly() and sl()
nx = 10
all_l = numpy.arange(basis.dim())
xs = numpy.linspace(-1, 1, nx)
ulx_mat = basis.ulx(all_l[:,None], xs[None,:])
ulx_mat_slow = numpy.array([basis.ulx(l, x) for l in range(basis.dim()) for x in xs]).reshape(basis.dim(), nx)
assert numpy.allclose(ulx_mat, ulx_mat_slow)

# Singular value s_0
print("sl ", basis.sl(l))
print("all sl", basis.sl(all_l))

# The k-th derivative of u_l(x) and v_l(y) (k = 1,2,3)
for k in [1, 2, 3]:
    print("k = ", k)
    print("d_ulx ", basis.d_ulx(l,x,k))
    print("d_vly ", basis.d_vly(l,y,k))

# Fourier transform of ulx
n = numpy.arange(1000)
unl = basis.compute_unl(n)
print("dimensions of unl ", unl.shape)
49 |
--------------------------------------------------------------------------------
/sample/compile.sh:
--------------------------------------------------------------------------------
1 | # Here, we like the program to the HDF5 C library installed in /usr/local/lib.
2 | # We define the NDEBUG macro to disable assertions.
3 | # If hdf5.h is not found at compile time, please tell the compiler where that header file is by using "-I" option.
4 |
5 | g++ api.cpp -o api -I /usr/local/include -L /usr/local/lib -lhdf5 -DNDEBUG -O3
6 | g++ step_by_step_examples.cpp -o step_by_step_examples -I /usr/local/include -L /usr/local/lib -lhdf5 -DNDEBUG -O3
7 |
--------------------------------------------------------------------------------
/sample/computing_gl.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from builtins import range
3 |
4 | import numpy
5 | import irbasis
6 |
7 | def _composite_leggauss(deg, section_edges):
8 | """
9 | Composite Gauss-Legendre quadrature.
10 |
11 | :param deg: Number of sample points and weights. It must be >= 1.
12 | :param section_edges: array_like
13 | 1-D array of the two end points of the integral interval
14 | and breaking points in ascending order.
15 | :return ndarray, ndarray: sampling points and weights
16 | """
17 | x_loc, w_loc = numpy.polynomial.legendre.leggauss(deg)
18 |
19 | ns = len(section_edges)-1
20 | x = numpy.zeros((ns, deg))
21 | w = numpy.zeros((ns, deg))
22 | for s in range(ns):
23 | dx = section_edges[s+1] - section_edges[s]
24 | x0 = section_edges[s]
25 | x[s, :] = (dx/2)*(x_loc+1) + x0
26 | w[s, :] = w_loc*(dx/2)
27 | return x.reshape((ns*deg)), w.reshape((ns*deg))
28 |
class transformer(object):
    """Transforms G(tau) samples into IR expansion coefficients G_l.

    The overlap integral with u_l(x) is evaluated by composite
    Gauss-Legendre quadrature on the basis' x sections.
    """
    def __init__(self, basis, beta):
        # basis: irbasis basis object; beta: inverse temperature
        section_edges = numpy.asarray(basis.section_edges_x)
        self._dim = basis.dim()
        self._beta = beta
        # 16-point Gauss-Legendre rule per section.
        self._x, self._w = _composite_leggauss(16, section_edges)

        nx = len(self._x)
        # Pre-multiply the quadrature weights into the sampled u_l(x).
        self._u_smpl = numpy.zeros((nx, self._dim))
        for ix in range(nx):
            for l in range(self._dim):
                self._u_smpl[ix, l] = self._w[ix] * basis.ulx(l, self._x[ix])

    def compute_gl(self, gtau, nl):
        """Return the first nl expansion coefficients of gtau (a callable of tau)."""
        assert nl <= self._dim

        nx = len(self._x)
        gtau_smpl = numpy.zeros((1, nx), dtype=complex)
        for ix in range(nx):
            # Map x in [-1, 1] to tau in [0, beta].
            gtau_smpl[0, ix] = gtau(0.5 * (self._x[ix] + 1) * self._beta)

        return numpy.sqrt(self._beta / 2) * numpy.dot(gtau_smpl[:, :], self._u_smpl[:, 0:nl]).reshape((nl))
51 |
if __name__ == '__main__':
    # Demo: expand the Green's function of a single pole in the IR basis
    # and compare against the analytically known coefficients.
    beta = 100.0
    Lambda = 1000.0
    wmax = Lambda/beta

    pole = 1.0

    basis = irbasis.load('F', Lambda)
    Nl = basis.dim()

    # Initialize a transformer
    trans = transformer(basis, beta)

    # G(tau) generated by a pole at "pole"
    gtau = lambda tau: - numpy.exp(- pole * tau)/(1 + numpy.exp(- beta * pole))

    # Compute expansion coefficients in IR by numerical integration
    Gl = trans.compute_gl(gtau, Nl)

    # In this special case, Gl can be computed from rho_l.
    rhol = numpy.sqrt(1/wmax) * numpy.array([basis.vly(l, pole/wmax) for l in range(Nl)])
    Sl = numpy.sqrt(beta * wmax / 2) * numpy.array([basis.sl(l) for l in range(Nl)])
    Gl_ref = - Sl * rhol

    # Print numerical vs. reference coefficients side by side.
    for l in range(Nl):
        print(l, float(Gl[l]), float(Gl_ref[l]))

    # Transform Gl to Matsubara frequency domain
    nvec = numpy.array([0, 10, 100, 1000, 10000, 100000, -10])
    Niw = len(nvec)
    Unl = numpy.sqrt(beta) * basis.compute_unl(nvec)
    Giw = numpy.dot(Unl, Gl)

    # Compare the result with the exact one 1/(i w_n - pole)
    for n in range(Niw):
        wn = (2*nvec[n] + 1) * numpy.pi/beta
        ref = 1/(1J * wn - pole)
        print(nvec[n], numpy.abs((Giw[n] - ref)/ref))
90 |
--------------------------------------------------------------------------------
/sample/giwn.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from builtins import range
3 |
4 | import matplotlib
5 | matplotlib.use('Agg')
6 | import matplotlib.pyplot as plt
7 |
8 | matplotlib.rcParams['font.family'] = 'serif'
9 | matplotlib.rcParams['mathtext.fontset'] = 'cm'
10 | matplotlib.rcParams['mathtext.rm'] = 'serif'
11 |
12 | import numpy
13 | import irbasis
14 |
15 | def _composite_leggauss(deg, section_edges):
16 | """
17 | Composite Gauss-Legendre quadrature.
18 | :param deg: Number of sample points and weights. It must be >= 1.
19 | :param section_edges: array_like
20 | 1-D array of the two end points of the integral interval
21 | and breaking points in ascending order.
22 | :return ndarray, ndarray: sampling points and weights
23 | """
24 | x_loc, w_loc = numpy.polynomial.legendre.leggauss(deg)
25 |
26 | ns = len(section_edges)-1
27 | x = numpy.zeros((ns, deg))
28 | w = numpy.zeros((ns, deg))
29 | for s in range(ns):
30 | dx = section_edges[s+1] - section_edges[s]
31 | x0 = section_edges[s]
32 | x[s, :] = (dx/2)*(x_loc+1) + x0
33 | w[s, :] = w_loc*(dx/2)
34 | return x.reshape((ns*deg)), w.reshape((ns*deg))
35 |
class transformer(object):
    """Transforms G(tau) samples into IR expansion coefficients G_l.

    The overlap integral with u_l(x) is evaluated by composite
    Gauss-Legendre quadrature on the basis' x sections.
    """
    def __init__(self, basis, beta):
        # basis: irbasis basis object; beta: inverse temperature
        section_edges = numpy.asarray(basis.section_edges_x)
        self._dim = basis.dim()
        self._beta = beta
        # 12-point Gauss-Legendre rule per section.
        self._x, self._w = _composite_leggauss(12, section_edges)

        nx = len(self._x)
        # Pre-multiply the quadrature weights into the sampled u_l(x).
        self._u_smpl = numpy.zeros((nx, self._dim))
        for ix in range(nx):
            for l in range(self._dim):
                self._u_smpl[ix, l] = self._w[ix] * basis.ulx(l, self._x[ix])

    def compute_gl(self, gtau, nl):
        """Return the first nl expansion coefficients of gtau (a callable of tau)."""
        assert nl <= self._dim

        nx = len(self._x)
        gtau_smpl = numpy.zeros((1, nx), dtype=complex)
        for ix in range(nx):
            # Map x in [-1, 1] to tau in [0, beta].
            gtau_smpl[0, ix] = gtau(0.5 * (self._x[ix] + 1) * self._beta)

        return numpy.sqrt(self._beta / 2) * numpy.dot(gtau_smpl[:, :], self._u_smpl[:, 0:nl]).reshape((nl))
58 |
if __name__ == '__main__':

    stat = 'F' # 'F' for Fermionic or 'B' for Bosonic
    wmax = 10.0
    Lambda = 1000.0
    beta = Lambda/wmax

    pole = 2.0

    # The pole must lie inside the spectral window [-wmax, wmax].
    assert numpy.abs(pole) <= wmax

    basis = irbasis.load(stat, Lambda)
    Nl = basis.dim()

    # Initialize a transformer
    trans = transformer(basis, beta)

    # G(tau) generated by a pole at "pole = 2.0"
    if stat == 'B':
        gtau = lambda tau: - numpy.exp(- pole * tau)/(1 - numpy.exp(- beta * pole))
    elif stat == 'F':
        gtau = lambda tau: - numpy.exp(- pole * tau)/(1 + numpy.exp(- beta * pole))

    #Compute expansion coefficients in IR by numerical integration
    Gl = trans.compute_gl(gtau, Nl)

    # Plot |G_l| versus l on a log-log scale.
    plt.figure(1)
    for l in range(Nl):
        plt.scatter(l,numpy.abs(Gl.real[l]),color = "r")

    plt.xlim(1,1e+5)
    plt.ylim(1e-4,1)

    plt.xscale("log")
    plt.yscale("log")
    plt.xlabel(r'$l$',fontsize = 21)
    plt.tick_params(labelsize=21)

    plt.ylabel(r'$|G_l|$',fontsize = 21)
    plt.legend(frameon=False,fontsize = 21)
    plt.tight_layout()
    #plt.show()
    plt.savefig('Gl.png')

    # In this special case, Gl can be computed from rho_l.
    if stat == 'B':
        rhol = (1/pole) * numpy.sqrt(1/wmax) * numpy.array([basis.vly(l, pole/wmax) for l in range(Nl)])
        Sl = numpy.sqrt(0.5 * beta * wmax**3) * numpy.array([basis.sl(l) for l in range(Nl)])
    elif stat == 'F':
        rhol = numpy.sqrt(1/wmax) * numpy.array([basis.vly(l, pole/wmax) for l in range(Nl)])
        Sl = numpy.sqrt(0.5 * beta * wmax) * numpy.array([basis.sl(l) for l in range(Nl)])
    Gl_ref = - Sl * rhol

    # Check Gl is equal to Gl_ref
    numpy.testing.assert_allclose(Gl, Gl_ref, atol=1e-10)

    # Reconstruct G(tau) from Gl
    Nx = 1000
    x_points = numpy.linspace(-1, 1, Nx)
    A = numpy.sqrt(2/beta) * numpy.asarray([basis.ulx(l, x) for x in x_points for l in range(Nl)]).reshape((Nx, Nl))
    Gtau_reconst = numpy.dot(A, Gl)
    Gtau_ref = numpy.asarray([gtau((x+1)*beta/2) for x in x_points])
    numpy.testing.assert_allclose(Gtau_reconst, Gtau_ref, atol=1e-12)

    plt.figure(2)
    plt.xlim(1,1e+5)
    plt.yscale("log")
    plt.xscale("log")

    # Logarithmically spaced Matsubara indices down from N.
    point = []
    N = 100000
    for x in range(50):
        point.append(int(N * numpy.exp(-x/3.)))

    Unl = numpy.sqrt(beta) * basis.compute_unl(point)
    Giw = numpy.dot(Unl, Gl)

    # Compare the result with the exact one 1/(i w_n - pole)
    Glist = []
    reflist = []
    p = 0
    for n in point:
        if stat == 'B':
            wn = (2*n ) * numpy.pi/beta
            ref = 1/(1J * wn - pole)
        elif stat == 'F':
            wn = (2*n +1) * numpy.pi/beta
            ref = 1/(1J * wn - pole)

        Glist.append(numpy.abs(Giw[p]))
        reflist.append(numpy.abs(ref))

        # Giw is consistent with ref
        assert numpy.abs(Giw[p] - ref) < 1e-8
        p += 1


    # NOTE(review): the legend labels below appear swapped -- Glist holds the
    # values reconstructed from G_l, reflist the exact ones; confirm.
    plt.scatter(point,Glist,marker = "o",label = r"$\rm{Exact} \hspace{0.5}$"+r"$G(i\omega_n)$")
    plt.scatter(point,reflist,marker = "x",label = r"$\rm{Reconstructed\hspace{0.5} from \hspace{0.5}}$"+r"$G_l$")
    plt.tick_params(labelsize=21)
    plt.ylabel(r'$|G(iw_n)|$',fontsize = 21)
    plt.xlabel(r'$n$',fontsize = 21)
    plt.legend(frameon=False,fontsize = 21)
    plt.tight_layout()
    plt.savefig('Giw.png')
164 |
--------------------------------------------------------------------------------
/sample/how_to_use_from_Julia.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "using PyCall\n",
10 | "\n",
11 | "const irbasis = pyimport(\"irbasis\");"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [
19 | {
20 | "data": {
21 | "text/plain": [
22 | "PyObject "
23 | ]
24 | },
25 | "execution_count": 2,
26 | "metadata": {},
27 | "output_type": "execute_result"
28 | }
29 | ],
30 | "source": [
31 | "# Load basis data for fermions and Lambda = 1000.0\n",
32 | "basis = irbasis.load(\"F\", 1000.0)"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": 3,
38 | "metadata": {},
39 | "outputs": [
40 | {
41 | "data": {
42 | "text/plain": [
43 | "0.27401896348952326"
44 | ]
45 | },
46 | "execution_count": 3,
47 | "metadata": {},
48 | "output_type": "execute_result"
49 | }
50 | ],
51 | "source": [
52 | "# l=0, x = 0.1\n",
53 | "basis.ulx(0, 0.1)"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": 4,
59 | "metadata": {},
60 | "outputs": [
61 | {
62 | "data": {
63 | "text/plain": [
64 | "(72, 10)"
65 | ]
66 | },
67 | "execution_count": 4,
68 | "metadata": {},
69 | "output_type": "execute_result"
70 | }
71 | ],
72 | "source": [
73 | "# Compute u_l(x) for all l's at 10 x points.\n",
74 | "# This can be done by using numpy broadcasting.\n",
75 | "function compute_ulx_all_l(basis, xs)\n",
76 | " dim = convert(Int64, basis.dim())\n",
77 | " nx = length(xs)\n",
78 | " all_l = collect(0:dim-1) # make sure l starts with 0.\n",
79 | " basis.ulx(reshape(all_l, dim, 1), reshape(xs, 1, nx))\n",
80 | "end\n",
81 | "nx = 10\n",
82 | "xs = LinRange(-1, 1, nx)\n",
83 | "ulx_mat = compute_ulx_all_l(basis, xs)\n",
84 | "size(ulx_mat)"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": 5,
90 | "metadata": {},
91 | "outputs": [
92 | {
93 | "data": {
94 | "text/plain": [
95 | "(101, 72)"
96 | ]
97 | },
98 | "execution_count": 5,
99 | "metadata": {},
100 | "output_type": "execute_result"
101 | }
102 | ],
103 | "source": [
104 | "# Compute unl for n=0, ...., 100\n",
105 | "unl = basis.compute_unl(collect(0:100))\n",
106 | "size(unl)"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 6,
112 | "metadata": {},
113 | "outputs": [
114 | {
115 | "data": {
116 | "text/plain": [
117 | "72-element Vector{Float64}:\n",
118 | " -0.9999350180790227\n",
119 | " -0.999591919067488\n",
120 | " -0.998810514553798\n",
121 | " -0.9975735342137606\n",
122 | " -0.995868107425596\n",
123 | " -0.993676225696257\n",
124 | " -0.9909740539653258\n",
125 | " -0.9877311295042808\n",
126 | " -0.9839093450370975\n",
127 | " -0.9794616818172159\n",
128 | " -0.9743306649327053\n",
129 | " -0.9684465198121114\n",
130 | " -0.9617250208563004\n",
131 | " ⋮\n",
132 | " 0.9684465198121114\n",
133 | " 0.9743306649327053\n",
134 | " 0.9794616818172162\n",
135 | " 0.9839093450370976\n",
136 | " 0.9877311295042808\n",
137 | " 0.9909740539653258\n",
138 | " 0.993676225696257\n",
139 | " 0.995868107425596\n",
140 | " 0.9975735342137606\n",
141 | " 0.998810514553798\n",
142 | " 0.999591919067488\n",
143 | " 0.9999350180790227"
144 | ]
145 | },
146 | "execution_count": 6,
147 | "metadata": {},
148 | "output_type": "execute_result"
149 | }
150 | ],
151 | "source": [
152 | "# Sampling points in x space\n",
153 | "sp_x = basis.sampling_points_x(basis.dim()-1)"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": 7,
159 | "metadata": {},
160 | "outputs": [
161 | {
162 | "data": {
163 | "text/plain": [
164 | "72×72 transpose(::Matrix{Float64}) with eltype Float64:\n",
165 | " 6.02087 -8.04567 9.48639 -10.5652 … -5.7562 5.64904 -5.54162\n",
166 | " 5.75039 -7.64106 8.79298 -9.58333 5.2586 -5.19723 5.1292\n",
167 | " 5.22002 -6.84939 7.44944 -7.6984 -4.14515 4.11459 -4.0676\n",
168 | " 4.56897 -5.88165 5.83787 -5.47791 3.48043 -3.47632 3.44334\n",
169 | " 3.92892 -4.93653 4.31109 -3.43615 -3.03602 3.05866 -3.03749\n",
170 | " 3.3726 -4.12265 3.05309 -1.82861 … 2.70883 -2.76005 2.75014\n",
171 | " 2.91728 -3.46444 2.09371 -0.6797 -2.45021 2.53266 -2.53418\n",
172 | " 2.55077 -2.94191 1.38475 0.0983916 2.23412 -2.35096 2.36455\n",
173 | " 2.2534 -2.52421 0.862301 0.61095 -2.04508 2.19992 -2.22653\n",
174 | " 2.00797 -2.18467 0.473613 0.94159 1.8731 -2.06984 2.11067\n",
175 | " 1.80177 -1.90373 0.181174 1.14795 … -1.71132 1.95407 -2.01054\n",
176 | " 1.62576 -1.66755 -0.0407086 1.26829 1.55485 -1.84777 1.92144\n",
177 | " 1.47345 -1.46629 -0.209784 1.32817 -1.40029 1.7473 -1.83987\n",
178 | " ⋮ ⋱ ⋮ \n",
179 | " 1.62576 1.66755 -0.0407086 -1.26829 … -1.55485 -1.84777 -1.92144\n",
180 | " 1.80177 1.90373 0.181174 -1.14795 1.71132 1.95407 2.01054\n",
181 | " 2.00797 2.18467 0.473613 -0.94159 -1.8731 -2.06984 -2.11067\n",
182 | " 2.2534 2.52421 0.862301 -0.61095 2.04508 2.19992 2.22653\n",
183 | " 2.55077 2.94191 1.38475 -0.0983916 -2.23412 -2.35096 -2.36455\n",
184 | " 2.91728 3.46444 2.09371 0.6797 … 2.45021 2.53266 2.53418\n",
185 | " 3.3726 4.12265 3.05309 1.82861 -2.70883 -2.76005 -2.75014\n",
186 | " 3.92892 4.93653 4.31109 3.43615 3.03602 3.05866 3.03749\n",
187 | " 4.56897 5.88165 5.83787 5.47791 -3.48043 -3.47632 -3.44334\n",
188 | " 5.22002 6.84939 7.44944 7.6984 4.14515 4.11459 4.0676\n",
189 | " 5.75039 7.64106 8.79298 9.58333 … -5.2586 -5.19723 -5.1292\n",
190 | " 6.02087 8.04567 9.48639 10.5652 5.7562 5.64904 5.54162"
191 | ]
192 | },
193 | "execution_count": 7,
194 | "metadata": {},
195 | "output_type": "execute_result"
196 | }
197 | ],
198 | "source": [
199 | "# Dimensionless version of F matrix for sparse sampling\n",
200 | "F = transpose(compute_ulx_all_l(basis, sp_x))"
201 | ]
202 | },
203 | {
204 | "cell_type": "code",
205 | "execution_count": 8,
206 | "metadata": {},
207 | "outputs": [
208 | {
209 | "data": {
210 | "text/plain": [
211 | "72-element Vector{Int64}:\n",
212 | " -1739\n",
213 | " -716\n",
214 | " -435\n",
215 | " -307\n",
216 | " -234\n",
217 | " -186\n",
218 | " -151\n",
219 | " -126\n",
220 | " -106\n",
221 | " -90\n",
222 | " -77\n",
223 | " -66\n",
224 | " -57\n",
225 | " ⋮\n",
226 | " 65\n",
227 | " 76\n",
228 | " 89\n",
229 | " 105\n",
230 | " 125\n",
231 | " 150\n",
232 | " 185\n",
233 | " 233\n",
234 | " 306\n",
235 | " 434\n",
236 | " 715\n",
237 | " 1738"
238 | ]
239 | },
240 | "execution_count": 8,
241 | "metadata": {},
242 | "output_type": "execute_result"
243 | }
244 | ],
245 | "source": [
246 | "# Sampling points in n\n",
247 | "sp_n = basis.sampling_points_matsubara(basis.dim()-1)"
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": 9,
253 | "metadata": {},
254 | "outputs": [
255 | {
256 | "data": {
257 | "text/plain": [
258 | "72×72 Matrix{ComplexF64}:\n",
259 | " -0.0-0.00157101im -5.95226e-5+0.0im … -0.00322053+0.0im\n",
260 | " -0.0-0.00379364im -0.000345927+0.0im 0.00849325+0.0im\n",
261 | " -0.0-0.00617193im -0.000909578+0.0im -0.013881+0.0im\n",
262 | " -0.0-0.00859539im -0.00174668+0.0im 0.0191966+0.0im\n",
263 | " -0.0-0.0110285im -0.00283801+0.0im -0.0243999+0.0im\n",
264 | " -0.0-0.0135049im -0.00418642+0.0im … 0.0294781+0.0im\n",
265 | " -0.0-0.0161079im -0.00583646+0.0im -0.0344425+0.0im\n",
266 | " -0.0-0.0186518im -0.00765254+0.0im 0.0394045+0.0im\n",
267 | " -0.0-0.0213354im -0.00975744+0.0im -0.0443181+0.0im\n",
268 | " -0.0-0.0241132im -0.0121115+0.0im 0.0492602+0.0im\n",
269 | " -0.0-0.0269863im -0.0147053+0.0im … -0.0542247+0.0im\n",
270 | " -0.0-0.0300525im -0.017624+0.0im 0.0595113+0.0im\n",
271 | " -0.0-0.033191im -0.0207469+0.0im -0.0647406+0.0im\n",
272 | " ⋮ ⋱ \n",
273 | " 0.0+0.0300525im -0.017624+0.0im … 0.0595113+0.0im\n",
274 | " 0.0+0.0269863im -0.0147053+0.0im -0.0542247+0.0im\n",
275 | " 0.0+0.0241132im -0.0121115+0.0im 0.0492602+0.0im\n",
276 | " 0.0+0.0213354im -0.00975744+0.0im -0.0443181+0.0im\n",
277 | " 0.0+0.0186518im -0.00765254+0.0im 0.0394045+0.0im\n",
278 | " 0.0+0.0161079im -0.00583646+0.0im … -0.0344425+0.0im\n",
279 | " 0.0+0.0135049im -0.00418642+0.0im 0.0294781+0.0im\n",
280 | " 0.0+0.0110285im -0.00283801+0.0im -0.0243999+0.0im\n",
281 | " 0.0+0.00859539im -0.00174668+0.0im 0.0191966+0.0im\n",
282 | " 0.0+0.00617193im -0.000909578+0.0im -0.013881+0.0im\n",
283 | " 0.0+0.00379364im -0.000345927+0.0im … 0.00849325+0.0im\n",
284 | " 0.0+0.00157101im -5.95226e-5+0.0im -0.00322053+0.0im"
285 | ]
286 | },
287 | "execution_count": 9,
288 | "metadata": {},
289 | "output_type": "execute_result"
290 | }
291 | ],
292 | "source": [
293 | "# Dimensionless version of hatF matrix for sparse sampling\n",
294 | "hatF = basis.compute_unl(sp_n)"
295 | ]
296 | },
297 | {
298 | "cell_type": "code",
299 | "execution_count": null,
300 | "metadata": {},
301 | "outputs": [],
302 | "source": []
303 | }
304 | ],
305 | "metadata": {
306 | "kernelspec": {
307 | "display_name": "Julia 1.6.0",
308 | "language": "julia",
309 | "name": "julia-1.6"
310 | },
311 | "language_info": {
312 | "file_extension": ".jl",
313 | "mimetype": "application/julia",
314 | "name": "julia",
315 | "version": "1.6.0"
316 | }
317 | },
318 | "nbformat": 4,
319 | "nbformat_minor": 4
320 | }
321 |
--------------------------------------------------------------------------------
/sample/run_all_python_scripts.sh:
--------------------------------------------------------------------------------
# Run every sample script; stop at the first failure and propagate its
# exit status.
#
# Fixes over the original:
#  - iterate the glob directly instead of parsing `ls` output
#  - `python $file; [ $? -eq 0 ] || exit $?` exited with the status of the
#    `[` test itself (always 1 on failure), not the script's exit code
for file in *.py
do
    python "$file" || exit $?
done
7 |
--------------------------------------------------------------------------------
/sample/singular_values.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "colab": {
8 | "base_uri": "https://localhost:8080/",
9 | "height": 267
10 | },
11 | "colab_type": "code",
12 | "executionInfo": {
13 | "elapsed": 7656,
14 | "status": "ok",
15 | "timestamp": 1548325974742,
16 | "user": {
17 | "displayName": "Chikano直樹",
18 | "photoUrl": "",
19 | "userId": "03369377431180913953"
20 | },
21 | "user_tz": -540
22 | },
23 | "id": "KEaf0Cqm11Gd",
24 | "outputId": "f1b5a7f7-2b3e-4903-fd6a-64fec2246c8a"
25 | },
26 | "outputs": [
27 | {
28 | "name": "stdout",
29 | "output_type": "stream",
30 | "text": [
31 | "Collecting irbasis\n",
32 | "\u001b[?25l Downloading https://files.pythonhosted.org/packages/5e/e2/6a2b4b85793f498966c60ca7ffdccbe6528b7dee6b34929d934d130302a1/irbasis-1.0.2-py2.py3-none-any.whl (7.8MB)\n",
33 | "\u001b[K 100% |████████████████████████████████| 7.8MB 3.8MB/s \n",
34 | "\u001b[?25hRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from irbasis) (2.8.0)\n",
35 | "Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from irbasis) (1.14.6)\n",
36 | "Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from irbasis) (0.16.0)\n",
37 | "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from h5py->irbasis) (1.11.0)\n",
38 | "Installing collected packages: irbasis\n",
39 | "Successfully installed irbasis-1.0.2\n"
40 | ]
41 | }
42 | ],
43 | "source": [
44 | "!pip install irbasis"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 1,
50 | "metadata": {
51 | "colab": {},
52 | "colab_type": "code",
53 | "id": "oeXLU-tR2Hs-"
54 | },
55 | "outputs": [],
56 | "source": [
57 | "import numpy\n",
58 | "import irbasis\n",
59 | "import matplotlib\n",
60 | "import matplotlib.pyplot as plt"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": 19,
66 | "metadata": {
67 | "colab": {},
68 | "colab_type": "code",
69 | "id": "NLn_fXy57A93"
70 | },
71 | "outputs": [
72 | {
73 | "data": {
74 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAagAAAEYCAYAAAAJeGK1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3Xl4VOX1wPHvSQgh7IhhVQFBUHYkooiyW3GpICig4NJa0FYQRa1L/dlWrdhSlKq1Km4QdhAR3FBAFlGEKAKCrEkAAZF9TUKW8/vjTmAymSyTTDLb+TzPPMPc+8697wwPnHm384qqYowxxgSbqEBXwBhjjPHGApQxxpigZAHKGGNMULIAZYwxJihZgDLGGBOULEAVQUQqiMh/ROSQiBwRkbdFpFKg62WMMeHOAlTRngR6AG2Ai4CWwL8CWiNjjIkAYuugCiciO4E/q+p01+trgVlALVXNDmjljDEmjIVNC0pEnhCRWSKSLCIqIqmFlI0SkYdEZJOIpIvILhEZJyJVPMrVBM4HfnA7/D1QDWhcBh/DGGOMS9gEKOB5oCewHThcRNmXgBeBjcBInBbRA8B8EXH/Tqq5no+4HTvicc4YY0wZqBDoCvhRU1VNBhCRH4Gq3gqJSCucoDRHVQe4HU8BXgYGA1Ndh4+7nmsAv7j+XNPjnDHGmDIQNi2o3OBUDLcBAoz3OD4BOAUMdbvmEWAX0N6tXAec4JRa0roaY4wpWtgEKB9cBuQAq9wPqmo6zljTZR7l3wKeEJEGIhIP/A14zyZIGGNM2QqnLr7iagAcUNUML+d2A1eKSEVVPe069jxwLrABJ6DPBh7zdmERGQ4MB6gdR8fGNZ34n00UUShpxFKRTH6mLjVrnkPNyjF+/WDGGBMKvvvuuwOqGl9UubCcZp47BqWqjb2c2w7EqOoFXs5NAu7AmUJ+xPO8L5rWr6lrh8fyUfYVZBPN1VHruCBqP2kaw4qc1nxU6beM/8vDZ9+Qsgx2fw9XPVia2xpjTNATke9UNaGocpHYxXcKiC3gXCW3MqXyM/EMy3yYa6K/I1nrUUXSmZrVg2hy6B29hnGnn4VFz0FONswfBdOHQMNLz14gZRl85TlMZowxkSMSA9Qe4FwR8RakGuJ0/532cs5nO6sn8K7czMMVZjMi8wGezBrGXZmPc0zjOCQ1YflYGHM+rJniBKrc1mzKMph1d96AZYwxESYSA9RqnM/dyf2gK79eeyDJHzdp07AGKx7vSZ+W8fwx5898k9MKgG9yWnFv5mhWnnsLi6M6Q+ZJsnKyIfMkJPaDd/rAzLvg1vegSVd/VMUYY0JSJAaoGYACnoM9w4DKwBR/3qz1wKe5uf9tNKwZhwB1qsWSRCum7qlDu+wf+U/WzRzTyryZ05cTcQ1h5zeQnQk/THNaUu6s288YE0HCZpKEiNwBNHK9HAlUBMa5Xu9Q1US3sq8AI4APgE+AS3AySawAeqpqTmnrk5CQoElJ3htj9z3zEv/IHseIzAf4JqcVnaM28HrMi0SJUK39zbB2Omg2RFeE/hOgVb+z3X7WsjLGhLjiTpIIp2nm9wDdPI4963peCiS6HX8QZ6HtcOAG4ADwCvC0P4JTURpnbGaEPnCm2w9AEOZnXcHt/V6D1rfAjCGQleEEpe97wd41FpyMMRElbAKUqnb3oWw2TutqXFFly8L8agPZfSTtzOu2ksy9mQ+xs3oCtwM06wm3z4CtX8CGD2D7QoitDqsmQE4WNO159mI2Pd0YE6YicQwq4B69tgVxMdFnXr+R/Vu+yWnFiYwsmjz+MV1eWMzcI03homsg8xS0udV5/mkeTB4AX/zNmfVns/2MMWEsbMaggk1hY1AAc9fsZuyCzew5kkblitGcPJ03c1K3mJ94M+5VYm+b5HTrJS91uv1iqsKJvVCtPmSmwaBE6/YzxoQUW6gb5Pp1aMiKx3uS8sIN1KxcMd/5i3O28
Yg+dDb4XNgNBk+Fy++Fi2+E43sh/SismQpZbsu2bKafMSZMWIAKAnvcxqNyvZH9Wz463izvwSZd4byOzlT0ziMgKhrWTYNXOsLPSdblZ4wJK2EzSSKUNagZl2fShPvxPDynmje/FqYOhKO74K1eUCEWWvbPfwObSGGMCUHWggoCnpMmcl3Xul7eA7u/zzvVvElXuH0mNLrSeZ11GjZ9BNMGw/alzjFrVRljQpS1oIJAvw4NAc5MmqhfoxJRAonfpDJv7R72H8+gQc04Hr32Vvo1aZj/Avs3Qdc/w6o3oXpD+HUDTOkP7W6DzZ/Y+iljTEiyWXxlpKhZfEV556tknvnopzzH4mKiGdO/zZmAlq/LL/d1x7th5etOfr/zOkHz38D5l+cNUtbtZ4wJEJvFF+Le/io137G0zGzGLth89oC3Lr9b34OTB53xqPrt4OdV8M3/YPrQs7n9rNvPhKFhw4YhIjz00ENlfq+ff/6ZkSNH0rlzZypXroyIkJqa6rXsrl27uOWWW6hRowbVq1enf//+7Ny5s8TlClLa9wcjC1BBytvMvnzHr3rQe9fdpvkwcCLcuwx+8yykH4aMozDlVvjir5bTz4SdtLQ0Zs6cSY0aNZg6dSpZWVller9t27Yxc+ZMatWqxdVXX11guVOnTtGzZ082bdrExIkTSUxMZOvWrfTo0YOTJ0/6XK609wk1NgYVpIo9s8+TZ6vqygfg3BawfBzs+hZWjId6bfK/z7r8jI/cF5s7Y6QtznY/l3dd5s7l2LFjTJ48maFDh/LZZ59x4403ltn9unbtyr59+wB46623+Pzzz72WmzBhAsnJyWzevJlmzZxlI23btuWiiy7ijTfeYPTo0T6VK0hp3x+srAUVpAqa2XdvtwsLf6O3VlXza6HnU1CpBsSdA7+sh8T+8NNHznnr8jM+mrtmN0/MWc/uI2kosPtIGk/MWc/cNbsDUp+JEyeSkJDAkCFDuOiii5g4cWKZ3i8qqnj/dc6bN48rrrjiTNAAaNKkCV26dOHDDz/0uVxp7xNqrAUVpDxn9sVXi+XQyQwmfp3K60u3s/dIevF/teYGoEGTnUkT8x+AdTNgxlC4+AZn4a91+UWkv8/fwMY9x3x+35qdRzidnTfxf1pmNn+evY5pq3wb92jZoDp//W2rogsWYM+ePSxcuJCxY8cCMGTIEMaMGcPhw4epVauW1/eoKtnZ2V7PuRMRoqPz/1Asrg0bNtC3b998x1u1asWsWbN8Llfa+4Qaa0EFMfd0SKv+0ptbE85n+/6T7DmS7tuvVvduv5hK0P9NuOkVqFjVWTcVV8vpBjSmmDyDU1HHy9LkyZNRVQYPHgw4ASojI4MZM2YU+J6lS5cSExNT5KNXr16lqtuhQ4e8BslzzjmHw4cP+1yutPcJNdaCCiHLthzIdyx3Zl+hrShv40q1GkOFinBed0heAuNbw5WjoOdfQMQpY+NSYa+kLZcuLyz2OkbasGYcM+7tXNpq+WTixIn06NGD+vXrA9CsWTM6derExIkTue+++7y+p2PHjqxevbrIa1erVs2vdTW+sQAVQoo1s684PNdPrZkK8+6H5WMhdbmTIX3/prNljPHw6LUteGLOetIyz3aTxcVE8+i15dsST0pKYuPGjTz88MN5jg8ZMoRRo0axZcsWmjdvnu99VatWpX379kVeX3J/rJVQrVq1vLZgPFs8xS1X2vuEGuviCyEFzeArcmafJ8+Zfh1uh6EfQIOOsGsljG8D026DW961cSnjVb8ODRnTvw0Na8YhOC2nPIvIy8nEiROJjY1lwIABeY4PGjSI6OhoJk2a5PV95dXF16pVKzZs2JDv+MaNG2nZsqXP5Up7n1BjLagQ4u1Xa0y0+P6r1VuXXdPuzuOj0ZD0tnPss8eg+5PQ8qaz5azbz7j069AwYNPKAU6fPs20adO44YYbqFGjRp5zdevWpXfv3iQmJvLss8/mawmVVxffTTfdxCOPPEJycjIXXujMwE1NT
WXFihW88MILPpcr7X1CjaU6KiOlTXVUEPe1J7EVosjIyuHcarEcOJOvrxRrUc6kSvodrHwNstJBFbr9GXo8mb9r0JgA+uCDD+jfvz+zZ8/O14ICSExM5M4772Tx4sX06NHD7/efPXs2AIsWLeL111/ntddeIz4+nvj4eLp16wbAyZMnadeuHXFxcTz33HOICP/3f//H8ePHWbduHVWrVvWpHDitv169evHOO+9w5513+vz+YFDcVEeoqj3K4NGxY0cta4nfpGqjxz7K87j4qU/1g+9/9v1iyUtV/9nEec59PeYC1XEXq/61uuqrl6m+0PjseWMCrG/fvlq9enVNS0vzev748eNauXJlveuuu8rk/oDXR7du3fKU27Fjh/bv31+rVaumVatW1b59+2pKSkq+6xW33JdffqmAvvvuuyV6fzAAkrQY/49aC6qMlFULyl1hM6lWPN7Tt4t9Nd5ZqOuZUPbnJNi+CFK/gqgY6PEEXP1w3jLW5WeM8YEli40AfpvVB94zUDTpCuclwK8/ORnSc7Jh0TMw+RZIP2YZKIwxZcomSYSwEufrKy7PMadLboKpg2HbF/CSa/3M4Ck2HmWMKRPWggph3vL1RUsJZvUVxHM6erNecMf7UL8DZBxzHl+Oga0L874vZZnTZWiMMaVgASqEea5FqVapAtmqVIgu3eLCMwrazuPoTujyIFSoBDu/hqkDIekd55x1+xlj/MQmSZSR8pgk4SkrO4cBr3/Dll+OUSOuIvuO+ZBQtji87eA7fYgzFf30cWh0Fez/yaahG2MKZZMkIlCF6Ciub12PtMwcfjnmY0LZ4vC2g+/gKdD5TxB/Mez4CqJjYesXZ3fvzWXdfsYYH1mACjOTvtmR71i+reJLqqCZfo2vgpP74eLfwvG9sPJ/MG0wJC91yli3nzGmBGwWX5jx69Tz4vDs9vvxfZhzL5w+CVMGwGXDYd106/YzxvjMWlBhxm8JZYvLs9uv9QAY8j407wM5ObDyv9CkmwUnY4zPLECFGW9Tz0uUULa4vHX7Ne0Gne+H2GpQpQ5smAOvXgZbPs9bzsaljDGFsAAVZjynnsdECzFRwlUXnVt+lTizxXwiPPQjtLoZDmyBaYNg9dt5y9i4lDGmABagwpD7VvGfPHA1WTnwt3n594opM+7dfhVinT//5h9QIQ4+Hg3v3WhZ0Y3fDRs2DBHhoYceKvN7/fzzz4wcOZLOnTtTuXJlRITU1FSvZXft2sUtt9xCjRo1qF69Ov3792fnzp0lLldW1/Tne/3F1kGVkUCsgyrIK4u2Mu6LLZxTpSKHT57279ooX6QdgXeudXbrja0BN74IbW45e94Sz5oSSktLo169eogIsbGx7N69mwoVym4O2JIlSxg0aBAdO3YkOzubzz//nJSUFBo3bpyn3KlTp2jXrh2xsbFntsF46qmnOHXqFOvWraNKlSo+lSura3oqzXuLw7bbiIDtNoprdtJObeyvbTlKI3dLj6m3OVt4/LW66sJn856z7TxCw/KX8v9dJS91jgfA1KlTFdDJkycroPPnzy/T+2VnZ5/584QJExTwurXF+PHjNSoqSrdu3XrmWHJyskZHR+u4ceN8LldW1yxpvUsK224jsIKpBeXXbTlKynM6+rqZ8MG9oDlQpyUc/wUGTrQuv1DhLatIALtt+/Tpw8GDB1m9ejXNmzenXbt2zJo1q1zu/dZbbzFs2DCvLahevXqRnp7OihUr8hzP3dBw6dKlPpUrq2t6Ks17i6O4LShbB1UIEYkFXgV6AfHAXuAVVX0loBXzUbmvjfLGczp624FQJR7mj4JfNzpdfhs/dM557kll3X5l59PH4Zf1JXtvtfqQeLPzfHyvk01kyT+dhy/qtYHrSr4t+Z49e1i4cCFjx44FYMiQIYwZM4bDhw9Tq1Ytr+9RVbKzs4u8togQHR1dZLmCbNiwgb59++Y73qpVqzwBtLjlyuqaJa13WbNJEoWrAPwC/AaoAQwEnhKRgQGtl
Y/KfW2UN96mo0dFw+kT0H6Ik8tv9dswdRBsX+Kct5l+wa1STSc4Hd3lPFeqGZBqTJ48GVVl8ODBgBOgMjIymDFjRoHvWbp0KTExMUU+evXqVaq6HTp0yGuQPOecczh8+LDP5crqmiWtd1mzFlQhVPUk8H9uh34QkXnAVcDMwNTKd49e24In5qwnLfPsL8YKUWW4Nqo48u019VuYeRdknnJloLgH1s+2mX5lrRQtlzN/h13/DElvQ/fHAvJ3NXHiRHr06EH9+vUBaNasGZ06dWLixIncd999Xt/TsWNHVq9eXeS1q1Wr5te6Gt8EfYASkSeAS4GOQBNgh6o2LqBsFDAKuBdoDOzHCSRPu4JNaesSA1wN/Lu01ypPubP1xi7YzJ4jaVSKieJ0Vg7tzg/ML14gf5dfi+tg6Gz47j3YOB++fcPZINGCU3Dy/IHR5OqAjEElJSWxceNGHn744TzHhwwZwqhRo9iyZQvNmzfP976qVavSvn37Iq8vUrqta2rVquW1xeHZQiluubK6ZknrXdZCoYvveaAnsB0oqm35EvAisBEYCcwCHgDmu4LXGSIyXUS0kEd3L9d/FTgOTCrdRyp/7mujlj7agwpRQp/xy2jy+Md0eWGxf7Kd+6KgxLMd74bYKlCtAfw0D17paBkogpG3zPa3vuccL0cTJ04kNjaWAQMG5Dk+aNAgoqOjmTTJ+z/V8uria9WqFRs25F+DuHHjRlq2bOlzubK6ZknrXdZCIUA1VdXaqnoNsKegQiLSCicozVHV/qo6QVVHA6OBHsBgj7cMw5n4UNAjz/QVEXkR6Axcp6qn/fLJAuTr7QfJATKycvy/JUdp5P4qHzgJHlwHbQbCwW1OBgrbEDG4FPQDoxwns5w+fZpp06Zxww03UKNGjTzn6tatS+/evUlMTMTbTOXcLr6iHm+88Uap6njTTTexcuVKkpOTzxxLTU1lxYoV3HTTTT6XK6trlrTeZa44c9GD5QH8CKQWcO45QIGrPY5XAk4Cn5TivuOB9UB8cd8TTOugPF05ZlGeNVG5jyvHLApsxbytrVnxquqzdZ01UxP72lopc8acOXMU0NmzZ3s9P2nSJAV08eLFZXL/WbNm6axZs/S+++5TQF977TWdNWuWLlmy5EyZEydOaNOmTbV169Y6d+5c/fDDD7Vt27bapEkTPX78uM/lyuKaS5Ys0ejoaJ04cWKJ6lMSFHMdVMCDji+PIgLUAiAbiPVybgWwv4T3fNl132IHJw3yAOW5aDf30fixjwJdNe9OHlR9uaMTpF5opLrhw7znA7hA1ARO3759tXr16pqWlub1/PHjx7Vy5cp61113lcn9XT+I8z26deuWp9yOHTu0f//+Wq1aNa1atar27dvX66Le4pbz9zW//PJLBfTdd98tcX18VdwAFVILdUXkR6CqepkkISLrgTqqWtfLuZnArTjBq9jdcyLSCEgFMoAst1PLVfU6L+WHA8MBLrjggo47duTfPDAYBMXCXV/kdus1uBS2fQEIXPN36DIq4AtEjTG+i8Qt3yvjBBJv0t3KFJuq7lBVUdVKqlrV7ZEvOLnKv6mqCaqaEB8f78utypW3LTlE4OFr8s92Cjj3ADR0Ntz0CiDwxdPw1jUWnIwJY+EUoE4BsQWcq+RWJuJ5bslRq3IMqnAqs+iV9eXOc7bYpXfC7TOdhaE/r4LoilDj/IBW0RhTNoJ+HZQP9gAtRSRWVT1bUg2BA75074W7fh0anlkfparcNmElYxds5rrW9ahdtaA4HwDeZoXFxEL2abikrzMV/dXLnHI9nzpbxlIkGRPywqkFtRrn83RyPygilYD2QHBkbg1CIsKzfVtzLC2Tq//1ZeDWRhWHe5ffoEnQfwJoNiwbC1NuhdOnbCq6MWEinFpQM4AngQeB5W7Hh+GMPU0JRKVCxYY9x4iOEk6ddrr5ctdGAeW/b1Rh8iWdvRWq1IbPn4atn8NLrZ2ANSjRxqWMCXFBH6BE5A6gketlPFBRR
HL7cnaoaiKAqq4Xkf8CI0RkDvAJcAlOJomlwNTyrXloGbtgM1k5eWd0pmVmM3bB5uAKUN667Jr2hD/2hPeHwfqZINHw7ZugChd2O1vOuv2MCSmh0MV3D/Cs61EHqOn2+h6Psg8CjwCtgP/iZI94BbhRVXPKq8KhKCi25CiNlGWwfRFccT9IFGyaD1Nugc2fnj1v3X7GhJSgb0GpancfymYD41wP44MGNeO8ro0q1y05SspzLdRF18L0wZCZBtNvd7bz2PyJTUc3JsSEQgvKlANva6OiA70lR3F5jks17Qa3z4CEeyCmCqxJhHrtoPHVAa2mMcY3FqAMkH9tVOWK0eTkKC0bVA901YpWUOLSVn0hOgZqN4PkxfDaFXDq0NkylhXdmKBmAcqc4b4lx1eP9aRapQr8ff4GQikd1hlnMqNPhBFJ0OIG2L8J/tMOdq60MSljQkBI5eILJQkJCZqUFNpLr95bkcLf5m/knCoVOXzyNA1qxvHotS2Ca1ZfQb4a7wQf95bVp4/Dt687f46Jg8HTne5AY0y5isRcfMbPqleqgACHTp4Orn2jisNbt991L8CVDwDqbC3/ycPw00d5y1i3nzFBwwKUKdC4L7bi2b7OXRsVklKWwQ+ToeujULEKHNoOM++Ala+fPW/dfsYEjaCfZm4CJ+TXRrnznIrepCvMGOos6v3sMdj0Mfy6waaiGxNErAVlClTQGqiQWBvlyXMqepOuMGgyXH4fxF8MqcsgtjrUaRXQahpjzrIAZQrkbW1UbIWo0Fgb5amgqeiNOsPJ/dDsGjic4uTyy51IkcvGpYwJCAtQpkCea6NE4IJz4ujbvkGgq+Yfnpsh3jgestLg08dg3ijIybFxKWMCyMagTKHc943KnXa+eNOv9LqkboBr5gee3X4Jv3M2Qvz0Mfj+PafbL+2Is5bKxqWMKXfWgjLFNuSKRsRXq8i9id8F955RxeWt269FHxj1AzTrDYeS4fQJ+NVj1qJ1+RlTLixAmWL7eN1ejp7KIitHQ29dlC9Sl8OeNc728tmZ8Okj8NFoZ/sO6/IzptxYF58ptrELNnM6O++uJUG5Z1RpeE5Hb36ds1Yq6W3n3KmD1uVnTDmxFpQptrBaF1UQz3Gpi6+HoXPgnKZwcKuzW29sCCTQNSYMWIAyxRZW66IK4m1cSgTSjzj7SmUchwk94LMnnC6/XDYuZYzfWYAyxeZ1zygJkT2jSsq9y6/fazAwERBY+RpM6gsZJ2xcypgyYmNQpthyx5nGLtjMniNpVImtwImMLJrVqRrgmpUhzy6/S26EoR/Awr9CylIY39bp9huUaONSxviZbbdRRsJhu42iHEvPpNu/vqR1wxok3nN5oKtT/uYMh3UzIKoCtOwLHe/OG6RSljkB7qoHA1ZFY4KRbbdhylz1SjGM7HkRy7ceYNmW/YGuTvlKWQbbFsLlf3Je//g+TLkVti06e966/YwpFWtBlZFIaEEBnM7K4YoxCzmWlkV2jobWpoYl5TkVfftimHYbZKU7ramEe+DH2ZYZ3ZgCWAvKlItP1u/leHoELN515zku1bQnDJkFbQc723esegMu7G7ByZhSsgBlSmXsgs1kZudthYf0pobFUVBm9A5DnK3kq9Rxuvwm9oWc7LNlbCq6MT4pNECJyCMer88t2+qYUBMRi3eLI7fbb1AiPLgOGnSElCXwSgKc2G9jUsaUQFEtqKc9Xm8pzkXF0URE2rqepWTVM8EuIhbvFod7t19MHAxfDK36w+FkeLm9s3uvjUkZ45OiApRnYCk00IhIRREZDxwGtgM/uJ4Pi8hLIhJb4pqaoORt8W6FqDBfvOuNt26/W9+FS+9yMqKnH4N9G/NmnzDGFKqoAOX5r6mof13/BToBA4E6QEXX80DX8VdKUEcTxDw3NYyLiUIErrrIeoNJWQabPoIrH4DoGPjsMZh4o5Muyb2MjUsZ41Wh08xFJB140u3Qc8BT7mVU9UW38oeAi1T1oJdrxQNbVLVWa
SsdCiJlmrmn5P0nuOalZdxxRSP+dlOrQFcncPJNRV8KU2+F7Ayofh4MfR9O/pq3jDERorjTzIsKUEsovNWkqtrTrfxBoJWq/uLlWvWBDap6TlGVCgeRGqAAnpiznhmrdxJfLZZfj2VExtooT1+NdyZEeGaW+OZV2PK5s16qQizcNs2Ck4k4xQ1QhebiU9XuPt53MvCZiPwDWAscAWoA7YEngEk+Xs+EoBb1qpKjsO9YBnB2bRQQOUHKW3qjJl2dx6ePw7f/g9OZ8OXzkJPlrKXKZSmSjAH8vw5qNDAH+DewCdgLbHa9/gB42M/3M0FowrKUfMfCfm1UcaUsg/Uz4aqHoUIl2PmNkyLpxw/Onrfp6MYAfs5mrqrZwDPAMyJSA6gGHFfVo/68jwlutjaqAJ7jUk27u1IkZcDs38HWz2DrFzYmZYxLsVtQIjJMRGaJSN3ilFfVo6r6swWnyGNrowrgmSKpSVdnDOry4RBXE9ZOh3ptofHVAa2mMcHCly6+/kBHVd3nflBEGojIRyJyTER2icgYW+8U2bytjYqLiY68tVGeCkqR1LwPIFD7Ikj+El7vYlPRjcG3ANUS+NDL8QnA9UBVoD7wZ5xxKBOh3NdG5frD1U0iZ4KEL3K7/QZOhBGrofm1sG8D/KcD7N9iY1ImovkSoOKBre4HROQ8oA+wEagL1Ab+A/QRkYH+qqQJPf06NGTF4z356Zk+xFeL5duUQ9jWLl64d/uJwO0zIeH3cGo//O9KmH67jUmZiOVLgMomf6ojV98Er6jqfte402hgHXCXn+oYcCISJyLbROREoOsSauIqRjOiRzNWpRziq20HAl2d4OOt2+/Gl+DyP0JOptPVt+QF2P5l3jLW7WcigC8BahvQzuNYT5yFvJ95HH8fCKc+iWeAHYGuRKga3Ol8GtaM49+fb7FWVHGcmYo+2pmKvmOFMxV947yz563bz0QAXwLUR8DtInIJOK0KnBZUiqp6/ue9GwiLjBEi0hHnc/4z0HUJVbEVonmgVzPW7jpCx+cW0uTxj+nywuLw3tSwpNynovf+q7MRYsWqoDkw6y748H5Lj2Qihi8BahxwDPhaRGYAy3GyRMzyUraqq2ypiMgTrqntySKiIpJaSNkoEXlIRDaN7MNxAAAgAElEQVSJSLprRuE4EalSivtXwJkEcj9wuqTXMU6GcwEOnTwdOTvvlkRBU9E7DXMC1ZrJ0DDBgpOJCMUOUKp6BLgG2APcitOFl4ITuDxdBuTLx1cCz+N0I27H2cKjMC8BL+JM2BiJEzgfAOaLSJ7PKSLTXQGvoEd3V9FHgTWquswPnyWivfjF1nxJHS27hBcFTUW/+AYnf1+txrB1gbMR4taFecvZuJQJMz5lklDVDUArEWmL00r6XlXT3cuISDWgLzDTD/VrqqrJruv+6LpnPiLSCicozVHVAW7HU4CXgcHAVLe3DANGFHLfoyLSDLgP6FCqT2AAyy5RKu5T0Rt1gQ/uc8aopg6EAROg9YC8XYPGhIkSpTpS1XWFnG4LJAFzS1SjvPdJLmbR23BmE3r+fJwAvAAMxS1Aqepx4DiFEJGrcKbOb3FtCBwDVBGRA0B/a1X5pkHNOHZ7CUYRn12iODy7/QZMgNrNYNm/YPY9sGUBbFto41Im7PiS6miXiLwiIr1FJLqgcqq6QlV7qerH/qlisVwG5ACrPOqSjrOr72UluOZMoBlOJvb2wB+AU64/f1uaykYi79kloiy7RHF46/br/hj8aSXEnQPrZkC9NpYiyYQdXyZJfAj0Az4HfhWRRBG5WUQql03VfNIAOKCqGV7O7QbOFZGKvlxQVU+5cgn+rKo/A/udw/pzAfdBRIaLSJKIJO3fv9/nDxHOvGWXuDXhfMsuURrH9wIK5zaH5CXw+tVw+uTZ8zYmZUKcL5MkRqjq+cAVwJtAAs56p/0i8qGI3C0itcuonkWpDHgNGkC6W5kSU9Ulqup1DMytzJuqmqCqCfHx8aW5XVjKzS6x/
fnruahOVb7efpDsHFsXVSLu41L3r4KLfgP71jspkg5ut7VSJiz4vB+Uqq5S1SdU9RKc/HzPAfWAt4FfRORLEXlARC7wc10LcwooKEFtJbcyJghERwmjel/Etl9P8PH6vYGuTmjyTJE0ZBZ0vBtO7oPXOluKJBMWSrVhoapuUtUxqno5cAHwEE5KpH8DKSLyvYj08UM9i7IHpxvPW5BqiNP9Z+uYgsj1retTr3oso2f8YAt3S8LbuNRv/wOd7oXsDCdF0s6VkJMTmPoZ4wd+21FXVXer6quq2htn9tvvgFSgtb/uUYjVOJ+lk/tBEamEM6khqRzqYHwwb+0eDp48TVaO2sJdf0lZBj/Ohi4PQXQsfPkPeLcPpB/LW8bGpUyI8PeW7wCo6mFVnaSq/VX132VxDw8zcHICPuhxfBjO2NOUcqiD8cHYBZvJzM47/mQLd0vBfR3UNX9zuvyiK8Gub+G1K2D/ZhuXMiGn0HVQIvKIe4ARkXNVtdxSUovIHUAj18t4oKKIPOV6vUNVEwFUdb2I/BcYISJzgE+AS3AySSwl7yJdEwRs4a6fea6VurAbDJ0F37wKWz6H/3WBmEoweKqNS5mQUdRC3adxxpNybaF8k8DeA3TzOPas63kpkOh2/EGcLsXhwA3AAeAV4GlVtY74IGMLd/3sKs/OA5xA1KQrfPoYfPs6ZGTCsn9DjkJTt39WKcucAOftGsYEUFFdfJ77P3m+LlOq2l1VpYBHd4+y2ao6TlVbqGqsqjZU1dGqans4BSFvC3crRost3PW3lGWwfpazdUd0LKQshakDYNMnZ89bt58JUkW1oDwXqdiiFeMXuQt0xy7YzJ4jaURFCedWjaVv+wYBrlkYcR+XatIVLuwO02+D06dgxhBnWvrGD206uglaRQWoiiIy2u11JY/XqOqLACJyO05aoU3WpWaKo1+HhmcC1ezvfuaRWWtZ9NOv9G5ZN8A1CxPexqVumw4bPoC10yHpHWh5swUnE7SksB1ORWQJhbeaVFV7usp+A7TB6QbcgBOsch9rVfVkgVcJQwkJCZqUZLPbiyszO4ee45ZwTuWKzL2/C64EvaYspCyDmXc6XX4nfoF67eCaZ21cypQbEflOVROKLOfPLbjF+V+lOWcTrOY+4oFk4AdVHei3GwYxC1C+m7ZqJ0/MWU/tKhU5dPI0DWrG8ei1LSxfnz+5d/ud1wmmD4HtC529pgZNhhbX5e8aNMbPihugSrTdRkHUiXabXY8ZbpWpy9lgZYxXMa5ddw+edJJ+5C7eBSxI+Ytnt9/Q2fDZ4/DtG06w6ng3bJxrwckEBV+22xjm2n7d5wECVd2nqgtU9Z++vtdEjpcW2q67Zc4zRZIIXPdPuPtjiK4ISW9Dk24WnExQ8CWTRH+go6rucz8oIg1E5CMROebaM2pMATnxjCmULd4NIM12FvJWrQsb5kBi/7x5/CxFkgkAXwJUS5w9oTxNAK7H2Y69PvBnYE7pq2YiTUGLdG3xbhk7s3XHJBi1Fuq3h+2L4H9XOklnba2UCRBfAlQ8sNX9gIicB/QBNuIkiK0N/AfoIyIRMRnC+I/3XXejbfFuWXMfl4qJg+FLoMUNsP8n+E87mHGHjUmZgPAlQGWTP5NEH9exV1R1v6oeVdXRwDrgLj/V0UQIb7vujujZzCZIlDVv41K3TYW2A+HUQchMg2zbrcaUP18C1DagncexnjjrpD7zOP4+YP0Bxme5u+6uffo3VI2twKZfjge6SpEpZRlsWwSdhkNOFkweAPNHgfuyFBuXMmXMlwD1EXC7iFwCICJxOC2oFFXd4VF2N+WbVNaEmRqVYxh6RSM+XreHlAMRtcY78NzXQV0/1sk+EVUBvnsPJt7kpEqycSlTDnwJUOOAY8DXIjIDWA7UAGZ5KVvVVdaYErvnqibEREfx+pLtga5KZPFcK9X8NzB0DjToAKnLnHGpmXfauJQpc8VeqKuqR0TkGmAmcKvrcDJO4PJ0GfBL6atnIll8t
VgGXXY+k1fuYMmWX/n1WIZllygP3tIbXdjNmTwx6/ew4X2oEOfk9IO8QcpSJBk/8mlHXVXdoKqtcDJCXAW09tzAUESqAX2BlX6rpYlYjWtXJkdh37EM2xo+0FKWQcoSSLgHsjMg6V2YNtg5nnveuv2MH5Voy3dVXaeqX6tqupfTbYEkYG6pamYM8PZXqfmOWXaJAHAfl7rxRRg0xRmXOn0SptwCC5+x/H3G70oUoAqjqitUtZeqfuzva5vIY9klgoTnuNTF18OQ2dD4KsjKgK/GQdvbLDgZv/Jrslhj/M22hg8S3saUmnaHqCjY8wNkpsPK/wIKfZ4/W8bGpEwp+L0FZYw/WXaJIJbb7XfbNBi2CGKrOUFqzvC8521MypSQtaBMUHPfGn73kTSiBJ7t28pm8QUDz26/kd/BhF6wbgYcToWD22xMypSKtaBM0MvNLvHe7y4jRwvf4tmUI88USVXrOEGqXlvY9S1UqukknjWmhCxAmZDRrXk8l9SvzutLt5OTY2EqKO1aCcd2Q7PecGg7vNQa1k7PW8ZSJJlisgBlQoaIcF+3C9m+/yQLf9pX9BtM+XKfij70ffjNc5BxFD64F1a8nLeMjUuZYhBV+yVaFhISEjQpKSnQ1Qg7Wdk5XPaPhZzIyCIrWy2zRDD5arwTeNy7/dbOhE8fhfQj0LQn7F1r41IGEflOVROKKmctKBNSPlq3lxMZWWRmq2WWCDaeY1IA7QbCQz9C7Ytg+2Ko3gDOvzww9TMhxwKUCSljF2wmMztvq98ySwS5PWsg7RBc0Bl+WQ8vtYKN8/KWsXEp44UFKBNSLLNEiHEfl/r9Z9D1z3Byv5MNPendvGVsXMp4sHVQJqRYZokQ47lWqudfnG6+BX+Bjx50uv12rLBxKeOVtaBMSPGWWaJShSjLLBGsvI1LJfwORv0A1RvCT/Pg3ObQ6KrA1M8ENQtQJqT069CQMf3b0LBmHOI61qXZuTaLL9Ts3wRZ6VCvDez8Bv53JWScOHvexqQMFqBMCMrNLJHywg3c0KY+q1IPcSIjK9DVMsXlPi5173Jofj3s/wleuRQO77AxKXOGBSgT0v5wdROOp2cxY/WuQFfFFJf7uJQI3D4NLr0LTuyD/14O04fYmJQBLECZENfhglp0anwO73yVQmZ2TqCrY4rD27jUTS/DZX+ArDSnq+9wakCqZoKLBSgT8oZ3vZDdR9L4ZP3eQFfFlFTKMtjwAVz5AERFw7yRMONOyM7KW8bGpSKKBSgT8npeXIc61SryyKy1NHn8Y7q8sNgyS4QS9zGp3zwLQ2ZBVAz89CG83RvSjti4VISydVAm5M1bu4fDpzLPZJjITX8E2Oy+UOC5VqppD7hjDnw5BnZ+DS93AM2BQYk2LhVhrAVVBBG5QUS+F5GTIvKLiDwa6DqZvCz9UYjzNibVpCv8/lNod5uTJikzDXKyA1M/EzAWoAohIr8B3gQeBWoAzYFPA1opk4+lPwpTKctg6+fQaTjkZEHizfDpY/nL2LhU2LIAVbhngWdVdZGqZqnqMVX9MdCVMnkVlObI0h+FMPdxqevHwm1TISoKvn0dpg6G7Ewbl4oAQR2gROQJEZklIskioiKSWkjZKBF5SEQ2iUi6iOwSkXEiUqWE964CXAbUc11zn4jME5EmJfw4pox4TX8UY+mPQprnuFTzPjBkDtRtDVs+dcalZt5l66XCXFAHKOB5oCewHThcRNmXgBeBjcBIYBbwADBfRPJ8ThGZ7gp4BT26A7UAAQYAfYAmwC/AHBERTNDwlv6oR4s6NkEilHkbl2raHf64AlpcD0d3QeYpOOKxQNu6/MJKsM/ia6qqyQAi8iNQ1VshEWmFE5TmqOoAt+MpwMvAYGCq21uGASMKue9RoLLrz/9R1VTX9Z4E9gPnAztL8HlMGenXoeGZgPS7d1exOvUQ6ZnZVPJoWZkQl7IMdn0L7YfAD1Pgwz/BiV/h6ofydguasBDULajc4FQMt+G0djx/Ok0AT
gFDPa57XFUPFPLIVNWjwA5AMSHlD1dfyIETp5m3dk+gq2L8yT0A9XsNBrwNRMGivzkTKHLPWZdf2AjqAOWDy4AcYJX7QVVNB35wnS+J14FRInK+iFTCmTTxnapa6ymIXdm0NhfXq8bby1NQtd8XYcNzXKrNLXD7dKhc29lXqsZ5zq69JmyES4BqABxQ1Qwv53YD54pIxRJc918408q/d12nAdC/oMIiMlxEkkQkaf/+/SW4nfEHEeEPV1/I5n3HSXhuoWWXCBfexqViXDM1z78C9q6Fl1rDTx/nLWPjUiErXAJUZcBbcAJIdyvjE1XNUdXHVDVeVWurat/CWk+q+qaqJqhqQnx8vK+3M/7kajkdPHka5Wx2CQtSYcS9y++eBXDVaDjxC8wYAmum5i1jU9FDUrgEqFNAbAHnKrmVMRHipYVb8x2z7BJhxrPLr/df4bqxEB3rTJ54/x4blwpx4RKg9uB043kLUg1xuv9Ol3OdTABZdokI4K3L7/LhMHI1VDkX1s+G+h0sOIWwcAlQq3E+Syf3g66JDe2BpEBUygSOZZeIYIdTnbx9tZvB9oXwaidnEoU7G5cKCeESoGbgTAd/0OP4MJyxpynlXiMTUN6yS8TFRFt2iXCXO+Y0cCLcvwpa3QwHNsOUW2DTx3nL2LhU0AvqhboicgfQyPUyHqgoIk+5Xu9Q1UQAVV0vIv8FRojIHOAT4BKcTBJLybtI10SA3EW7YxdsZveRNKIEnuvXyrJLhDvPcalb34MaF8DXL8OMOyDh97Bhjo1LhQgJ5nUiIrIE6FbA6aWq2t2tbDROC2o40Bg4gNOyelpVT5RpRb1ISEjQpCTrWQwGCzfu4w+Tknj19g7c2LZBoKtjAmHH15DY39lSvu1g6P9GoGsU0UTkO1VNKKpcUHfxqWp3VZUCHt09ymar6jhVbaGqsaraUFVHByI4meDS8+I6NKpdmXe+Sgl0VUyg5GRBhUoQVxvWTXcSzbqzMamgFNQByhh/iIoS7r6yMd/vPMLaXUcCXR1T3nLHnAZNglFrnMkTG+fCu9c7kylsTCpoWYAyEeGWjudRNbYC766wVlTEcR+XqlQD/vStk3lixwp4NcG27QhiFqBMRKhWKYZLL6jJ3B/2WOqjSOO5Xiq6gpN5ollvOJQMEg21GgeseqZgFqBMRJi7ZjffphwCsNRHxunW27MG2twKpw7Aq5fBtxPyl7FxqYCyAGUiwtgFm8nIyslzzFIfRSj3HH4D3oK+r0JWBnz6CCx6Lm8ZG5cKqKBeB2WMv1jqI3OG51qpDkOhUk34aDQsH+uMTR3YbONSQcBaUCYiWOojc4a3HH6X3AgPrYe6rWHn11ClDpzXyfv7TbmxAGUigrfUR7EVoiz1kTlr17dwfC807gr7f4KXWsHGeXnL2LhUubIAZSJCvw4NGdO/DQ1rxiGAAK3qV7fUR8bhPi5193zo/rgzeWLmnfB9Yt4yNi5VbmwMykSMfh0anglIz8zfyKRvUvn1WDp1qlcq/I0m/HmOS3V/wunmW/AkzBsJyUsg+Usblypn1oIyEenOzo3IVmXKtwVukGwiibdxqcvugZHfQ5V4+NH2lgoEC1AmIjU+twrdm8czddVOTntMPzfmjEPbnTx+uXtLvX0tZGedPW9jUmXKApSJWHdd2Zj9xzP49Me9ga6KCUaee0s16gK7VsJrnSH9mI1JlYOg3m4jlNl2G8EvJ0fp9PxCjqZlkpWtNKgZx6PXtrCJE8bx1Xgn+Lh36824E376ECqfC5oNAydZt18JhMV2G8aUpXlr93A0LZPMbLX0RyY/b+NSgyZBm4HODL+sDIipHJi6RQgLUCZijV2wmczsvD0Ilv7IFCplGWxfBAn3QFY6vH0NfPl8/jI2LuUXFqBMxLL0R8Yn7mulbnzR6d4DWPpP+HAEqNq4lJ/ZOigTsRrUjGO3l2Bk6Y+MV55rpS75Ldw+Cz55BNYkOtnRj++1tVJ+ZC0oE7G8pT+Ki7H0R6YA3
sakLuoND6xxZvjt+9EZk6rbOjD1C0MWoEzEck9/lGvIFY1sFp/xTepy2L8JLr4Rju6C8W1h7fS8ZWxcqkQsQJmI1q9DQ1Y83pOt/7iOOtVi2fbriUBXyYQS93GpwVOgzz/h9HH44F5Y+b+8ZWxcymc2BmUMEBMdxW2dLuDlxVvZcfAkjWpXCXSVTCjwHJe64j6Iq+WMS332OKQsdxb32rhUiVgLyhiX2y+/gGgRJq/cEeiqmFDhbVyq3SB4cB3UOB82fwzntoDGVwemfiHOApQxLnWrV+LaVvWYmfQzaaezA10dE8p+WQ+Zp6BuG2cDxJc7wNaFecvYuFSRLEAZ42boFY04mpZJ5xcW0eTxj+nywmLLLGF84z4udd9y6HAHHE6BqQPhp/l5y9i4VKFsDMoYN78cTUOAI6cygbPpjwCb3WeKx3Ncqu+rUK0+LPu3swFiwu9hwwc2LlUM1oIyxs2/P9+CZ/pkS39kfOJtXKrnX+CezyE6Fla/Bc2useBUDBagjHFj6Y9MmclKgwqVIO4cWDcdZt2T97yNSeVjAcoYNwWlObL0R6ZUcsecBk2CB76HWhfChtkwqZ/l8CuEBShj3HhPfxRt6Y9M6biPS8XVgvu/hQaXQvKX8L/OZydVWLdfHhagjHHjmf6oQpTw/M2tbYKEKR3PcakKFWHYYmd91K8/OV1/dVoFrn5BygKUMR5y0x+Nu7UdWTlK3eqVAl0lE45Sl8OvG52s6Md2w3/aWQ4/DxagjCnADW3rU7NyDFO+3Rnoqphw475WatBkuM49h9/rectE8LiUrYMypgCVYqK55dLzeO/rVH49lk4da0kZf/FcK3X5fVCpJnzyKHz2GKR+5WSgiPBxKWtBGVOI2y+/gKwcZWbSrkBXxYQTrzn8BsOotVDjPNg0H+pcEvE5/CxAGVOIC+Or0qVZbaat2kV2jucSXmP8bN+PkJkGdVo6rahXEmDb4rxlImhcygJUIUSkvoi8LyIHROSgiMwVkfMCXS9TvprXqcruI2k0ffITy81nyo77uNQfv4a2g+HQNphyC2z6JG+ZCBmXsgBVuNeAikAT4HzgJPBOQGtkytXcNbuZtvps915ubj4LUsbv3MelRKD/G9DlQdAcmDEUPvlzxK2XsgBVuKbALFU9rqqngKlA2wDXyZSjsQs2k56Zk+eY5eYzZcLbuNQ1f4e75kFUBVj1BrS4PmKCEwR5gBKRJ0Rklogki4iKSGohZaNE5CER2SQi6SKyS0TGiUhptkZ9EbhFRGqKSDXgDmB+Ka5nQozl5jNBISYOYqvDmkSY+6e858J4TCqoAxTwPNAT2A4cLqLsSzgBZSMwEpgFPADMF5E8n1NEprsCXkGP7q6iXwE1gUPAEaAF8KSfPpsJAZabzwTUmRx+iTDyO6jeEH6YAtOG5D0fpmNSwR6gmqpqbVW9BthTUCERaYUTlOaoan9VnaCqo4HRQA9gsMdbhgHxhTxWuILaQiAJqA5UBeYCS0Qkxo+f0QQxb7n5KkaL5eYz5cN9XKpqHRiRBPGXwOaPYELPsB+TCuoAparJxSx6GyCAZzt3AnAKGOpx3eOqeqCQRyZwDtAIeFlVT6hqGk4LrSXO2JSJAO65+QSIEmfqueXmM+XCc1yqYmX44wpomAC7v3MSzzbsGLj6lbGgDlA+uAzIAVa5H1TVdOAH13mfqOoBYBtwv4jEiUhFYBROV2NqaStsQkdubr6UF25gRM+L2LzvOLsOnQp0tUyk2rHC2UK+aW84uA3Gt4ENH+YtEybjUqIaGosPReRHoKqqNvZybj1QR1Xrejk3E7gViFXV0z7esyVOq+kynGD+I/CYqn5dQPnhwHDXy9au8qZ8nQscCHQlIpB97+WgRizVmtSKujDlcE7y0Qxia8SScWGtqKYAyYdzth/N4LhHmeOBrnMBGqlqfFGFwiUXX2Ugo4Bz6W5lfApQqroR6OND+TeBNwFEJElVE3y5nyk9+94Dw7738iciSUfSw/s7D
5cuvlNAbAHnKrmVMcYYEyLCJUDtAc4VEW9BqiFwwNfuPWOMMYEVLgFqNc5n6eR+UEQqAe1xpoqXtzcDcE9j33ug2Pde/sL+Ow+XSRJtgLXAB6o6wO34SOBl4A5VnVxedTXGGFN6QT1JQkTuwFmLBM4C2ooi8pTr9Q5VTQRQ1fUi8l9ghIjMAT4BLsHJJLEUJ4eeMcaYEBLULSgRWQJ0K+D0UlXt7lY2GngQZ5p3Y5wprzOAp1X1RJlW1BhjjN8FdYAyxhgTucJlkkRQKKOM6gYQkeYi8oyIrBSR/SJyXER+EJG/ePt+RaSFa4PJwyJyUkSWi0jPQNQ9nIhIZbfdBV71ct6+dz8RkXNE5N8iss31/8l+EflSRK72KHe5iCx0/Zs4JiKfiUj7QNXbn4J6DCoEvYQz7vUBMI6z42AdRKS3quYU9mZTqN8D9wPzgClAJk4i4OeAgSJyhStfIiLSFPgayAL+BRzFSRC8QESuU9WFAah/uHgGZzw4H/ve/UdEGgFLcJJUvw1sAWrg7EfX0K3cFa5yu4GnXYdHAMtF5EpVXV9+tS4DqmoPPzyAVjj5AN/3OD4SUOD2QNcxlB9AAlDDy/HnXN/vCLdjM4FsoL3bsarADmAzrq5te/j8d3ApTvAZ7frOX/U4b9+7/77r5cAuoH4R5VYBx4CGbscauo59HujPUdqHdfH5j08Z1Y1vVDVJVY96OTXD9dwawNXddxOwRFV/cHv/CeAtoDklSB4c6VyTkCYAnwFzvJy3791PRKQrcBXwL1XdKyIxIlLZS7lmON/pLFXdnXvc9edZQG8RqVde9S4LFqD8x+8Z1U2xnOd63ud6bouT9uobL2VXup7t78J3DwEX43QfeWPfu/9c73reKSLzgTTgpIhsERH3H7q532dB37kAIb0XhwUo/2mAk1LJW9La3TipmCqWc53CmutX/f/hdDvlrnVr4Hre7eUtucdsMycfiEgT4O/AM6qaWkAx+979J3c3zAk4+9LdhTMGexpIFJHfuc6H/XdukyT8p0wyqptCjQc6A0+q6mbXsdyuEG9/F+keZUzxvA4k42w9UxD73v2nmuv5ONBDXXlERWQuzt/D8yIykQj4zq0F5T+WUb0cicizON1Nb6rqGLdTud+xt78L+3vwkatL6Rrgj+rsNF0Q+979J831PE3dklyr6mGcWaz1cFpZYf+dWwvKf/YALUUk1ks3n2VU9yMR+RvwFPAucJ/H6T2uZ29dG7nHvHWJGA+u3QFexEkd9otrUB7Ofo81XMcOYN+7P/3sev7Fy7m9rudaRMB3bi0o/wnGjOphxxWc/gpMBP6grnm1btbjdHl09vL2K1zP9ndRPHE4a55uALa6PZa4zg91vf4D9r37U+5Eq/O8nMs99ivO/zlQ8HeuwHf+rVr5slRHfmIZ1cueiDyNM1ifCNytBSx8FpFZQH/gUlVd6zpWFdiA859oCy+BzXgQkRigr5dT8cBrOFPO3wbWqeoW+979Q0Rq4awdOwZc7Jqqj4jUx/lBsFtVW7iOrcbp7rtYVfe4jjUANgGrVLV3AD6C31iA8iMReQVnXOQD8mZUXwH0LOg/VFM0EbkfeBXYiTNzz/O73KeqX7jKNsP5FZqJk93jGE5GgzbADaq6oLzqHY5EpDGQAvxXVUe4Hbfv3U9EZDjwBk5wfweoCPwRqA/cqKqfu8pdCXyJ0y34iuvtI4G6QJfcHwohK9ArhcPpAUQDD+Osms/A6f99EWcfq4DXL5QfwHs4XRYFPZZ4lL8E+BA4gjNQ/BXQO9CfIxweOLsF5MskYd+737/n/jjrmU7izOj7HCfoeJbrDCwCTrjKLcBpxQb8M5T2YS0oY4wxQckmSRhjjAlKFqCMMcYEJQtQxhhjgpIFKGOMMUHJApQxxpigZAHKGGNMULIAZYwxJihZgDImQohIkoisD3Q9jCkuC1DGRAARqQC0BtYEui7GFJcFKGMiQ0ucfYMsQJmQYQHKmMjQ3vVsAcqEDAtQx
kSGDq7nHwJaC2N8YAHKmMjQHkhR1SOBrogxxWUBypjI0B5rPZkQYwHKmDDn2mCwJjb+ZEKMBShjwqlwlJkAAACSSURBVF/u+JMFKBNSLEAZE/4sQJmQZAHKmPDXHtivqrsDXRFjfGEBypjw1wFrPZkQZAHKmDAmIrWB87AAZUKQBShjwpst0DUhS1Q10HUwxhhj8rEWlDHGmKBkAcoYY0xQsgBljDEmKFmAMsYYE5QsQBljjAlKFqCMMcYEJQtQxhhjgpIFKGOMMUHJApQxxpig9P+FfWlPxCa1CQAAAABJRU5ErkJggg==\n",
75 | "text/plain": [
76 | ""
77 | ]
78 | },
79 | "metadata": {},
80 | "output_type": "display_data"
81 | }
82 | ],
83 | "source": [
84 | "markers = ['o', 'x']\n",
85 | "for i, Lambda in enumerate([100.0, 10000.0]):\n",
86 | " basis = irbasis.load('F', Lambda)\n",
87 | " dim = basis.dim()\n",
88 | " s0 = basis.sl(0)\n",
89 | " label = r'$\\Lambda={}$'.format(Lambda)\n",
90 | " plt.semilogy([basis.sl(l)/s0 for l in range(dim)], marker=markers[i], label=label)\n",
91 | " \n",
92 | "plt.legend(loc='best', frameon=False)\n",
93 | "plt.xlim([0, 70])\n",
94 | "plt.ylim([1e-8, 1])\n",
95 | "plt.xlabel(r'$l$')\n",
96 | "plt.ylabel(r'$S^\\mathrm{F}_l/S^\\mathrm{F}_0$')\n",
97 | "plt.tight_layout()\n",
98 | "plt.savefig(\"singular_values.pdf\")"
99 | ]
100 | }
101 | ],
102 | "metadata": {
103 | "colab": {
104 | "collapsed_sections": [],
105 | "name": "sample_plot.ipynb",
106 | "provenance": [],
107 | "toc_visible": true,
108 | "version": "0.3.2"
109 | },
110 | "kernelspec": {
111 | "display_name": "Python 3",
112 | "language": "python",
113 | "name": "python3"
114 | },
115 | "language_info": {
116 | "codemirror_mode": {
117 | "name": "ipython",
118 | "version": 3
119 | },
120 | "file_extension": ".py",
121 | "mimetype": "text/x-python",
122 | "name": "python",
123 | "nbconvert_exporter": "python",
124 | "pygments_lexer": "ipython3",
125 | "version": "3.6.5"
126 | }
127 | },
128 | "nbformat": 4,
129 | "nbformat_minor": 1
130 | }
131 |
--------------------------------------------------------------------------------
/sample/step_by_step_examples.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include "irbasis.hpp"
4 |
5 | double rho_omega_metal(double omega) {
6 | return (2/M_PI) * std::sqrt(1-omega*omega);
7 | }
8 |
9 | int main() {
10 | double beta = 100.0;
11 | double wmax = 1.0;
12 | double Lambda = wmax * beta;
13 | irbasis::basis b = irbasis::load("F", Lambda, "./irbasis.h5");
14 |
15 | int dim = b.dim();
16 |
17 | // (1) Semicircular DOS on [-1, 1]
18 | // (2) Pole at \omega = -1,+1
19 | // Comment out one of them
20 | std::string model = "Metal";
21 | //std::string model = "Insulator";
22 |
23 | /*
24 | * Compute rho_l from rho(omega)
25 | */
26 | std::vector rho_l(dim, 0.0);
27 | if (model == "Metal") {
28 | // We use a simple numerical integration on a uniform mesh
29 | // nomega: Number of omega points for numerical integration.
30 | // Please use an adaptive numerical integration method (such as quad in GSL) for better accuracy!
31 | int nomega = 100000;
32 | double dw = 2.0/nomega;
33 | for (int l=0; l Sl(dim), gl(dim);
54 | for (int l=0; l gtau(n_tau, 0.0);
70 | for (int t=0; t ns;
85 | for (int n=0; n > > unl = b.compute_unl(ns);
89 | for (int n=0; n giwn = 0.0;
91 | for (int l=0; lirbasis) (1.12.0)\n","Installing collected packages: irbasis\n","Successfully installed irbasis-1.0.3\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"qRFcK2xKW2kW","colab_type":"code","colab":{}},"source":["from __future__ import print_function\n","import numpy\n","import irbasis"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"iLXlQhBFXH17","colab_type":"code","colab":{}},"source":["def funique(x, tol=1e-12):\n"," \"\"\"\n"," Remove duplicated points\n"," \"\"\"\n"," x = numpy.sort(x)\n"," mask = numpy.ediff1d(x, to_end=2*tol) > tol\n"," x = x[mask]\n"," return x\n","\n","def find_zeros(ulx, parity='even'):\n"," \"\"\"\n"," Find all zeros using a double-exponential mesh + bisection algorithm\n"," \"\"\"\n"," Nx = 10000\n"," eps = 1e-14\n"," # Double exponential mesh on (0, 1)\n"," tvec = numpy.linspace(-2.5, 2.5, Nx) #2.5 is a very safe option.\n"," xvec = 0.5 * numpy.tanh(0.5*numpy.pi*numpy.sinh(tvec)) + 0.5\n","\n"," zeros = []\n"," for i in range(Nx-1):\n"," if ulx(xvec[i]) * ulx(xvec[i+1]) < 0:\n"," a = xvec[i+1]\n"," b = xvec[i]\n"," u_a = ulx(a)\n"," u_b = ulx(b)\n"," while a-b > eps:\n"," half_point = 0.5*(a+b)\n"," if ulx(half_point) * u_a > 0:\n"," a = half_point\n"," else:\n"," b = half_point\n"," zeros.append(0.5*(a+b))\n"," \n"," zeros = numpy.array(zeros)\n"," if parity == 'even':\n"," zeros = numpy.hstack((zeros, -zeros))\n"," elif parity == 'odd':\n"," zeros = numpy.hstack((zeros, -zeros, 0))\n"," else:\n"," raise RuntimeError('Invalid value of parity!')\n"," \n"," return funique(zeros, 1e-10)"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"0RuE7R3EXSnA","colab_type":"code","colab":{}},"source":["Lambda = 10000.0\n","b = irbasis.load('F', Lambda)"],"execution_count":0,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"z1eBvLasZmpK","colab_type":"text"},"source":["Find zeros of 
$u_l(x)$."]},{"cell_type":"code","metadata":{"id":"YLBgHF3eZo5H","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":272},"outputId":"501560c7-cc04-497a-be01-0bdb45f6a71b","executionInfo":{"status":"ok","timestamp":1558392859365,"user_tz":-540,"elapsed":727,"user":{"displayName":"Shinaoka Hiroshi","photoUrl":"","userId":"08306471410952693884"}}},"source":["whichl = b.dim()-1\n","f = lambda x: b.ulx(whichl, x)\n","parity = 'even' if whichl%2 == 0 else 'odd'\n","zeros = find_zeros(f, parity)\n","print(zeros, len(zeros))\n","assert len(zeros) == whichl"],"execution_count":61,"outputs":[{"output_type":"stream","text":["[-0.99998367 -0.9999137 -0.99978663 -0.99960037 -0.99935178 -0.9990365\n"," -0.9986487 -0.99818079 -0.997623 -0.99696291 -0.99618479 -0.99526892\n"," -0.99419076 -0.99292001 -0.99141959 -0.98964451 -0.98754047 -0.98504236\n"," -0.98207231 -0.97853739 -0.97432678 -0.96930847 -0.96332517 -0.95618965\n"," -0.94767909 -0.93752868 -0.92542414 -0.91099336 -0.89379723 -0.87332016\n"," -0.84896104 -0.82002634 -0.78572801 -0.74519084 -0.69747601 -0.64163062\n"," -0.57677512 -0.50223964 -0.41775212 -0.32366041 -0.22113653 -0.11227597\n"," 0. 
0.11227597 0.22113653 0.32366041 0.41775212 0.50223964\n"," 0.57677512 0.64163062 0.69747601 0.74519084 0.78572801 0.82002634\n"," 0.84896104 0.87332016 0.89379723 0.91099336 0.92542414 0.93752868\n"," 0.94767909 0.95618965 0.96332517 0.96930847 0.97432678 0.97853739\n"," 0.98207231 0.98504236 0.98754047 0.98964451 0.99141959 0.99292001\n"," 0.99419076 0.99526892 0.99618479 0.99696291 0.997623 0.99818079\n"," 0.9986487 0.9990365 0.99935178 0.99960037 0.99978663 0.9999137\n"," 0.99998367] 85\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"vClgVto_ZbnZ","colab_type":"text"},"source":["Find zeros of $v_l(y)$."]},{"cell_type":"code","metadata":{"id":"YKPeH6oPXcHd","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":391},"outputId":"4d54190f-93fb-4c1b-e639-7b2d3ffdd061","executionInfo":{"status":"ok","timestamp":1558392865425,"user_tz":-540,"elapsed":647,"user":{"displayName":"Shinaoka Hiroshi","photoUrl":"","userId":"08306471410952693884"}}},"source":["whichl = b.dim()-1\n","f = lambda y: b.vly(whichl, y)\n","parity = 'even' if whichl%2 == 0 else 'odd'\n","zeros = find_zeros(f, parity)\n","print(zeros, len(zeros))\n","assert len(zeros) == whichl"],"execution_count":62,"outputs":[{"output_type":"stream","text":["[-9.90866730e-01 -9.53393435e-01 -8.91614941e-01 -8.13307102e-01\n"," -7.26612384e-01 -6.38453155e-01 -5.53820128e-01 -4.75770960e-01\n"," -4.05803258e-01 -3.44323624e-01 -2.91061470e-01 -2.45377099e-01\n"," -2.06467317e-01 -1.73491307e-01 -1.45641278e-01 -1.22177754e-01\n"," -1.02443526e-01 -8.58655101e-02 -7.19502368e-02 -6.02763776e-02\n"," -5.04862705e-02 -4.22774949e-02 -3.53950244e-02 -2.96241877e-02\n"," -2.47844953e-02 -2.07243071e-02 -1.73162716e-02 -1.44534558e-02\n"," -1.20460810e-02 -1.00187876e-02 -8.30836028e-03 -6.86185301e-03\n"," -5.63505501e-03 -4.59123897e-03 -3.70012924e-03 -2.93703265e-03\n"," -2.28209227e-03 -1.71965747e-03 -1.23781570e-03 -8.28272312e-04\n"," -4.87227752e-04 -2.15753657e-04 
0.00000000e+00 2.15753657e-04\n"," 4.87227752e-04 8.28272312e-04 1.23781570e-03 1.71965747e-03\n"," 2.28209227e-03 2.93703265e-03 3.70012924e-03 4.59123897e-03\n"," 5.63505501e-03 6.86185301e-03 8.30836028e-03 1.00187876e-02\n"," 1.20460810e-02 1.44534558e-02 1.73162716e-02 2.07243071e-02\n"," 2.47844953e-02 2.96241877e-02 3.53950244e-02 4.22774949e-02\n"," 5.04862705e-02 6.02763776e-02 7.19502368e-02 8.58655101e-02\n"," 1.02443526e-01 1.22177754e-01 1.45641278e-01 1.73491307e-01\n"," 2.06467317e-01 2.45377099e-01 2.91061470e-01 3.44323624e-01\n"," 4.05803258e-01 4.75770960e-01 5.53820128e-01 6.38453155e-01\n"," 7.26612384e-01 8.13307102e-01 8.91614941e-01 9.53393435e-01\n"," 9.90866730e-01] 85\n"],"name":"stdout"}]}]}
--------------------------------------------------------------------------------
/script/gauss_legendre.py:
--------------------------------------------------------------------------------
"""Generate a C++ table of Gauss-Legendre quadrature nodes and weights.

Prints C++ source to stdout: an inline function
``gauss_legendre_nodes(int num_nodes)`` returning the nodes and weights
of Gauss-Legendre quadrature on [-1, 1] as a
``std::vector<std::pair<double, double> >``.  The values are computed
with mpmath at very high precision and then truncated to double.
"""
import sys
from mpmath import *
from mpmath.calculus.quadrature import GaussLegendre

# Working precision: 300 decimal digits, far beyond double, so the
# printed values are exact to the last bit of a double.
dps = 300

mp.dps = dps
prec = int(dps * 3.33333)  # decimal digits -> binary precision (log2(10) ~ 3.33)
mp.pretty = False

print("""
inline
std::vector<std::pair<double, double> >
gauss_legendre_nodes(int num_nodes) {
""")

# Note: mpmath gives wrong results for degree==1!
for degree in [4, 5]:
    g = GaussLegendre(mp)
    gl = g.get_nodes(-1, 1, degree=degree, prec=prec)

    # mpmath's Gauss-Legendre rule of a given degree has 3*2^(degree-1) nodes.
    N = 3*2**(degree-1)

    print("""
if (num_nodes == %d) {
    std::vector<std::pair<double, double> > nodes(%d);
""" % (N, N))

    # Each entry of gl is a (node, weight) pair of mpmath numbers;
    # float() truncates them to double precision for the C++ table.
    for i, (node, weight) in enumerate(gl):
        print("    nodes[{:<5}] = std::make_pair({:.25}, {:.25});".format(i, float(node), float(weight)))

    print("""
    return nodes;
}
""")

# Fallback for unsupported sizes: fail loudly rather than return garbage.
print("""
throw std::runtime_error("Invalid num_nodes passed to gauss_legendre_nodes");
}
""")
43 |
--------------------------------------------------------------------------------
/test/CMakeLists.txt:
--------------------------------------------------------------------------------
# Stage the basis databases for the tests: copy every *.h5 file found
# under database/ into <build>/test/ so that tests can open them via a
# path relative to their working directory.
file(GLOB_RECURSE DATA_FILES "${CMAKE_SOURCE_DIR}/database/*.h5")
foreach(datafile ${DATA_FILES})
configure_file(${datafile} ${CMAKE_BINARY_DIR}/test/ COPYONLY)
endforeach()

# Build both test suites: C++ (gtest) and Python.
add_subdirectory(c++)
add_subdirectory(python)
9 |
--------------------------------------------------------------------------------
/test/c++/CMakeLists.txt:
--------------------------------------------------------------------------------
# add_gtest(<test> <dir>): build a Google Test executable from
# <dir>/<test>.cpp (the bundled gtest sources are compiled in directly)
# and register it with CTest.  dummy.cpp is always linked in so that any
# non-inline function in the header-only library shows up as a duplicate
# symbol at link time.
# NOTE: LINK_ALL and header_files are read at call time, so they must be
# set before the add_gtest() calls at the bottom of this file.
function(add_gtest test)
set(source "${ARGV1}/${test}.cpp")
set(gtest_src "${ARGV1}/gtest_main.cc;${ARGV1}/gtest-all.cc")

add_executable(${test} ${source} ${gtest_src} ${header_files} dummy.cpp)
target_link_libraries(${test} ${LINK_ALL})
target_include_directories(${test} PRIVATE ${HDF5_INCLUDE_DIRS} ${Boost_INCLUDE_DIRS})
add_test(NAME ${test} COMMAND ${test} ${test_xml_output})
endfunction(add_gtest)

# HDF5 is mandatory (the basis data is stored in HDF5 files); Boost is
# optional -- only its include dirs are used if it is found.
find_package(HDF5 REQUIRED)
find_package(Boost 1.54.0)

# Enable AddressSanitizer in Debug builds to catch memory errors in tests.
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address")
set (CMAKE_LINKER_FLAGS_DEBUG "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address")

# Libraries every test executable links against.
list(APPEND LINK_ALL ${CMAKE_THREAD_LIBS_INIT} ${HDF5_C_LIBRARIES})

set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} " CACHE STRING "Linker flags for executables" FORCE)

# The header-only library under test (listed as a source so IDEs show it).
set(header_files ${CMAKE_SOURCE_DIR}/c++/irbasis.hpp)

# Test fixture read by the hdf5 unit test.
configure_file(hdf5_test.h5 ${CMAKE_BINARY_DIR}/test/c++ COPYONLY)

# Unit-test source files: one executable per entry, built from ./<name>.cpp.
set(unittest_src hdf5 multi_array interpolation)
foreach(test ${unittest_src})
add_gtest(${test} ".")
endforeach(test)
30 |
--------------------------------------------------------------------------------
/test/c++/dummy.cpp:
--------------------------------------------------------------------------------
1 | #include "../../c++/irbasis.hpp"
2 |
3 | // Just for detecting non-inline functions (i.e., duplicate symbols)
4 |
--------------------------------------------------------------------------------
/test/c++/gtest_main.cc:
--------------------------------------------------------------------------------
1 | // Copyright 2006, Google Inc.
2 | // All rights reserved.
3 | //
4 | // Redistribution and use in source and binary forms, with or without
5 | // modification, are permitted provided that the following conditions are
6 | // met:
7 | //
8 | // * Redistributions of source code must retain the above copyright
9 | // notice, this list of conditions and the following disclaimer.
10 | // * Redistributions in binary form must reproduce the above
11 | // copyright notice, this list of conditions and the following disclaimer
12 | // in the documentation and/or other materials provided with the
13 | // distribution.
14 | // * Neither the name of Google Inc. nor the names of its
15 | // contributors may be used to endorse or promote products derived from
16 | // this software without specific prior written permission.
17 | //
18 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
30 | #include
31 |
32 | #include "gtest.h"
33 |
// Entry point shared by all gtest executables (vendored Google Test code):
// initialize the framework with the command-line flags, then run every
// registered TEST and return the aggregate result.
GTEST_API_ int main(int argc, char **argv) {
  printf("Running main() from gtest_main.cc\n");
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
39 |
--------------------------------------------------------------------------------
/test/c++/hdf5.cpp:
--------------------------------------------------------------------------------
1 | #include
2 |
3 | #include "gtest.h"
4 | #include "../../c++/irbasis.hpp"
5 |
6 | using namespace irbasis;
7 |
// NOTE(review): throughout this file the template argument lists appear to
// have been stripped during extraction (e.g. "internal::hdf5_read_scalar" and
// "std::vector data" carry no <...> arguments). The code is kept as-is;
// confirm the missing template parameters against the upstream sources.

// Reading a scalar double dataset from the test HDF5 file.
TEST(hdf5, read_double) {
  std::string file_name("hdf5_test.h5");
  hid_t file = H5Fopen(file_name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
  double data = internal::hdf5_read_scalar(file, std::string("/test_data/double"));
  ASSERT_EQ(data, 100.0);
  H5Fclose(file);
}

// Reading a scalar integer dataset.
TEST(hdf5, read_int) {
  std::string file_name("hdf5_test.h5");
  hid_t file = H5Fopen(file_name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
  int data = internal::hdf5_read_scalar(file, std::string("/test_data/int"));
  ASSERT_EQ(data, 100);
  H5Fclose(file);
}

// Reading a 1-D double array; the dataset holds 0,1,2,..., so the value at
// flat index i must equal i and the extent must match the element count.
TEST(hdf5, read_double_array1) {
  std::string file_name("hdf5_test.h5");
  hid_t file = H5Fopen(file_name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
  std::vector data;
  std::vector extents;
  internal::hdf5_read_double_array<1>(file, std::string("/test_data/double_array1"), extents, data);
  for (int i = 0; i < data.size(); ++i) {
    ASSERT_EQ(data[i], i);
  }
  ASSERT_EQ(extents[0], data.size());
  H5Fclose(file);
}

// Reading a 2-D double array stored in row-major order.
TEST(hdf5, read_double_array2) {
  std::string file_name("hdf5_test.h5");
  hid_t file = H5Fopen(file_name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
  std::vector data;
  std::vector extents;
  internal::hdf5_read_double_array<2>(file, std::string("/test_data/double_array2"), extents, data);
  for (int i = 0; i < data.size(); ++i) {
    ASSERT_EQ(data[i], i);
  }
  ASSERT_EQ(extents[0] * extents[1], data.size());
  H5Fclose(file);
}

// Reading a 3-D double array through both the raw reader and
// load_multi_array; the two access paths must agree element-by-element.
TEST(hdf5, read_double_array3) {
  std::string file_name("hdf5_test.h5");
  hid_t file = H5Fopen(file_name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
  std::vector data;
  std::vector extents;
  internal::hdf5_read_double_array<3>(file, std::string("/test_data/double_array3"), extents, data);
  internal::multi_array
      a = internal::load_multi_array(file, std::string("/test_data/double_array3"));
  for (int i = 0; i < data.size(); ++i) {
    ASSERT_EQ(data[i], i);
    ASSERT_EQ(*(a.origin() + i), data[i]);
  }
  ASSERT_EQ(extents[0] * extents[1] * extents[2], data.size());
  H5Fclose(file);
}
65 |
66 |
--------------------------------------------------------------------------------
/test/c++/hdf5_test.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SpM-lab/irbasis/c38e044887a1fdb6f0b5495ba4cebd6d9f724d68/test/c++/hdf5_test.h5
--------------------------------------------------------------------------------
/test/c++/interpolation.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 |
5 | #include "gtest.h"
6 |
7 | #include "../../c++/irbasis.hpp"
8 |
9 | using namespace irbasis;
10 |
// Build the HDF5 group name of a basis, e.g. get_path("f", 10.0) yields
// "basis_f-mp-Lambda10.0". Lambda is rendered in fixed-point notation with
// exactly one digit after the decimal point, matching the group names in
// irbasis.h5.
inline std::string
get_path(const std::string& stat, double Lambda) {
  std::ostringstream name;
  name << "basis_" << stat << "-mp-Lambda"
       << std::fixed << std::setprecision(1) << Lambda;
  return name.str();
}
18 |
// Reference values of u_l(i*omega_n), loaded from an HDF5 file produced by
// the Python reference implementation. Holds one odd-l and one even-l
// dataset, each with its Matsubara indices and a normalization maximum.
// NOTE(review): template argument lists in this class appear stripped by
// extraction (e.g. "internal::multi_array, 2>", bare "static_cast");
// confirm against the upstream sources.
class refdata {
public:
  refdata() {}

  // Load reference data from file_name under the HDF5 group prefix.
  // Throws std::runtime_error when the file cannot be opened.
  refdata(
      const std::string &file_name,
      const std::string &prefix = ""
  ) {
    hid_t file = H5Fopen(file_name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);

    if (file < 0) {
      throw std::runtime_error("Failed to open " + file_name + "!");
    }

    //read info
    Lambda = internal::hdf5_read_scalar(file, prefix + std::string("/info/Lambda"));
    dim = internal::hdf5_read_scalar(file, prefix + std::string("/info/dim"));

    //read unl_odd
    // Each row is (n, u_{nl}); column 0 carries the Matsubara index as the
    // real part of a complex entry.
    internal::multi_array, 2>
        data_odd = internal::load_multi_array, 2>(file, prefix + std::string("/data/lodd/unl"));
    unl_odd.resize(data_odd.extent(0));
    n_odd.resize(data_odd.extent(0));
    for (int i = 0; i < data_odd.extent(0); i++) {
      n_odd[i] = static_cast (data_odd(i, 0).real());
      unl_odd[i] = data_odd(i, 1);
    }

    unl_odd_max = internal::hdf5_read_scalar(file, prefix + std::string("/data/lodd/unlmax"));
    odd_l = internal::hdf5_read_scalar(file, prefix + std::string("/data/lodd/l"));

    //read unl_even
    internal::multi_array, 2>
        data_even = internal::load_multi_array, 2>(file, prefix + std::string("/data/leven/unl"));
    unl_even_max = internal::hdf5_read_scalar(file, prefix + std::string("/data/leven/unlmax"));
    even_l = internal::hdf5_read_scalar(file, prefix + std::string("/data/leven/l"));

    unl_even.resize(data_even.extent(0));
    n_even.resize(data_even.extent(0));
    // NOTE(review): the loop bound below uses data_odd.extent(0) while filling
    // the *even* arrays from data_even. If the odd and even datasets can ever
    // differ in length this is a copy-paste bug: it should read
    // data_even.extent(0). Kept as-is pending confirmation.
    for (int i = 0; i < data_odd.extent(0); i++) {
      n_even[i] = static_cast (data_even(i, 0).real());
      unl_even[i] = data_even(i, 1);
    }
    H5Fclose(file);
  }

  double Lambda;  // dimensionless cutoff the dataset was generated for
  int dim;        // number of basis functions

  // Odd-l reference: u_{nl} values, their Matsubara indices, the maximum
  // magnitude used for normalization, and the l index itself.
  std::vector > unl_odd;
  std::vector n_odd;
  double unl_odd_max;
  int odd_l;

  // Even-l reference, same layout as the odd-l members above.
  std::vector > unl_even;
  std::vector n_even;
  double unl_even_max;
  int even_l;
};
78 |
// The basis object must report the Lambda and statistics it was loaded with.
TEST(interpolation, attributes) {
  basis b10("../irbasis.h5", "/basis_b-mp-Lambda10.0");
  ASSERT_EQ(10.0, b10.Lambda());
  ASSERT_EQ("B", b10.statistics());
}

// check_ulx() returns rows whose third entry is the deviation of the
// interpolated u_l(x) from stored reference values; it must stay below 1e-11.
// NOTE(review): the element type of the returned vectors was stripped in
// extraction ("std::vector >"); same applies to the tests below.
TEST(interpolation, check_ulx_b) {
  basis b10("../irbasis.h5", "/basis_b-mp-Lambda10.0");
  std::vector > ref_data10 = b10.check_ulx();
  for (int i = 0; i < ref_data10.size(); i++) {
    ASSERT_LE(ref_data10[i][2], 1e-11);
  }
  basis b10000("../irbasis.h5", "/basis_b-mp-Lambda10000.0");
  std::vector > ref_data10000 = b10000.check_ulx();
  for (int i = 0; i < ref_data10000.size(); i++) {
    ASSERT_LE(ref_data10000[i][2], 1e-11);
  }
}

// Same check as check_ulx_b, for the fermionic basis.
TEST(interpolation, check_ulx_f) {
  basis b10("../irbasis.h5", "/basis_f-mp-Lambda10.0");
  std::vector > ref_data10 = b10.check_ulx();
  for (int i = 0; i < ref_data10.size(); i++) {
    ASSERT_LE(ref_data10[i][2], 1e-11);
  }
  basis b10000("../irbasis.h5", "/basis_f-mp-Lambda10000.0");
  std::vector > ref_data10000 = b10000.check_ulx();
  for (int i = 0; i < ref_data10000.size(); i++) {
    ASSERT_LE(ref_data10000[i][2], 1e-11);
  }
}

// v_l(y) interpolation deviation against reference data, bosonic basis.
TEST(interpolation, check_vly_b) {
  basis b10("../irbasis.h5", "/basis_b-mp-Lambda10.0");
  std::vector > ref_data10 = b10.check_vly();
  for (int i = 0; i < ref_data10.size(); i++) {
    ASSERT_LE(ref_data10[i][2], 1e-11);
  }
  basis b10000("../irbasis.h5", "/basis_b-mp-Lambda10000.0");
  std::vector > ref_data10000 = b10000.check_vly();
  for (int i = 0; i < ref_data10000.size(); i++) {
    ASSERT_LE(ref_data10000[i][2], 1e-11);
  }
}

// v_l(y) interpolation deviation against reference data, fermionic basis.
TEST(interpolation, check_vly_f) {
  basis b10("../irbasis.h5", "/basis_f-mp-Lambda10.0");
  std::vector > ref_data10 = b10.check_vly();
  for (int i = 0; i < ref_data10.size(); i++) {
    ASSERT_LE(ref_data10[i][2], 1e-11);
  }
  basis b10000("../irbasis.h5", "/basis_f-mp-Lambda10000.0");
  std::vector > ref_data10000 = b10000.check_vly();
  for (int i = 0; i < ref_data10000.size(); i++) {
    ASSERT_LE(ref_data10000[i][2], 1e-11);
  }
}

// First and second derivatives of u_l at x = 1.0, compared with stored
// reference values for l = Nl - 1 after Nl is rounded down to an even number
// (presumably so the reference l has a fixed parity — confirm upstream).
TEST(interpolation, differential_ulx) {
  basis b10("../irbasis.h5", "/basis_f-mp-Lambda10.0");
  double d_1st_ref_data10 = b10.get_ref_ulx(1);
  double d_2nd_ref_data10 = b10.get_ref_ulx(2);
  int Nl = b10.dim();
  if (Nl % 2 == 1)
    Nl -= 1;

  ASSERT_LE(fabs((d_1st_ref_data10 - b10.d_ulx(Nl - 1, 1.0, 1)) / d_1st_ref_data10), 1e-11);
  ASSERT_LE(fabs((d_2nd_ref_data10 - b10.d_ulx(Nl - 1, 1.0, 2)) / d_2nd_ref_data10), 1e-11);

  basis b10000("../irbasis.h5", "/basis_f-mp-Lambda10000.0");
  double d_1st_ref_data10000 = b10000.get_ref_ulx(1);
  double d_2nd_ref_data10000 = b10000.get_ref_ulx(2);
  Nl = b10000.dim();
  if (Nl % 2 == 1)
    Nl -= 1;
  ASSERT_LE(fabs((d_1st_ref_data10000 - b10000.d_ulx(Nl - 1, 1.0, 1)) / d_1st_ref_data10000), 1e-11);
  ASSERT_LE(fabs((d_2nd_ref_data10000 - b10000.d_ulx(Nl - 1, 1.0, 2)) / d_2nd_ref_data10000), 1e-11);
}
157 |
// Checks the high-frequency tail of u_l(i*omega_n) at a single very large
// Matsubara index (1e+8) against the analytic asymptotic coefficient, for the
// one odd l and one even l recorded in the reference data. Returns the worse
// (larger) of the two deviations, relative where the analytic limit is
// non-negligible.
// NOTE(review): "_statics" is presumably a typo for "_statistics" ("f"/"b");
// the element types of the local vectors were stripped in extraction, and the
// double literal 1e+8 is converted to the (lost) element type of n.
double check_data_tail(basis bs, refdata rb, std::string _statics) {
  //Check odd-l
  int l = rb.odd_l;
  std::vector n(1, 1e+8);
  std::vector > > unl = bs.compute_unl(n);
  double unl_limit, unl_coeff;
  // The n*n (resp. n) factor extracts the coefficient of the 1/(i w_n)^2
  // (resp. 1/(i w_n)) tail implied by the multiplication below.
  if (_statics == "f") {
    unl_limit = -(bs.d_ulx(l, 1, 1) + bs.d_ulx(l, -1, 1)) / (M_PI * M_PI * sqrt(2.0));
    unl_coeff = unl[0][l].real() * n[0] * n[0];
  } else {
    unl_limit = -(bs.ulx(l, 1) - bs.ulx(l, -1)) / (M_PI * sqrt(2.0));
    unl_coeff = unl[0][l].imag() * n[0];
  }
  // Use a relative deviation unless the analytic limit is essentially zero.
  double dunl_coeff = std::abs(unl_limit - unl_coeff);
  if (std::abs(unl_limit) > 1e-12)
    dunl_coeff /= std::abs(unl_limit);

  //Check even-l
  l = rb.even_l;
  if (_statics == "f") {
    unl_limit = (bs.ulx(l, 1) + bs.ulx(l, -1)) / (M_PI * sqrt(2.0));
    unl_coeff = unl[0][l].imag() * n[0];
  } else {
    unl_limit = (bs.d_ulx(l, 1, 1) - bs.d_ulx(l, -1, 1)) / (M_PI * M_PI * sqrt(2.0));
    unl_coeff = unl[0][l].real() * n[0] * n[0];
  }
  double dunl_coeff_even = std::abs(unl_limit - unl_coeff);
  if (std::abs(unl_limit) > 1e-12)
    dunl_coeff_even /= std::abs(unl_limit);
  // Report the worse of the odd-l and even-l deviations.
  if (dunl_coeff_even > dunl_coeff)
    dunl_coeff = dunl_coeff_even;
  return dunl_coeff;
}
191 |
// Compares u_l(i*omega_n) computed by the basis against the tabulated
// reference values for the recorded odd l and even l, each maximum deviation
// normalized by the corresponding reference maximum. Returns the larger of
// the two normalized deviations.
// NOTE(review): the _statics parameter is unused in this function; the
// element types of the local containers were stripped in extraction.
double check_data(basis bs, refdata rb, std::string _statics) {
  //Check odd-l
  int l = rb.odd_l;
  std::vector > > unl = bs.compute_unl(rb.n_odd);
  double dunl_max = std::abs(unl[0][l] - rb.unl_odd[0]);
  for (int i = 1; i < rb.unl_odd.size(); i++) {
    double tmp = std::abs(unl[i][l] - rb.unl_odd[i]);
    if (tmp > dunl_max)
      dunl_max = tmp;
  }
  dunl_max /= std::abs(rb.unl_odd_max);

  //Check even-l
  l = rb.even_l;
  unl = bs.compute_unl(rb.n_even);
  double dunl_max_even = std::abs(unl[0][l] - rb.unl_even[0]);
  for (int i = 1; i < rb.unl_even.size(); i++) {
    double tmp = std::abs(unl[i][l] - rb.unl_even[i]);
    if (tmp > dunl_max_even)
      dunl_max_even = tmp;
  }
  dunl_max_even /= std::abs(rb.unl_even_max);
  if (dunl_max < dunl_max_even)
    dunl_max = dunl_max_even;

  return dunl_max;
}
219 |
220 | /*
221 | TEST(interpolation, unl_limit) {
222 |
223 | basis b10f("../irbasis.h5", "/basis_f-mp-Lambda10.0");
224 | refdata ref10f("../unl_safe_ref.h5", "/basis_f-mp-Lambda10.0");
225 | double dunl_coeff = check_data_tail(b10f, ref10f, "f");
226 | ASSERT_LE(dunl_coeff, 1e-11);
227 |
228 | basis b10000f("../irbasis.h5", "/basis_f-mp-Lambda10000.0");
229 | refdata ref10000f("../unl_safe_ref.h5", "/basis_f-mp-Lambda10000.0");
230 | dunl_coeff = check_data_tail(b10000f, ref10000f, "f");
231 | ASSERT_LE(dunl_coeff, 1e-11);
232 |
233 | basis b10b("../irbasis.h5", "/basis_b-mp-Lambda10.0");
234 | refdata ref10b("../unl_safe_ref.h5", "/basis_b-mp-Lambda10.0");
235 | dunl_coeff = check_data_tail(b10b, ref10b, "b");
236 | ASSERT_LE(dunl_coeff, 1e-11);
237 |
238 | basis b10000b("../irbasis.h5", "/basis_b-mp-Lambda10000.0");
239 | refdata ref10000b("../unl_safe_ref.h5", "/basis_b-mp-Lambda10000.0");
240 | dunl_coeff = check_data_tail(b10000b, ref10000b, "b");
241 | ASSERT_LE(dunl_coeff, 1e-11);
242 | }
243 | */
244 |
245 | TEST(interpolation, unl) {
246 | double wmax = 1.0;
247 |
248 | std::vector n_plt;
249 | n_plt.push_back(-1);
250 | n_plt.push_back(0);
251 | n_plt.push_back(1);
252 | for (int o=1; o<14; ++o) {
253 | n_plt.push_back(static_cast(std::pow(10.0, o)));
254 | }
255 | int num_n = n_plt.size();
256 |
257 | std::vector poles;
258 | poles.push_back(1.0);
259 | poles.push_back(0.1);
260 |
261 | std::vector Lambdas;
262 | Lambdas.push_back(10.0);
263 | Lambdas.push_back(1E+4);
264 | Lambdas.push_back(1E+7);
265 |
266 | std::vector stats;
267 | stats.push_back("f");
268 | stats.push_back("b");
269 |
270 | for (int iLambda=0; iLambda > > unl = b.compute_unl(n_plt);
281 |
282 | std::vector Sl(dim), rho_l(dim), gl(dim);
283 |
284 | int stat_shift = 0;
285 | if (stat == "f") {
286 | for (int l=0; l
2 |
3 | #include "gtest.h"
4 | #include "../../c++/irbasis.hpp"
5 |
6 | using namespace irbasis;
7 |
8 | //#include "basis.hpp"
9 | //TEST(multi_array, finite_temp_basis) {
10 | //double beta = 10.0;
11 | //double Lambda = 10.0;
12 | //IrBasis(FERMION, beta, Lambda, 50, "../irbasis.h5");
13 | //}
14 |
15 | TEST(multi_array, dim2_copy) {
16 | int N1 = 2;
17 | int N2 = 4;
18 | internal::multi_array array2(N1, N2);
19 | {
20 | internal::multi_array array(N1, N2);
21 | array.fill(1);
22 | array2 = array;
23 | }
24 | ASSERT_EQ(array2.extent(0), N1);
25 | ASSERT_EQ(array2.extent(1), N2);
26 | for (int i=0; i array2(N1, N2, N3);
38 | {
39 | internal::multi_array array(N1, N2, N3);
40 | array.fill(1);
41 | array2 = array;
42 | }
43 | ASSERT_EQ(array2.extent(0), N1);
44 | ASSERT_EQ(array2.extent(1), N2);
45 | ASSERT_EQ(array2.extent(2), N3);
46 | for (int i=0; i array(N1, N2);
59 | array(0, 0) = 0;
60 | array(N1 - 1, N2 - 1) = 0;
61 | ASSERT_EQ(array.extent(0), N1);
62 | ASSERT_EQ(array.extent(1), N2);
63 | }
64 |
// NOTE(review): the template argument lists in the tests below appear
// stripped by extraction (e.g. "internal::multi_array array(N1, N2)" carries
// no <T, DIM> arguments); the code is kept as-is.

// make_view(1) on a 2-D array must yield its second row.
TEST(multi_array, array2_view) {
  int N1 = 2;
  int N2 = 4;
  internal::multi_array array(N1, N2);

  // Fill each element with its flattened row-major index.
  for (int i = 0; i < N1; ++i) {
    for (int j = 0; j < N2; ++j) {
      array(i, j) = N2 * i + j;
    }
  }

  internal::multi_array view = array.make_view(1);
  for (int j = 0; j < N2; ++j) {
    ASSERT_EQ(view(j), N2 + j);
  }
}

// make_view(1) on a 3-D array must yield the 2-D slice at first index 1.
TEST(multi_array, array3_view) {
  int N1 = 2;
  int N2 = 4;
  int N3 = 8;
  internal::multi_array array(N1, N2, N3);

  for (int i = 0; i < N1; ++i) {
    for (int j = 0; j < N2; ++j) {
      for (int k = 0; k < N3; ++k) {
        array(i, j, k) = (i * N2 + j) * N3 + k;
      }
    }
  }

  internal::multi_array view = array.make_view(1);
  for (int j = 0; j < N2; ++j) {
    for (int k = 0; k < N3; ++k) {
      ASSERT_EQ(view(j, k), array(1, j, k));
    }
  }
}

// fill() must assign the given value to every element.
TEST(multi_array, fill) {
  int N1 = 2;
  int N2 = 4;
  double value = 1.0;
  internal::multi_array array(N1, N2);
  array.fill(value);
  for (int i = 0; i < N1; ++i) {
    for (int j = 0; j < N2; ++j) {
      ASSERT_EQ(value, array(i, j));
    }
  }

}

// make_matrix_view must reinterpret a 3-D array as an (N1*N2) x N3 matrix
// without changing the element order.
TEST(multi_array, matrix_view) {
  int N1 = 2;
  int N2 = 4;
  int N3 = 8;
  internal::multi_array array(N1, N2, N3);

  for (int i = 0; i < N1; ++i) {
    for (int j = 0; j < N2; ++j) {
      for (int k = 0; k < N3; ++k) {
        array(i, j, k) = (i * N2 + j) * N3 + k;
      }
    }
  }

  internal::multi_array view = array.make_matrix_view(N1 * N2, N3);

  // I walks the fused (i, j) row index of the matrix view.
  int I = 0;
  for (int i = 0; i < N1; ++i) {
    for (int j = 0; j < N2; ++j) {
      for (int k = 0; k < N3; ++k) {
        ASSERT_EQ(view(I, k), (i * N2 + j) * N3 + k);
      }
      ++I;
    }
  }

}

// multiply() must agree with a naive triple-loop matrix product.
TEST(multi_array, multiply) {
  int N1 = 2;
  int N2 = 3;
  int N3 = 3;
  internal::multi_array A(N1, N2);
  internal::multi_array B(N2, N3);
  internal::multi_array AB(N1, N3);
  internal::multi_array AB_test(N1, N3);

  for (int i = 0; i < N1; ++i) {
    for (int j = 0; j < N2; ++j) {
      A(i, j) = i + 10 * j;
    }
  }

  for (int i = 0; i < N2; ++i) {
    for (int j = 0; j < N3; ++j) {
      B(i, j) = i + 9 * j;
    }
  }

  // Reference product computed by hand.
  AB.fill(0);
  for (int i = 0; i < N1; ++i) {
    for (int j = 0; j < N2; ++j) {
      for (int k = 0; k < N3; ++k) {
        AB(i, k) += A(i, j) * B(j, k);
      }
    }
  }

  internal::multiply(A, B, AB_test);

  for (int i = 0; i < N1; ++i) {
    for (int j = 0; j < N3; ++j) {
      ASSERT_NEAR(AB(i, j), AB_test(i, j), 1e-10);
    }
  }

}
185 |
--------------------------------------------------------------------------------
/test/python/.gitignore:
--------------------------------------------------------------------------------
1 | irbasis*
2 | *.pdf
3 | *.DS_Store
4 | *~
--------------------------------------------------------------------------------
/test/python/CMakeLists.txt:
--------------------------------------------------------------------------------
# add_python_test(<test>): register <test>.py with CTest, running it under the
# discovered Python interpreter with PYTHONPATH extended into the build tree.
function(add_python_test test)
add_test(NAME python_${test} COMMAND ${PYTHON_EXECUTABLE} ${test}.py)
set_tests_properties(python_${test} PROPERTIES ENVIRONMENT "PYTHONPATH=${CMAKE_BINARY_DIR}/test")
endfunction(add_python_test)

find_package(PythonInterp REQUIRED)

# Stage the module under test, the version file and every test script into the
# build directory; the tests open the basis data as ../irbasis.h5 from there.
configure_file(${CMAKE_SOURCE_DIR}/python/irbasis.py ${CMAKE_BINARY_DIR}/test/python COPYONLY)
configure_file(${CMAKE_SOURCE_DIR}/version ${CMAKE_BINARY_DIR}/test/python COPYONLY)
file(GLOB_RECURSE TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/*.py")
foreach(testfile ${TEST_FILES})
configure_file(${testfile} ${CMAKE_BINARY_DIR}/test/python COPYONLY)
endforeach()

set(python_test_src check_ulx_vly check_unl utility sparse_sampling)
foreach(test ${python_test_src})
add_python_test(${test})
endforeach(test)
--------------------------------------------------------------------------------
/test/python/check_ulx_vly.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import numpy
3 | import irbasis as ir
4 | import math
5 |
6 |
class TestMethods(unittest.TestCase):
    """Checks d^k u_l(x)/dx^k and d^k v_l(y)/dy^k against reference data
    stored in the basis file, and the vectorized call signatures of
    ulx/vly/sl."""

    def test_ulx(self):
        """Derivatives of u_l at the stored reference points must match the
        stored reference values for orders 0, 1 and 2."""
        for _lambda in [10.0, 1E+4, 1E+7]:
            prefix = "basis_f-mp-Lambda" + str(_lambda)
            rb = ir.basis("../irbasis.h5", prefix)
            d_ulx_ref = rb._get_d_ulx_ref()
            num_ref_data = d_ulx_ref.shape[0]
            # Looser tolerance for the second derivative.
            tol = {0: 1e-10, 1: 1e-10, 2: 1e-5}
            for i_ref_data in range(num_ref_data):
                Nl, x, order, ref_val = d_ulx_ref[i_ref_data, :]
                Nl = int(Nl)
                order = int(order)
                val = rb.d_ulx(Nl-1, x, order)
                adiff = abs(ref_val - val)
                # BUG FIX: divide by |ref_val|, not ref_val. For negative
                # reference values the relative error was negative, so the
                # assertion below passed no matter how wrong `val` was.
                rdiff = adiff / abs(ref_val)
                print(Nl, x, order, ref_val, val, rdiff)
                self.assertTrue(rdiff < tol[order] or adiff < tol[order])

    def test_vly(self):
        """Derivatives of v_l at the stored reference points must match the
        stored reference values for orders 0, 1 and 2."""
        for _lambda in [10.0, 1E+4, 1E+7]:
            prefix = "basis_f-mp-Lambda" + str(_lambda)
            rb = ir.basis("../irbasis.h5", prefix)
            d_vly_ref = rb._get_d_vly_ref()
            num_ref_data = d_vly_ref.shape[0]
            print("Lambda ", _lambda)
            tol = {0: 1e-10, 1: 1e-10, 2: 1e-5}
            for i_ref_data in range(num_ref_data):
                Nl, y, order, ref_val = d_vly_ref[i_ref_data, :]
                Nl = int(Nl)
                order = int(order)
                val = rb.d_vly(Nl-1, y, order)
                adiff = abs(ref_val - val)
                # BUG FIX: same sign problem as in test_ulx above.
                rdiff = adiff / abs(ref_val)
                print(Nl, y, order, ref_val, val, rdiff)
                self.assertTrue(rdiff < tol[order] or adiff < tol[order])

    def test_vectorization(self):
        """ulx/vly/sl must accept array arguments (and None for "all l") and
        agree with numpy.vectorize applied to the scalar call."""
        for _lambda in [1E+4]:
            prefix = "basis_f-mp-Lambda" + str(_lambda)
            rb = ir.basis("../irbasis.h5", prefix)

            # re-vectorized functions
            revec_ulx = numpy.vectorize(rb.ulx)
            revec_vly = numpy.vectorize(rb.vly)
            revec_sl = numpy.vectorize(rb.sl)

            # check that those match:
            x = numpy.array([-.3, .2, .5])
            l = numpy.array([1, 3, 10, 15], dtype=int)
            alll = numpy.arange(rb.dim())

            self.assertTrue(numpy.allclose(revec_sl(l), rb.sl(l)))
            self.assertTrue(numpy.allclose(revec_sl(alll), rb.sl()))

            self.assertTrue(numpy.allclose(revec_ulx(l[0], x), rb.ulx(l[0], x)))
            self.assertTrue(numpy.allclose(revec_ulx(l, x[0]), rb.ulx(l, x[0])))
            self.assertTrue(numpy.allclose(revec_ulx(alll, x[0]),
                                           rb.ulx(None, x[0])))
            self.assertTrue(numpy.allclose(revec_ulx(l[:,None], x[None,:]),
                                           rb.ulx(l[:,None], x[None,:])))

            self.assertTrue(numpy.allclose(revec_vly(l[0], x), rb.vly(l[0], x)))
            self.assertTrue(numpy.allclose(revec_vly(l, x[0]), rb.vly(l, x[0])))
            self.assertTrue(numpy.allclose(revec_vly(alll, x[0]),
                                           rb.vly(None, x[0])))
            self.assertTrue(numpy.allclose(revec_vly(l[:,None], x[None,:]),
                                           rb.vly(l[:,None], x[None,:])))
78 |
79 |
80 |
81 | if __name__ == '__main__':
82 | unittest.main()
83 |
84 | # https://cmake.org/pipermail/cmake/2012-May/050120.html
85 |
--------------------------------------------------------------------------------
/test/python/check_unl.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from builtins import range
3 |
4 | import unittest
5 | import numpy
6 | import h5py
7 | import irbasis as ir
8 | import math
9 | from itertools import product
10 |
class TestMethods(unittest.TestCase):
    def __init__(self, *args, **kwargs):

        super(TestMethods, self).__init__(*args, **kwargs)

    def test_unl(self):
        """
        Consider a pole at omega=pole. Compare analytic results of G(iwn) and numerical results computed by using unl.
        """
        for _lambda in [10.0, 1E+4, 1E+7]:
            for _statistics, pole in product(["f", "b"], [1.0, 0.1]):
                print("lambda = %d, stat = %s, y = %g" %
                      (_lambda, repr(_statistics), pole))
                prefix = "basis_"+_statistics+"-mp-Lambda"+str(_lambda)
                basis = ir.basis("../irbasis.h5", prefix)
                dim = basis.dim()

                # Dimensionless convention: wmax = 1, so beta = Lambda/wmax.
                wmax = 1.0
                beta = _lambda/wmax

                # Expansion coefficients g_l = -S_l * rho_l of a single-pole
                # Green's function; scale factors differ between statistics.
                if _statistics == 'f':
                    rho_l = numpy.sqrt(1/wmax)* numpy.array([basis.vly(l, pole/wmax) for l in range(dim)])
                    Sl = numpy.sqrt(0.5 * beta * wmax) * numpy.array([basis.sl(l) for l in range(dim)])
                    stat_shift = 1
                else:
                    rho_l = numpy.sqrt(1/wmax)* numpy.array([basis.vly(l, pole/wmax) for l in range(dim)])/pole
                    Sl = numpy.sqrt(0.5 * beta * wmax**3) * numpy.array([basis.sl(l) for l in range(dim)])
                    stat_shift = 0
                gl = - Sl * rho_l

                def G(n):
                    # Analytic G(i w_n) = 1/(i w_n - pole); stat_shift selects
                    # fermionic (odd) or bosonic (even) frequencies.
                    wn = (2*n+stat_shift)*numpy.pi/beta
                    z = 1J * wn
                    return 1/(z - pole)

                # Compute G(iwn) using unl
                n_plt = numpy.array([-1, 0, 1, 1E+1, 1E+2, 1E+3, 1E+4,
                                     1E+5, 1E+6, 1E+7, 1E+8, 1E+9, 1E+10, 1E+14],
                                    dtype=int)
                Uwnl_plt = numpy.sqrt(beta) * basis.compute_unl(n_plt)
                Giwn_t = numpy.dot(Uwnl_plt, gl)

                # Compute G(iwn) from analytic expression
                Giwn_ref = numpy.array([G(n) for n in n_plt])

                magnitude = numpy.abs(Giwn_ref).max()
                diff = numpy.abs(Giwn_t - Giwn_ref)
                reldiff = diff/numpy.abs(Giwn_ref)

                # Error scaled by the peak magnitude must be below 5e-13
                print ("max. absdiff = %.4g, rel = %.4g" %
                       (diff.max()/magnitude, reldiff.max()))
                self.assertLessEqual((diff/magnitude).max(), 5e-13)

                # Pointwise relative error must also be below 5e-13
                self.assertLessEqual(numpy.amax(numpy.abs(diff/Giwn_ref)), 5e-13)
67 |
68 |
69 | if __name__ == '__main__':
70 | unittest.main()
71 |
72 | # https://cmake.org/pipermail/cmake/2012-May/050120.html
73 |
--------------------------------------------------------------------------------
/test/python/sparse_sampling.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from builtins import range
3 |
4 | import unittest
5 | import numpy
6 | import scipy
7 | import irbasis
8 |
9 | Lambda = 1E+7
10 |
class TestMethods(unittest.TestCase):
    """Sanity checks for the sparse sampling points in Matsubara frequency,
    x and y, including the conditioning of the resulting fitting matrices."""

    def test_sampling_point_matsubara(self):
        """Matsubara sampling: module-level and method APIs agree, the point
        set is symmetric (n -> -n-1 for fermions, n -> -n for bosons), holds
        at least dim points, and yields a well-conditioned unl matrix."""
        for stat in ['F', 'B']:
            b = irbasis.load(stat, Lambda, "../irbasis.h5")
            dim = b.dim()
            whichl = dim - 1

            points = irbasis.sampling_points_matsubara(b, whichl)
            assert numpy.all(points == b.sampling_points_matsubara(whichl))

            if stat == 'F':
                assert all(-s - 1 in points for s in points)
            else:
                assert all(-s in points for s in points)

            assert len(points) >= whichl + 1

            Unl = b.compute_unl(points)[:, :dim]
            singvals = scipy.linalg.svd(Unl, full_matrices=False)[1]
            cond_num = singvals[0] / singvals[-1]

            print("cond_num ", cond_num)
            self.assertLessEqual(cond_num, 1E+4)

    def test_sampling_point_x(self):
        """x sampling: both APIs agree, the count is exactly whichl+1, and
        the u_l(x) matrix at those points is well-conditioned."""
        for stat in ['F', 'B']:
            b = irbasis.load(stat, Lambda, "../irbasis.h5")
            dim = b.dim()
            whichl = dim - 1

            points = irbasis.sampling_points_x(b, whichl)
            assert numpy.all(points == b.sampling_points_x(whichl))
            assert len(points) == whichl + 1

            uxl = numpy.array([b.ulx(l, x) for l in range(dim) for x in points]).reshape((dim, dim))
            singvals = scipy.linalg.svd(uxl, full_matrices=False)[1]
            cond_num = singvals[0] / singvals[-1]

            print("cond_num ", cond_num)
            self.assertLessEqual(cond_num, 1E+4)

    def test_sampling_point_y(self):
        """y sampling: both APIs agree and the count is exactly whichl+1."""
        for stat in ['F', 'B']:
            b = irbasis.load(stat, Lambda, "../irbasis.h5")
            dim = b.dim()
            whichl = dim - 1

            points = irbasis.sampling_points_y(b, whichl)
            assert numpy.all(points == b.sampling_points_y(whichl))
            assert len(points) == whichl + 1
68 |
69 | if __name__ == '__main__':
70 | unittest.main()
71 |
--------------------------------------------------------------------------------
/test/python/utility.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from builtins import range
3 |
4 | import unittest
5 | import numpy
6 | import irbasis as ir
7 | import math
8 |
9 | def _composite_leggauss(deg, section_edges):
10 | """
11 | Composite Gauss-Legendre quadrature.
12 | :param deg: Number of sample points and weights. It must be >= 1.
13 | :param section_edges: array_like
14 | 1-D array of the two end points of the integral interval
15 | and breaking points in ascending order.
16 | :return ndarray, ndarray: sampling points and weights
17 | """
18 | x_loc, w_loc = numpy.polynomial.legendre.leggauss(deg)
19 |
20 | ns = len(section_edges)-1
21 | x = []
22 | w = []
23 | for s in range(ns):
24 | dx = section_edges[s+1] - section_edges[s]
25 | x0 = section_edges[s]
26 | x.extend(((dx/2)*(x_loc+1)+x0).tolist())
27 | w.extend((w_loc*(dx/2)).tolist())
28 |
29 | return numpy.array(x), numpy.array(w)
30 |
31 |
class transformer(object):
    """Projects a function of tau onto the IR basis by composite
    Gauss-Legendre quadrature over the basis' own x sections."""

    def __init__(self, basis, beta):
        self._dim = basis.dim()
        self._beta = beta
        self._x, self._w = _composite_leggauss(24, basis.section_edges_x)

        # Pre-tabulate w_i * u_l(x_i) for every quadrature node and basis index.
        num_nodes = len(self._x)
        self._u_smpl = numpy.zeros((num_nodes, self._dim))
        for ix in range(num_nodes):
            row = numpy.array([basis.ulx(l, self._x[ix]) for l in range(self._dim)])
            self._u_smpl[ix, :] = self._w[ix] * row

    def compute_gl(self, gtau, nl):
        """Return the first nl expansion coefficients of gtau, which is
        sampled at tau = (x + 1) * beta / 2 for each quadrature node x."""
        assert nl <= self._dim

        samples = [gtau(0.5 * (xi + 1) * self._beta) for xi in self._x]
        gtau_smpl = numpy.array([samples], dtype=complex)

        return numpy.sqrt(self._beta / 2) * numpy.dot(gtau_smpl, self._u_smpl[:, 0:nl]).reshape((nl))
54 |
class TestMethods(unittest.TestCase):
    def __init__(self, *args, **kwargs):

        super(TestMethods, self).__init__(*args, **kwargs)

    def test_to_Gl(self):
        """Expanding u_{Nl-1}(x(tau)) itself must give coefficients that are
        zero except for the last entry, which equals sqrt(beta/2)."""
        for _lambda in [10.0, 10000.0]:
            for _statistics in ["f", "b"]:
                beta = 10.0
                prefix = "basis_"+_statistics+"-mp-Lambda"+str(_lambda)
                basis = ir.basis("../irbasis.h5", prefix)
                Nl = basis.dim()

                trans = transformer(basis, beta)

                # Trivial test
                # g(tau) = u_{Nl-1}(x) with x = 2*tau/beta - 1.
                gtau = lambda tau: basis.ulx(Nl - 1, 2 * tau / beta - 1)
                gl = trans.compute_gl(gtau, Nl)

                for l in range(Nl - 1):
                    self.assertLessEqual(numpy.abs(gl[l]), 1e-8)
                self.assertLessEqual(numpy.abs(gl[-1] - numpy.sqrt(beta / 2)), 1e-8)
77 |
78 | if __name__ == '__main__':
79 | unittest.main()
80 |
81 | # https://cmake.org/pipermail/cmake/2012-May/050120.html
82 |
--------------------------------------------------------------------------------
/version:
--------------------------------------------------------------------------------
1 | 2.2.3
--------------------------------------------------------------------------------