├── tests ├── __init__.py ├── test_helpers.py ├── conftest.py ├── test_imports.py ├── test_messpy.py ├── test_plot_helpers.py ├── test_data_io.py ├── test_units.py ├── test_utils.py ├── test_quickcontrol.py ├── test_basefuncs.py ├── test_twodim.py └── test_dataset.py ├── skultrafast ├── h5py_helpers.py ├── base_funcs │ ├── __init__.py │ ├── lineshapes.py │ ├── base_functions_np.py │ ├── backend_tester.py │ ├── ana.py │ ├── base_functions_numba.py │ └── pytorch_fitter.py ├── examples │ ├── .png.png │ ├── README.rst │ ├── data │ │ ├── test.npz │ │ ├── germanium.npz │ │ ├── ir_waterabs.npy │ │ ├── messpyv1_data.npz │ │ ├── quickcontrol.zip │ │ └── PolystyreneFilm_spectrum.npz │ ├── tutorial_sys_response_ir.py │ ├── wip_fit_pfid.py │ ├── tutorial_fit_pfid.py │ ├── wip_messpy25.py │ ├── convolution.py │ ├── tutorial_quickcontrol.py │ ├── tutorial_messpy.py │ ├── tutorial_2d_signal_calculation.py │ ├── tutorial_compart_modeling.py │ ├── tutorial_start.py │ ├── tutorial_spectrometer_calibration.py │ └── tutorial_figures.py ├── __init__.py ├── base_functions.py ├── pseudo_zernike │ ├── __init__.py │ ├── poly.py │ └── ps_fit.py ├── kubo_fitting │ ├── kubo_fitter.py │ ├── __init__.py │ └── backend_numpy.py ├── spectrum.py ├── nlo.py ├── single_spectrum.py ├── styles.py ├── unit_conversions.py ├── pfid_fitter.py ├── lifetimemap.py ├── kinetic_model.py ├── data_io.py ├── filter.py ├── sympy_model.py ├── fit_spectrum.py ├── zero_finding.py └── referencing.py ├── docs ├── releasenotes.rst ├── _static │ └── custom.css ├── introduction.rst ├── index.rst ├── install.rst ├── zero_finding_lot.py ├── development.rst ├── data_io.rst ├── fitting.rst ├── zero-finding.rst ├── make.bat ├── Makefile └── pub_figures.rst ├── .gitattributes ├── .gitignore ├── .readthedocs.yml ├── releasenotes.rst ├── .github └── workflows │ ├── python-package.yml │ └── python-publish.yml ├── pyproject.toml ├── LICENSE.txt └── README.rst /tests/__init__.py: 
-------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /tests/test_helpers.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /skultrafast/h5py_helpers.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /skultrafast/base_funcs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/releasenotes.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../releasenotes.rst -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | matplotlib.use('Agg') 3 | -------------------------------------------------------------------------------- /docs/_static/custom.css: -------------------------------------------------------------------------------- 1 | div.sphinxsidebar { 2 | background-color: #FA99CE; 3 | } -------------------------------------------------------------------------------- /docs/introduction.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | .. 
include:: ../README.rst 5 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | skultrafast/_version.py export-subst 2 | skultrafast\_version.py export-subst 3 | -------------------------------------------------------------------------------- /skultrafast/examples/.png.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tillsten/skultrafast/HEAD/skultrafast/examples/.png.png -------------------------------------------------------------------------------- /tests/test_imports.py: -------------------------------------------------------------------------------- 1 | def test_import(): 2 | from skultrafast import dv, data_io, plot_helpers 3 | return 4 | -------------------------------------------------------------------------------- /skultrafast/examples/README.rst: -------------------------------------------------------------------------------- 1 | Examples and tutorials 2 | ====================== 3 | Below are the skultrafast tutorials. 
-------------------------------------------------------------------------------- /skultrafast/examples/data/test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tillsten/skultrafast/HEAD/skultrafast/examples/data/test.npz -------------------------------------------------------------------------------- /skultrafast/examples/data/germanium.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tillsten/skultrafast/HEAD/skultrafast/examples/data/germanium.npz -------------------------------------------------------------------------------- /skultrafast/examples/data/ir_waterabs.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tillsten/skultrafast/HEAD/skultrafast/examples/data/ir_waterabs.npy -------------------------------------------------------------------------------- /skultrafast/examples/data/messpyv1_data.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tillsten/skultrafast/HEAD/skultrafast/examples/data/messpyv1_data.npz -------------------------------------------------------------------------------- /skultrafast/examples/data/quickcontrol.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tillsten/skultrafast/HEAD/skultrafast/examples/data/quickcontrol.zip -------------------------------------------------------------------------------- /skultrafast/examples/data/PolystyreneFilm_spectrum.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tillsten/skultrafast/HEAD/skultrafast/examples/data/PolystyreneFilm_spectrum.npz -------------------------------------------------------------------------------- /skultrafast/__init__.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Fri Mar 23 14:21:55 2012 4 | 5 | @author: Tillsten 6 | """ 7 | 8 | __version__ = '5.0' 9 | -------------------------------------------------------------------------------- /tests/test_messpy.py: -------------------------------------------------------------------------------- 1 | from skultrafast.messpy import Messpy25File 2 | 3 | 4 | def test_messpy_v1(): 5 | pass 6 | 7 | 8 | def test_2d_loader(): 9 | pass 10 | -------------------------------------------------------------------------------- /tests/test_plot_helpers.py: -------------------------------------------------------------------------------- 1 | from skultrafast.plot_helpers import nsf 2 | 3 | def test_nsf(): 4 | assert(nsf(0.123) == '0.12') 5 | assert(nsf(2.345) == ' 2.3') 6 | assert(nsf(6.4324) == ' 6.4') 7 | assert (nsf(66.43) == ' 70') 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .pypirc 2 | build/* 3 | *.pyc 4 | .vscode/* 5 | .idea/* 6 | .pytest_cache/* 7 | docs/_build/* 8 | docs/auto_examples/* 9 | dist/* 10 | *egg-info/* 11 | docs/api/* 12 | .mypy_cache/ 13 | skultrafast/.mypy_cache/* 14 | __pycache__/ 15 | 16 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to skultrafast's documentation! 2 | ======================================= 3 | 4 | .. 
toctree:: 5 | :maxdepth: 2 6 | 7 | introduction 8 | install 9 | releasenotes 10 | auto_examples/tutorial_start 11 | auto_examples/tutorial_2d 12 | data_io 13 | api 14 | development 15 | zero-finding 16 | auto_examples/index 17 | fitting 18 | 19 | 20 | Indices and tables 21 | ================== 22 | 23 | * :ref:`genindex` 24 | * :ref:`modindex` 25 | * :ref:`search` 26 | 27 | -------------------------------------------------------------------------------- /tests/test_data_io.py: -------------------------------------------------------------------------------- 1 | from skultrafast import data_io 2 | from pathlib import Path 3 | 4 | 5 | def test_load_exmaple(): 6 | wl, t, d = data_io.load_example() 7 | assert ((t.shape[0], wl.shape[0]) == d.shape) 8 | 9 | 10 | def test_path_loader(): 11 | p = data_io.get_example_path('sys_response') 12 | assert Path(p).exists() 13 | p = data_io.get_example_path('messpy') 14 | assert Path(p).exists() 15 | 16 | 17 | def test_2d_load(): 18 | p = data_io.get_example_path('quickcontrol') 19 | assert Path(p).exists() 20 | 21 | 22 | def test_2d_webload(): 23 | a = data_io.get_twodim_dataset() 24 | assert isinstance(a, Path) 25 | -------------------------------------------------------------------------------- /skultrafast/base_functions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Module to import the base functions from. 
4 | """ 5 | from __future__ import print_function 6 | 7 | try: 8 | from skultrafast.base_funcs.base_functions_numba import (_fold_exp, 9 | _fold_exp_and_coh, 10 | _coh_gaussian) 11 | 12 | except ImportError: 13 | from skultrafast.base_funcs.base_functions_np import(_fold_exp, 14 | _fold_exp_and_coh, 15 | _coh_gaussian) 16 | 17 | -------------------------------------------------------------------------------- /tests/test_units.py: -------------------------------------------------------------------------------- 1 | from skultrafast.unit_conversions import (cm2eV, eV2cm, cm2fs, fs2cm, cm2nm, nm2cm, 2 | cm2THz, THz2cm, cm2kcal, kcal2cm, invps2cm, cm2invps) 3 | import numpy as np 4 | 5 | 6 | def test_handle_np(): 7 | x1 = np.arange(2, 30, dtype='float') 8 | x0 = 100. 9 | for x in x0, x1: 10 | np.testing.assert_allclose(eV2cm(cm2eV(x)), x) 11 | np.testing.assert_allclose(cm2fs(fs2cm(x)), x) 12 | np.testing.assert_allclose(cm2THz(THz2cm(x)), x) 13 | np.testing.assert_allclose(cm2nm(nm2cm(x)), x) 14 | np.testing.assert_allclose(cm2kcal(kcal2cm(x)), x) 15 | np.testing.assert_allclose(invps2cm(cm2invps(x)), x) 16 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from skultrafast.utils import pfid_r4, pfid_r6, simulate_binning 2 | import numpy as np 3 | import pytest 4 | 5 | def test_pfid(): 6 | t = np.linspace(0, 10, 100) 7 | fre = np.linspace(900, 1200, 64) 8 | y1 = pfid_r4(t, fre, [1000, 1100], [2, 1]) 9 | y2 = pfid_r6(t, fre, [1000], [1015], [2]) 10 | 11 | 12 | @pytest.mark.skip("Not implemented yet") 13 | def test_simulate_binning(): 14 | wl = np.linspace(0, 2*np.pi, 4) 15 | 16 | def func(*, wl): 17 | return np.sin(wl) 18 | 19 | func_bin = simulate_binning(func, fac=20) 20 | res = func(wl=wl) 21 | binned_res = func_bin(wl=wl) 22 | assert res.shape == binned_res.shape 23 | many = np.linspace(0, 2*np.pi, 10_0000) 24 | precise_sum = 
np.sin(many) 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /skultrafast/examples/tutorial_sys_response_ir.py: -------------------------------------------------------------------------------- 1 | """ 2 | Measuring the system response in the mid IR 3 | =========================================== 4 | 5 | In the mid-IR the system response is measured by monitoring the transmittance of the 6 | probe light through a thin semi-conductor. skultrafast has an helper function to 7 | analyze such a signal. 8 | """ 9 | # %% 10 | from skultrafast import messpy, data_io 11 | 12 | fname = data_io.get_example_path('sys_response') 13 | tz_result = messpy.get_t0(fname, display_result=False, 14 | t_range=(-2, 0.3), 15 | no_slope=False) 16 | 17 | # %% 18 | # Newer version of lmfit have a html representation which is used by ipython, e.g. 19 | # in the notebook. Hence the line below will display the fit results. 20 | 21 | tz_result.fit_result 22 | 23 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.11" 13 | # You can also specify other tool versions: 14 | # nodejs: "16" 15 | # rust: "1.55" 16 | # golang: "1.17" 17 | 18 | # Build documentation in the docs/ directory with Sphinx 19 | sphinx: 20 | configuration: docs/conf.py 21 | 22 | # If using Sphinx, optionally build your docs in additional formats such as PDF 23 | # formats: 24 | # - pdf 25 | 26 | # Optionally declare the Python requirements required to build your docs 27 | python: 28 | install: 29 | - method: pip 30 | path: . 
31 | extra_requirements: 32 | - doc 33 | 34 | -------------------------------------------------------------------------------- /docs/install.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | Installation via PIP 5 | -------------------- 6 | The easiest way to install skultrafast is using *pip*. Since skultrafast is a 7 | pure python package, no compiler is necessary. 8 | 9 | To get the latest released version from pypi:: 10 | 11 | pip install skultrafast 12 | 13 | To get the latest development version from GitHub (requires git installed):: 14 | 15 | pip install git+https://github.com/tillsten/skultrafast --upgrade 16 | 17 | 18 | If you are using anaconda, git can be installed by:: 19 | 20 | conda install git 21 | 22 | 23 | Software prerequisites 24 | ---------------------- 25 | Needs Python 3.6 or higher. Requires the following packages, 26 | which are automatically installed when using pip: 27 | 28 | * numpy 29 | * numba 30 | * scipy 31 | * lmfit 32 | * matplotlib 33 | * sklearn 34 | * sympy 35 | 36 | To build the docs, more packages are necessary. 
37 | 38 | * sphinx 39 | * sphinx-gallery 40 | -------------------------------------------------------------------------------- /releasenotes.rst: -------------------------------------------------------------------------------- 1 | Release notes 2 | ============= 3 | 4 | 3.1.0 2021-02-24 5 | ---------------- 6 | 7 | - allow PolTRSpec to be concacted 8 | 9 | 3.0.1 2021-02-09 10 | ---------------- 11 | 12 | - plot sas function 13 | - doc fixes 14 | 15 | 3.0 2021-02-09 16 | -------------- 17 | 18 | - error estimates for the linear parameters are now part of FitExpResult 19 | - EDAS plot fuction 20 | - compartment modelling is now working 21 | 22 | 4.0 2021-01-22 23 | -------------- 24 | 25 | - Initial support for 2D data: 26 | * CLS spectra 27 | * Pump-slice Amplitude 28 | * Diagonal extraction 29 | * Plotting 30 | - Support for quick-control files 31 | 32 | 33 | 4.1 2022-09-23 34 | -------------- 35 | - More 2D support: 36 | * More Plotting 37 | * Exponential fitting 38 | * Expanded cls methods 39 | * Background subtraction 40 | 41 | - Support for messpy 2D-files 42 | 43 | 5.0 2022-09-24 44 | -------------- 45 | - Mostly internal changes, now uses the hatchling build system 46 | - Small improvements to the 2D functions 47 | - Added a few more tests 48 | 49 | 50 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Python package 5 | 6 | on: 7 | push: 8 | branches: [ master ] 9 | pull_request: 10 | branches: [ master ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | python-version: ['3.12', '3.10', '3.11'] 19 | 20 | steps: 21 | - 
uses: actions/checkout@v4 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v5 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | cache: 'pip' 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip 30 | python -m pip install flake8 pytest 31 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 32 | if [ -f doc_requirements.txt ]; then pip install -r doc_requirements.txt; fi 33 | - name: Test with pytest 34 | run: | 35 | python -m pip install -e . 36 | pytest 37 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - uses: actions/checkout@v4 25 | - name: Set up Python 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: '3.x' 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install build 33 | - name: Build package 34 | run: python -m build 35 | - name: Publish package 36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | build-backend = "hatchling.build" 3 | requires = ["hatchling"] 4 | 5 | [project] 6 | authors = [ 7 | {name = "Till Stensitzki", email = "mail.till@gmx.de"}, 8 | ] 9 | dependencies = [ 10 | "attrs", 11 | "h5py", 12 | "joblib", 13 | "lmfit", 14 | "matplotlib", 15 | "numba", 16 | "numpy", 17 | "pooch", 18 | "pytest", 19 | "scikit-learn", 20 | "scipy", 21 | "statsmodels", 22 | "sympy", 23 | "wrapt", 24 | ] 25 | 26 | description = "Python package for analyzing time-resolved spectra." 
27 | dynamic = ["version"] 28 | keywords = [ 29 | "chemistry", 30 | "physics", 31 | "pump-probe", 32 | "science", 33 | "spectroscopy", 34 | "time-resolved", 35 | ] 36 | license = {file = "LICENSE.txt"} 37 | name = "skultrafast" 38 | readme = "README.rst" 39 | requires-python = ">=3.10" 40 | 41 | [project.urls] 42 | Homepage = "http://github.com/tillsten/skultrafast" 43 | 44 | [project.optional-dependencies] 45 | doc = [ 46 | "sphinx >= 4", 47 | "sphinx-gallery", 48 | "pytest", 49 | "docutils", 50 | "sphinx-autoapi", 51 | "furo", 52 | ] 53 | 54 | [tool.hatch.version] 55 | path = "skultrafast/__init__.py" 56 | 57 | [tool.hatch.build.targets.sdist] 58 | include = [ 59 | "/skultrafast", 60 | ] 61 | 62 | [tool.hatch.envs.doc.scripts] 63 | build = "cd docs && make html" 64 | features = ["doc"] 65 | -------------------------------------------------------------------------------- /skultrafast/pseudo_zernike/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides a class for calculating the pseudo-Zernike polynomials for 3 | 2DIRT spectra. 4 | 5 | It is based on "A new method based on pseudo-Zernike polynomials to analyze and 6 | extract dynamical and spectral information from the 2DIR spectra" by Gurung & 7 | Kuroda. 8 | """ 9 | # %% 10 | 11 | # %% 12 | import numpy as np 13 | 14 | from skultrafast.pseudo_zernike.poly import Polybase 15 | from skultrafast.twoD_dataset import TwoDim 16 | import matplotlib.pyplot as plt 17 | 18 | def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float: 19 | """ 20 | Calculate the cosine similarity between two vectors. 
21 | 22 | Parameters 23 | ---------- 24 | a : np.ndarray 25 | The first vector 26 | b : np.ndarray 27 | The second vector 28 | 29 | Returns 30 | ------- 31 | similarity : float 32 | The cosine similarity 33 | """ 34 | return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)) 35 | 36 | 37 | def total_pzs(ds: TwoDim, n: int) -> np.ndarray: 38 | """ 39 | Calculate the pseudo-Zernike polynomials for the 2DIR spectra. 40 | 41 | Parameters 42 | ---------- 43 | ds : TwoDim 44 | The 2DIR spectra 45 | n : int 46 | The maximum order of the polynomials 47 | 48 | Returns 49 | ------- 50 | pzs : np.ndarray 51 | The pseudo-Zernike polynomials 52 | """ 53 | pb = Polybase(ds.pump_wn, ds.probe_wn, n) 54 | base = pb.make_base() 55 | 56 | 57 | -------------------------------------------------------------------------------- /skultrafast/examples/wip_fit_pfid.py: -------------------------------------------------------------------------------- 1 | """ 2 | Fitting the perturbed free induction decay 3 | ========================================== 4 | 5 | Sometimes it is useful to fit the perturbed free induction (see explanation in 6 | the PFID tutorial), maybe certain excited state features are not yet visible or 7 | a more exact determination of the center frequency is required. 8 | 9 | 10 | WORK IN PROGRESS 11 | """ 12 | # %% 13 | import numpy as np 14 | import matplotlib.pyplot as plt 15 | import skultrafast.plot_helpers as ph 16 | import skultrafast.unit_conversions as uc 17 | from skultrafast.utils import pfid_r4, pfid_r6 18 | from skultrafast import messpy, data_io, dataset 19 | # %% 20 | 21 | 22 | fname = data_io.get_example_path('messpy') 23 | print("Tutorial MessPy-file located at %s" % fname) 24 | mpf = messpy.MessPyFile( 25 | fname, 26 | invert_data=True, # Changes the sign of the data 27 | is_pol_resolved=True, # If the data was recored polarization resolved. 
28 | pol_first_scan='perp', # Polarisation of the first scan 29 | valid_channel=1, # Which channel to use, recent IR data always uses 1 30 | # Recent visible data uses 0 31 | ) 32 | 33 | mpf.recalculate_wavelengths(8.8) 34 | # %% 35 | 36 | para, perp, iso = mpf.avg_and_concat() 37 | 38 | pol_ds = dataset.PolTRSpec(para, perp) 39 | pol_ds.subtract_background() 40 | merged_ds = pol_ds.merge_nearby_channels(8) 41 | merged_ds.plot.spec(-.5, n_average=3); 42 | 43 | # %% 44 | merged_ds.plot.spec(-1.5, n_average=1); 45 | 46 | 47 | 48 | # %% 49 | merged_ds.t 50 | 51 | # %% 52 | -------------------------------------------------------------------------------- /skultrafast/examples/tutorial_fit_pfid.py: -------------------------------------------------------------------------------- 1 | """ 2 | Fitting the perturbed free induction decay 3 | ========================================== 4 | 5 | Sometimes it is useful to fit the perturbed free induction (see explanation in 6 | the PFID tutorial), maybe certain excited state features are not yet visible or 7 | a more exact determination of the center frequency is required. 8 | 9 | 10 | WORK IN PROGRESS 11 | """ 12 | # %% 13 | import numpy as np 14 | import matplotlib.pyplot as plt 15 | import skultrafast.plot_helpers as ph 16 | import skultrafast.unit_conversions as uc 17 | from skultrafast.utils import pfid_r4, pfid_r6 18 | from skultrafast import messpy, data_io, dataset 19 | # %% 20 | 21 | 22 | fname = data_io.get_example_path('messpy') 23 | print("Tutorial MessPy-file located at %s" % fname) 24 | mpf = messpy.MessPyFile( 25 | fname, 26 | invert_data=True, # Changes the sign of the data 27 | is_pol_resolved=True, # If the data was recored polarization resolved. 
28 | pol_first_scan='perp', # Polarisation of the first scan 29 | valid_channel=1, # Which channel to use, recent IR data always uses 1 30 | # Recent visible data uses 0 31 | ) 32 | 33 | mpf.recalculate_wavelengths(8.8) 34 | # %% 35 | 36 | para, perp, iso = mpf.avg_and_concat() 37 | 38 | pol_ds = dataset.PolTRSpec(para, perp) 39 | pol_ds.subtract_background() 40 | merged_ds = pol_ds.merge_nearby_channels(8) 41 | merged_ds.plot.spec(-.5, n_average=3); 42 | 43 | # %% 44 | merged_ds.plot.spec(-1.5, n_average=1); 45 | 46 | 47 | 48 | # %% 49 | merged_ds.t 50 | 51 | # %% 52 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2018, Till Stensitzki 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /tests/test_quickcontrol.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import zipfile 3 | from pathlib import Path 4 | 5 | from skultrafast.quickcontrol import QC1DSpec, QC2DSpec, parse_str, QCFile 6 | from skultrafast.data_io import get_example_path, get_twodim_dataset 7 | 8 | 9 | def test_parse(): 10 | assert (parse_str('-8000.000000') == -8000.0) 11 | assert (parse_str('75') == 75) 12 | assert (parse_str('TRUE') == True) 13 | assert (parse_str('FALSE') == False) 14 | flist = '-8000.000000,-7950.000000,-7900.000000,-7850.000000' 15 | res = parse_str(flist) 16 | assert (isinstance(res, list)) 17 | assert (res[0] == -8000) 18 | assert (len(res) == 4) 19 | 20 | 21 | @pytest.fixture(scope='session') 22 | def datadir(tmp_path_factory): 23 | p = get_example_path('quickcontrol') 24 | tmp = tmp_path_factory.mktemp("data") 25 | zipfile.ZipFile(p).extractall(tmp) 26 | return tmp 27 | 28 | 29 | @pytest.fixture(scope='session') 30 | def datadir2d(tmp_path_factory): 31 | p = get_twodim_dataset() 32 | return p 33 | 34 | 35 | def test_info(datadir): 36 | qc = QCFile(fname=datadir / '20201029#07') 37 | 38 | 39 | def test_1d(datadir): 40 | qc = QC1DSpec(fname=datadir / '20201029#07') 41 | assert (qc.par_data.shape == qc.per_data.shape) 42 | assert (qc.par_data.shape[1] == len(qc.t)) 43 | assert 
(qc.par_data.shape[2] == 128) 44 | ds = qc.make_pol_ds() 45 | ds.plot.spec(1) 46 | 47 | 48 | @pytest.mark.skip(reason="Tested by test_twodim.py") 49 | def test_2d(datadir2d): 50 | infos = list(Path(datadir2d).glob('*320.info')) 51 | ds = QC2DSpec(infos[0]) 52 | out = ds.make_ds() 53 | -------------------------------------------------------------------------------- /docs/zero_finding_lot.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Sep 29 20:20:34 2018 4 | 5 | @author: Tillsten 6 | """ 7 | 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | import skultrafast.data_io 11 | from skultrafast import zero_finding 12 | from scipy.ndimage import gaussian_filter1d 13 | wl, t, dat = skultrafast.data_io.load_example() 14 | plt.rcParams['font.family'] = 'serif' 15 | 16 | gw = {'height_ratios': (3, 2, 2)} 17 | fig, axs = plt.subplots(3, 1, num='test', figsize=(5, 6), gridspec_kw=gw, 18 | sharex='col', constrained_layout=True) 19 | 20 | t = t*1000 21 | y = dat[:, 200] 22 | axs[0].plot(t, dat[:, 200], '-sk', mec='w', ms=4) 23 | axs[0].set_xlim(-.5*1000, 1.5*1000) 24 | axs[-1].set_xlabel('Time [fs]') 25 | axs[0].set_ylabel('mOD') 26 | 27 | ld = axs[1].plot(t[1:], np.diff(y), '-o', c='C0') 28 | axs[1].set_ylabel('diff') 29 | 30 | axs[2].plot(t, gaussian_filter1d(y, 1.5, order=1), '-o', c='C1') 31 | axs[2].set_ylabel('gauss diff') 32 | axs[2].text(-450, -1.2, 'sigma=1.5',) 33 | axs[0].text(750, -.7, 'val=2', color='C4') 34 | td = zero_finding.use_diff(dat[:, 200]) 35 | tm = np.argmax(y) 36 | tg = zero_finding.use_gaussian(dat, sigma=1)[200] 37 | ta = zero_finding.use_first_abs(y, val=2) 38 | 39 | axs[0].plot(t[td], y[td], 'o', ms=10, zorder=1, c='C0', label='use_diff') 40 | 41 | axs[0].plot(t[tg], y[tg], 'D', ms=12, zorder=.9, c='C1', label='use_gaussian') 42 | axs[0].plot(t[tm], y[tm], '^', ms=10, zorder=1, c='C2', label='use_max') 43 | axs[0].plot(t[ta], y[ta], '>', ms=10, 
zorder=.9, c='C4', label='use_first_abs') 44 | axs[0].axhline(-2, alpha=0.5, c='C4') 45 | axs[0].axhline(2, alpha=0.5, c='C4') 46 | axs[0].legend() 47 | -------------------------------------------------------------------------------- /docs/development.rst: -------------------------------------------------------------------------------- 1 | .. _dev_docs: 2 | 3 | Developer documentation 4 | ======================= 5 | We'd love your help, either as ideas, documentation, or code. If you have a new 6 | module or want to add or fix existing code, please do. *skultrafast* tries to 7 | follow Python's PEP-8 closely. New code should have numpy-doc styled docstrings 8 | and unit tests. The aim is to increase the quality of the code base over time. 9 | 10 | The easiest way to contribute is file bugs, questions and ideas as an issue on 11 | _github. If the issue involves code, please provide small, complete, working 12 | example that illustrates the problem. 13 | 14 | Contributing code 15 | ----------------- 16 | Contributing code is done via pull-requests on 17 | `github `_. A detailed description of 18 | the workflow can be found in the `Matplotlib documentation 19 | `_. 20 | 21 | 22 | Documentation 23 | ------------- 24 | The documentation is in `docs` directory and uses the Sphinx documentation 25 | generator. Sphinx uses reStructuredText (`.rst`) as its makeup language. Simple 26 | changes to the documentation can be done by using the github web interface 27 | directly. Browse to the file, click it and use the `Edit this file` button. Use 28 | the "Create a new branch for this commit and start a pull request." option for 29 | submitting the change. 30 | 31 | The code itself uses the numpy-doc style doc-strings for public functions and 32 | classes. These doc-strings are in the source files itself. This part of can be 33 | found in the :ref:`api_docs` section. 34 | 35 | Running tests 36 | ------------- 37 | Run ``pytest`` in the source directory. 
# -*- coding: utf-8 -*-
"""
Common line-shape functions (1D and correlated 2D Gaussians/Lorentzians)
used for fitting spectra.

Created on Wed Jan 30 17:17:07 2013

@author: Tillsten
"""
import numpy as np
from scipy.special import erf


def lorentz(x, A, w, xc):
    """Lorentzian with amplitude `A`, width `w` and center `xc`."""
    return A / (1 + ((x-xc) / w)**2)


def gaussian(x, A, w, xc):
    """Gaussian with amplitude `A`, width `w` and center `xc`.

    Bugfix: the exponent was missing its minus sign, so the function grew
    away from the center instead of decaying.
    """
    return A * np.exp(-((x-xc) / w)**2)


def gauss_step(x, amp: float, center: float, sigma: float):
    """Returns the stepfunction (erf-style) for given arguments.

    Parameters
    ----------
    x : array
        Independent variable
    amp : float
        Amplitude of the step
    center : float
        Position of the step
    sigma : float
        Width of the step

    Returns
    -------
    array
        The step functions
    """
    return amp * 0.5 * (1 + erf((x-center) / sigma / np.sqrt(2)))


def gauss2d(pu, pr, A0, x_pu, x_pr, sigma_pu, sigma_pr, corr):
    """Correlated 2D-Gaussian on the (pump x probe) grid.

    Returns an array of shape ``(pr.size, pu.size)``; `corr` is the
    pump-probe correlation coefficient (must satisfy ``corr**2 != 1``).
    """
    pr = pr[:, None]
    pu = pu[None, :]
    c_pr = ((pr-x_pr) / sigma_pr)
    c_pu = ((pu-x_pu) / sigma_pu)
    y = A0*np.exp(-1/(2 - 2*corr**2) * (c_pr**2 - 2*corr*c_pr*c_pu + c_pu**2))
    return y


def two_gauss2D_shared(pu, pr, A0, x01, ah, sigma_pu, sigma_pr, corr, offset=0, k=1):
    """Two-band 2D line shape where both bands share widths and correlation.

    The second (negative) band is shifted along the probe axis by the
    anharmonicity `ah` and scaled by `k`.
    """
    y = gauss2d(pu, pr, A0, x01, x01, sigma_pu, sigma_pr, corr)
    x12 = x01 - ah
    y += gauss2d(pu, pr, -k*A0, x01, x12, sigma_pu, sigma_pr, corr)
    return y + offset


def two_gauss2D(pu, pr, A0, x01, ah, sigma_pu, sigma_pr, corr, A1, sigma_pu2,
                sigma_pr2, corr2):
    """Two-band 2D line shape with independent widths/correlations per band.

    Bugfix: both `gauss2d` calls were missing one center argument
    (`gauss2d` takes separate pump and probe centers) and raised a
    TypeError. The calls now mirror `two_gauss2D_shared`: the pump center
    stays at `x01`, the probe center of the second band is shifted by `ah`.
    """
    y = gauss2d(pu, pr, A0, x01, x01, sigma_pu, sigma_pr, corr)
    x12 = x01 - ah
    y -= gauss2d(pu, pr, A1, x01, x12, sigma_pu2, sigma_pr2, corr2)
    return y
-------------------------------------------------------------------------------- 1 | """ 2 | Messpy 2.5 Tutorial 3 | =================== 4 | This example demonstrates how to load MessPy 2.5 files. 5 | """ 6 | # %% 7 | import numpy as np 8 | import pymesh 9 | import mcubes 10 | import h5py 11 | #from skultrafast.twoD_dataset import ContourOptions 12 | from skultrafast.messpy import Messpy25File 13 | from matplotlib import pyplot as plt 14 | from skultrafast import plot_helpers 15 | 16 | # %% 17 | 18 | 19 | # %% 20 | plot_helpers.enable_style() 21 | 22 | f = h5py.File( 23 | r"C:\Users\TillStensitzki\Nextcloud\AG Mueller-Werkmeister\2DIR\tmp2d\22-02-09 13_16 1 M NaSCN in H2O 10 mu.messpy") 24 | # %% 25 | mp = Messpy25File(f) 26 | 27 | two_d = mp.get_means() 28 | # mp.make_model_fitfiles(r'C:/Users/TillStensitzki/Desktop/test', 29 | # 'test', probe_filter=2, bg_correct=(5, 15)) 30 | two_d = mp.make_two_d(probe_filter=1, bg_correct=(10, 10))['iso'] 31 | 32 | two_d = two_d.select_range((2000, 2155), (1985, 2155)).select_t_range(0, 6) 33 | co = {'levels': 20} 34 | 35 | two_d.plot.contour( 36 | 0.2, 0.4, 0.8, 1.6, 3, 6, contour_params=co, ax_size=1.9, average=1, fig_kws={'dpi': 150}) 37 | plt.savefig('contour.svg') 38 | ds = two_d 39 | # %% 40 | y_cls, x_cls, lin_fit = ds.single_cls(2, pr_range=20, pu_range=15, method='fit') 41 | 42 | # Plot the result 43 | artists = ds.plot.contour(2) 44 | ax = artists[0]['ax'] 45 | 46 | # First plot the maxima 47 | ax.plot(x_cls, y_cls, color='yellow', marker='o', markersize=3, lw=0) 48 | 49 | # Plot the resulting fit. Since the slope is a function of the pump frequencies, 50 | # we have to use y-values as x-coordinatetes for the slope. 
def response_functions(g, coords, omega, domega, two_level=False):
    """Rephasing and non-rephasing third-order response functions.

    Builds the responses from the line-broadening function ``g``.

    Parameters
    ----------
    g : callable
        Line-broadening function ``g(t)``; must accept the array/scalar
        coordinates passed in ``coords``.
    coords : tuple
        ``(T1, t2, T3)`` time coordinates. NOTE(review): the three-level
        branch substitutes ``g(T1).T`` for ``g(T3)``, so it assumes the
        T1/T3 grids are transposes of each other (square meshgrid) —
        confirm with the caller.
    omega : float
        Oscillation frequency of the coherences (rotating-frame offset).
    domega : float
        Unused in the current implementation.
    two_level : bool, optional
        If True, compute a pure two-level response (no population factor
        with the anharmonically shifted pathway).

    Returns
    -------
    (ndarray, ndarray)
        Rephasing ``R_r`` and non-rephasing ``R_nr`` responses on the
        (T1, T3) grid. The first row and column are halved in place.
    """
    T1, t2, T3 = coords
    anh = 5  # NOTE(review): hard-coded anharmonicity; `simulate_ifg` takes `anh` as a parameter — unify?
    if two_level:
        # Two-level system: only the oscillating dipole terms.
        R_r = np.exp(-1j*omega*(-T1+T3))*np.exp(-g(T1)+g(t2) -
                                                g(T3)-g(T1+t2)-g(t2+T3)+g(T1+t2+T3))
        R_nr = np.exp(-1j*omega*(-T1+T3))*np.exp(-g(T1)-g(t2) -
                                                 g(T3)+g(T1+t2)+g(t2+T3)-g(T1+t2+T3))
    else:
        # Cache the g-evaluations; the transposes reuse the (assumed)
        # symmetry of the T1/T3 grids instead of re-evaluating g.
        gT1 = g(T1)
        gt2 = g(t2)
        gT3 = gT1.T  # assumes T3 == T1.T — TODO confirm
        gT1t2 = g(T1+t2)
        gt2T3 = gT1t2.T
        ga = g(T1+t2+T3)
        # Factor of 2 minus the anharmonically shifted oscillation: the
        # pathway through the second excited state enters with opposite sign.
        pop = (2-2*np.exp(-1j*anh*T3))
        osc = np.exp(-1j*omega*(-T1+T3))
        R_r = osc*np.exp(-gT1+gt2-gT3-gT1t2-gt2T3+ga)*pop
        R_nr = osc*np.exp(-gT1-gt2-gT3+gT1t2+gt2T3-ga)*pop

    # Halve the first row and column in place — presumably trapezoid-rule
    # weighting for the subsequent FFT over t1/t3; confirm before changing.
    R_r[:, 0] *= 0.5
    R_r.T[:, 0] *= 0.5
    R_nr[:, 0] *= 0.5
    R_nr.T[:, 0] *= 0.5
    return R_r, R_nr
It is very easy to import data into skultrafast, since all that *skultrafast*
requires are numpy arrays. Therefore, writing a loader function in Python should
be straightforward.
The code assumes, that the 36 | polarization of is switched every scan:: 37 | 38 | mf = MessPyFile(file_name, valid_channel=0, is_pol_resolve=True, 39 | pol_first_scan='perp') #or 'para' 40 | data_set_dict = mf.average_scans() 41 | 42 | How to *load data from IR-Probe setup?* Same as above, but with 43 | ``valid_channel=1``, since the zeroth channel contains the unreferenced data. 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /skultrafast/examples/convolution.py: -------------------------------------------------------------------------------- 1 | """ 2 | Sympy notebook with the convolution 3 | =================================== 4 | In this notebook we use *sympy*, a CAS for python, to calculate the convolution of an 5 | gaussian with an one-sided decaying exponential. Since *sympy* is not an requirement of 6 | *skultrafast*, it may be manually installed. 7 | """ 8 | # %% 9 | import sympy 10 | from sympy import (symbols, Heaviside, exp, sqrt, oo, integrate, simplify, Eq, 11 | plot, pi, init_printing, solve) 12 | from sympy import erfc, erf 13 | init_printing() 14 | # %% 15 | # First, we need to define sympy symbols. 16 | 17 | A, t, ti = symbols('A t ti', real=True) 18 | tau, sigma = symbols('tau sigma', positive=True) 19 | step = Heaviside 20 | 21 | # %% [rsT] 22 | # Define :math:`y=A\exp(-t/\tau)` and the Gaussian IRF. 23 | 24 | # %% 25 | y = step(t) * A * exp(-t / tau) 26 | y 27 | 28 | # %% 29 | irf = 1 / sqrt(2 * pi * sigma**2) * exp(-t**2 / (2 * sigma**2)) 30 | irf 31 | 32 | # %% 33 | # Calculate the covolution integral. 
34 | 35 | # %% 36 | func = integrate((irf.subs(t, t - ti) * y.subs(t, ti)), (ti, -oo, oo)) 37 | func = simplify(func) 38 | func 39 | 40 | # %% 41 | # Rewrite the `erf` with the `erfc` function: 42 | # erfc, erf = special.error_functions.erfc, special.error_functions.erf 43 | func2 = func.rewrite(erfc) 44 | func2 45 | 46 | # %% [markdown] 47 | # Plot the result for visualization: 48 | plot(func2.subs(sigma, 0.2).subs(tau, 2).subs(A, 1), (t, -1, 10)) 49 | 50 | # %% 51 | # Normalized derivatives of a Gaussian 52 | # ------------------------------------ 53 | # These are used to model coherent contributions. 54 | 55 | irf, irf.diff(t, 1), irf.diff(t, 2) 56 | 57 | p1 = None 58 | for i in range(0, 3): 59 | f = irf.diff(t, i) 60 | sol = solve(f.diff(t, 1), t) 61 | ext = f.subs(t, sol[0]) 62 | if not p1: 63 | p1 = plot((f/ext).subs(sigma, 1), (t, -4, 4), show=False) 64 | else: 65 | p1.append(plot((f/ext).subs(sigma, 1), (t, -4, 4), show=False)[0]) 66 | print((f/ext)) 67 | p1.show() 68 | # %% 69 | plot(irf.diff(t).subs(sigma, 0.2).subs(tau, 2), (t, -1, 1)) 70 | plot(irf.diff(t, 2).subs(sigma, 0.2).subs(tau, 2), (t, -1, 1)) 71 | -------------------------------------------------------------------------------- /skultrafast/base_funcs/base_functions_np.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Wed Jul 31 15:38:47 2013 4 | 5 | @author: Tillsten 6 | """ 7 | import numpy as np 8 | from scipy.special import erfc 9 | import math 10 | sq2 = math.sqrt(2) 11 | 12 | 13 | def _fold_exp(tt, w, tz, tau): 14 | """ 15 | Returns the values of the folded exponentials for given parameters. 16 | 17 | Parameters 18 | ---------- 19 | tt: ndarray(N) 20 | Array containing the time-coordinates 21 | w: float 22 | The assumed width/sq2 23 | tz: float 24 | The assumed time zero. 25 | tau: ndarray(M) 26 | The M-decay rates. 27 | 28 | Returns 29 | ------- 30 | y: ndarray 31 | Folded exponentials for given taus. 
32 | 33 | """ 34 | if w == 0: 35 | k = -1 / tau 36 | return np.exp((tt.reshape(tt.shape[0], tt.shape[1], 1), -tz) * k.reshape(1, 1, -1)) 37 | ws = w 38 | k = 1 / (tau[..., None, None]) 39 | t = (tt + tz).T[None, ...] 40 | y = np.exp(k * (ws * ws * k / (4.0) - t)) 41 | y *= 0.5 * erfc(-t / ws + ws * k / (2.0)) 42 | return y.T 43 | 44 | 45 | def _fold_exp_and_coh(t_arr, w, tz, tau_arr): 46 | a = _fold_exp(t_arr, w, tz, tau_arr) 47 | b = _coh_gaussian(t_arr, w, tz) 48 | return a, b 49 | 50 | 51 | exp_half = np.exp(0.5) 52 | 53 | 54 | def _coh_gaussian(t, w, tz): 55 | """Models artifacts proportional to a gaussian and it's derivatives 56 | 57 | Parameters 58 | ---------- 59 | t: ndarray 60 | Array containing the time-coordinates 61 | w: float 62 | The assumed width/sq2 63 | tz: float 64 | The assumed time zero. 65 | 66 | Returns 67 | ------- 68 | y: ndarray (len(t), 3) 69 | Array containing a gaussian and it the scaled derivatives, 70 | each in its own column. 71 | """ 72 | w = w / sq2 73 | tt = t + tz 74 | idx = (tt / w < 3.) 
75 | y = np.where(idx, np.exp(-0.5 * (tt/w) * (tt/w)), 0) 76 | y = np.tile(y[..., None], (1, 1, 3)) 77 | tt = tt[idx] 78 | y[idx, ..., 1] *= (-tt * exp_half / w) 79 | y[idx, ..., 2] *= (tt*tt/w/w - 1) 80 | #y[idx,..., 2] *= (-tt ** 3 / w ** 6 + 3 * tt / w ** 4) 81 | return y 82 | -------------------------------------------------------------------------------- /skultrafast/spectrum.py: -------------------------------------------------------------------------------- 1 | from attr import dataclass, define, evolve, field 2 | import numpy as np 3 | 4 | from typing import Optional, Tuple, List 5 | import matplotlib.pyplot as plt 6 | 7 | def _default_fit_style(): 8 | return {'color': 'k', 'linewidth': 1} 9 | 10 | def _default_line_style(): 11 | return {} 12 | 13 | @dataclass 14 | class PlotOptions: 15 | xlabel: str = 'Wavenumber [cm-1]' 16 | ylabel: str = 'Absorption [OD]' 17 | fit_style: dict = field(factory=_default_fit_style) 18 | line_style: dict = field(factory=_default_line_style) 19 | 20 | @dataclass 21 | class Spectrum1D: 22 | x: np.ndarray 23 | y: np.ndarray 24 | y_baseline: Optional[np.ndarray] = None 25 | plot_ops: PlotOptions = field(factory=PlotOptions) 26 | 27 | def copy(self): 28 | cpy = evolve(self) 29 | cpy.x = self.x.copy() 30 | cpy.y = self.y.copy() 31 | 32 | def select(self, low: float = -np.inf, high: float = np.inf, invert=False): 33 | """ 34 | Selects a subrange of the spectrum. The range is defined by the 35 | low and high values. If invert is True, the outside of the range is selected. 
36 | """ 37 | idx = (self.x >= low) & (self.x <= high) 38 | if invert: 39 | idx = ~idx 40 | return Spectrum1D(x=self.x[idx], y=self.y[idx]) 41 | 42 | def est_poly_baseline(self, poly_deg, region: Optional[Tuple[float, float]]=None, 43 | exclude: List[Tuple[float, float]]=[]): 44 | if region is None: 45 | idx = np.ones(self.x.size, dtype=bool) 46 | else: 47 | high = max(*region) 48 | low = min(*region) 49 | idx = np.where((self.x >= low) & (self.x <= high)) 50 | assert isinstance(idx, np.ndarray) 51 | for r in exclude: 52 | high = max(*r) 53 | low = min(*r) 54 | idx[((self.x >= low) & (self.x <= high))] = False 55 | x, y = self.x[idx], self.y[idx] 56 | coefs = np.polyfit(x, y, deg=poly_deg) 57 | yfit = np.polyval(coefs, self.x) 58 | assert isinstance(yfit, np.ndarray) 59 | self.y_baseline = yfit 60 | 61 | def plot(self, ax: Optional[plt.Axes]=None): 62 | if ax is None: 63 | ax = plt.gca() # type: plt.Axes 64 | ax.plot(self.x, self.y) 65 | if self.y_baseline is not None: 66 | ax.plot(self.x, self.y_baseline) 67 | -------------------------------------------------------------------------------- /tests/test_basefuncs.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Apr 21 20:34:15 2013 4 | 5 | @author: Tillsten 6 | """ 7 | 8 | from skultrafast.base_funcs.base_functions_numba import fast_erfc, _fold_exp, _exp 9 | 10 | import skultrafast.base_funcs.base_functions_np as bnp 11 | import skultrafast.base_funcs.base_functions_numba as bnb 12 | 13 | from numpy.testing import assert_array_almost_equal 14 | import numpy as np 15 | import pytest 16 | 17 | 18 | def test_fast_erfc(): 19 | from scipy.special import erfc as erfc_s 20 | x = np.linspace(-3, 3, 200) 21 | y = np.array([fast_erfc(i) for i in x]) 22 | assert_array_almost_equal(erfc_s(x), y, 3) 23 | 24 | 25 | def test_exp(): 26 | taus = np.array([1., 20., 30.]) 27 | t_array = np.subtract.outer(np.linspace(0, 50, 300), np.linspace(0, 
def test_folded_equals_exp():
    """
    For t >> w the folded exponential equals the plain exponential.
    """
    taus = np.array([1., 20., 30.])
    t_array = np.subtract.outer(np.linspace(40, 50, 300), np.linspace(3, 3, 400))
    w = 0.1
    y = _fold_exp(t_array, w, 0, taus)
    # Compare against the analytic limit. (The previous version also
    # computed a second, identical `_fold_exp` result `y2` that was never
    # used — dead code removed.)
    exp_y = np.exp(-t_array[:, :, None] / taus[None, None, :])
    np.testing.assert_array_almost_equal(y, exp_y)


def test_compare_fold_funcs():
    """The numpy and numba implementations of `_fold_exp` must agree."""
    taus = np.array([1., 20., 30.])
    t_array = np.subtract.outer(np.linspace(-2, 50, 300), np.linspace(-1, 3, 400))
    w = 0.1
    y1 = bnp._fold_exp(t_array, w, 0, taus)
    y3 = bnb._fold_exp(t_array, w, 0, taus)
    np.testing.assert_array_almost_equal(y1, y3, 3)
"""
Module containing helpers for small calculations involving nonlinear optics.
"""
# `c` re-exported by skultrafast.unit_conversions is scipy's speed of
# light; fall back to scipy directly so the module is importable standalone.
try:
    from skultrafast.unit_conversions import c
except ImportError:
    from scipy.constants import c
from scipy.optimize import minimize_scalar
import numpy as np


def tl_pulse_from_nm(center_wl: float, fhwm: float, shape: str = 'gauss') -> float:
    """
    Calculates the transform-limited pulse length in fs from a given center
    wavelength and spectral FWHM, both in nanometers.

    Parameters
    ----------
    center_wl : float
        Center wavelength in nm.
    fhwm : float
        Spectral FWHM in nm (parameter name kept for backward
        compatibility).
    shape : str, optional
        Pulse shape, 'gauss' or 'sech', by default 'gauss'. Determines the
        time-bandwidth product.

    Returns
    -------
    float
        Transform-limited pulse duration (FWHM) in femtoseconds.

    Raises
    ------
    ValueError
        If `shape` is neither 'gauss' nor 'sech'. (Bugfix: previously an
        unknown shape caused an UnboundLocalError on `tbw`.)
    """
    if shape == 'gauss':
        tbw = 0.44
    elif shape == 'sech':
        tbw = 0.315
    else:
        raise ValueError("shape must be 'gauss' or 'sech', got %r" % shape)
    # delta_nu = c * delta_lambda / lambda**2;  delta_t = TBW / delta_nu
    return tbw / (c*1e9 * fhwm / center_wl**2)*1e15


def pulse_length(t_in, phi_2):
    """
    Pulse duration after adding group-delay dispersion `phi_2` to a
    transform-limited Gaussian pulse of duration `t_in`.

    Parameters
    ----------
    t_in : float
        Transform-limited input pulse duration (FWHM).
    phi_2 : float
        Group-delay dispersion (input time unit squared).

    Returns
    -------
    float
        Broadened output pulse duration.
    """
    f = 4*np.log(2)*phi_2 / t_in**2
    return t_in * np.sqrt(1+f**2)


def dispersion(t_in, t_out):
    """
    Estimates the amount of group-delay dispersion that broadens a
    transform-limited pulse of duration `t_in` to the measured duration
    `t_out`.

    Parameters
    ----------
    t_in : float
        Transform-limited (input) pulse duration.
    t_out : float
        Measured (chirped) pulse duration.

    Returns
    -------
    float
        Estimated group-delay dispersion.
    """
    res = minimize_scalar(lambda x: (pulse_length(t_in, x) - t_out)**2)
    return res.x


def dist(d, alpha=10):
    """
    Lateral beam displacement after a double bounce between mirrors
    separated by `d` at an angle of incidence `alpha` in degrees —
    presumably used to count bounces on a chirped-mirror pair; TODO
    confirm the geometry.
    """
    a = np.deg2rad(alpha)
    return np.sin(a) * d * 2


# NOTE(review): removed interactive scratch code that previously ran at
# import time (matplotlib figures, dice-statistics experiments and a
# dangling `itertools.co` attribute access that raised AttributeError).
# A library module must be importable without side effects.
The background is calculated by 37 | taking the mean signal in the designated region. 38 | 39 | Parameters 40 | ---------- 41 | region : tuple of two floats 42 | The borders of the region. 43 | """ 44 | a, b = region 45 | i1, i2 = self.fi(a), self.fi(b) 46 | self.back -= self.y[i1:i2, ...].mean(0) 47 | 48 | def cut(self, region, invert_sel=True): 49 | """ 50 | Cuts part of the spectrum away. 51 | 52 | Parameters 53 | ---------- 54 | region : tuple of floats 55 | Defines the region to be cutted away. 56 | invert_sel : bool 57 | If `True`, the cutted region is inverted. 58 | 59 | Returns 60 | ------- 61 | SingleSpec 62 | Cut spectra. 63 | 64 | """ 65 | a, b = region 66 | i1, i2 = self.fi(a), self.fi(b) 67 | new_x, new_y = self.x[i1:i2], self.y[i1:i2, ...] 68 | return SingleSpec(new_x, new_y, self.unit_freq, self.unit_signal) 69 | 70 | def fit_single_gauss(self, start_params=None, back_deg=2, 71 | peak_region=None): 72 | peak = lmfit.models.GaussianModel(x=self.x) 73 | back = lmfit.models.PolynomialModel(degree=back_deg) 74 | model = peak + back 75 | 76 | 77 | class SingleSpecPlotter: 78 | def __init__(self, single_spec: SingleSpec): 79 | self.ds = single_spec 80 | 81 | def spec(self, remove_back=True, ax=None): 82 | if ax is None: 83 | ax = plt.gca() 84 | ds = self.ds 85 | 86 | ax.plot(ds.x, ds.y - ds.back) 87 | ax.set_xlabel(ds.unit_freq) 88 | ax.set_ylabel(ds.unit_signal) 89 | ax.minorticks_on() 90 | -------------------------------------------------------------------------------- /docs/fitting.rst: -------------------------------------------------------------------------------- 1 | ******** 2 | Appendix 3 | ******** 4 | 5 | Mathematical details of the fitting procedure 6 | ============================================= 7 | 8 | This section will describe the algorithm used to do the exponential fit. It 9 | mostly follows prior work, with only some small modifications to increase the 10 | speed and the stability of the fitting. 
This is also called Tikhonov regularization or Ridge regression.
If the different frequencies don't share a common time-axis, the matrix A and
its pseudoinverse have to be calculated for every channel, which gets
time-consuming for larger datasets.
# rcParams for outward ticks with hidden top/right spines; used below via
# a style context so the demo does not touch the global rcParams.
out_ticks = {'xtick.direction': 'out',
             'xtick.major.width': 1.5,
             'xtick.minor.width': 1,
             'xtick.major.size': 6,
             'xtick.minor.size': 3,
             'xtick.minor.visible': True,
             'ytick.direction': 'out',
             'ytick.major.width': 1.5,
             'ytick.minor.width': 1,
             'ytick.major.size': 6,
             'ytick.minor.size': 3,
             'ytick.minor.visible': True,
             'axes.spines.top': False,
             'axes.spines.right': False,
             'text.hinting': True,
             'axes.titlesize': 'xx-large',
             'axes.titleweight': 'semibold',
             }


plt.figure(figsize=(6, 4))

with plt.style.context(out_ticks):
    ax = plt.subplot(111)
    x = np.linspace(0, 7, 1000)
    # Demo signal: exponentially damped cosine with its envelope.
    y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))
    l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey')
    l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey')
    l, = plt.plot(x, y, lw=1.1)
    plt.tick_params(which='both', top=False, right=False)
    plt.margins(0.01)
    ax.text(7, 1, r'$y(t)=\exp\left(-t/1.5\right)\cos(\omega_1t)\cos(\omega_2t)$',
            fontsize=18, va='top', ha='right')
    plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude')
    # Inset showing the one-sided amplitude spectrum.
    ax = plt.axes([0.57, 0.25, 0.3, .2])
    # Bugfix: `y.size / 2` is a float under Python 3 and is not a valid
    # slice index; use integer division instead.
    ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size//2],
                    abs(np.fft.fft(y))[:y.size//2], alpha=0.2, color='r')
    ax.set_xlim(0, 10)
    ax.set_xlabel("Frequency")
    ax.xaxis.labelpad = 1

    plt.locator_params(nbins=4)
    plt.tick_params(which='both', top=False, right=False)
    plt.tick_params(which='minor', bottom=False, left=False)

plt.show()
Below we apply upsampling of the pump
axis and use 10 pixels left and right to estimate and subtract a background
before taking the FFT. For apodization, we use the default Hamming window.
This module contains functions to convert between units of energy.
3 | """ 4 | import numpy as np 5 | from scipy.constants import physical_constants, c 6 | 7 | c_cm = c * 100 8 | names = dict( 9 | cm="wavenumbers in 1/cm", 10 | fs="period in femotoseconds", 11 | ps="period in picoseconds", 12 | nm="wavelength in nanometers", 13 | eV="energy in electron Volt", 14 | THz="frequency in THz", 15 | dichro="Dichroic ratio (para/perp)", 16 | angle="relative angle between transition dipole moments in degrees", 17 | aniso="Anisotropy (para-perp)/(para+2*perp)", 18 | kcal="energy in kcal/mol", 19 | invps="energy in inverse picoseconds", 20 | ) 21 | 22 | 23 | def make_doc(func): 24 | a, b = str.split(func.__name__, '2') 25 | func.__doc__ = ('%s to %s' % (names[a], names[b])).capitalize() 26 | return func 27 | 28 | 29 | @make_doc 30 | def fs2cm(t): 31 | return 1 / (t*1e-15*c_cm) 32 | 33 | 34 | @make_doc 35 | def cm2fs(cm): 36 | return 1e15 / (cm*c_cm) 37 | 38 | 39 | @make_doc 40 | def ps2cm(t): 41 | return 1 / (t*1e-12*c_cm) 42 | 43 | 44 | @make_doc 45 | def cm2ps(cm): 46 | return 1e12 / (cm*c_cm) 47 | 48 | 49 | @make_doc 50 | def nm2cm(nm): 51 | return 1e7 / nm 52 | 53 | 54 | @make_doc 55 | def cm2nm(cm): 56 | return 1e7 / cm 57 | 58 | 59 | @make_doc 60 | def cm2eV(cm): 61 | eV_m = physical_constants['electron volt-inverse meter relationship'][0] 62 | eV_cm = eV_m / 100 63 | return cm / eV_cm 64 | 65 | 66 | @make_doc 67 | def eV2cm(eV): 68 | eV_m = physical_constants['electron volt-inverse meter relationship'][0] 69 | eV_cm = eV_m / 100 70 | return eV * eV_cm 71 | 72 | 73 | @make_doc 74 | def THz2eV(THz): 75 | hertz_eV = physical_constants['hertz-electron volt relationship'][0] 76 | return THz * 1e12 * hertz_eV 77 | 78 | 79 | @make_doc 80 | def eV2THz(eV): 81 | eV_Hertz = physical_constants['electron volt-hertz relationship'][0] 82 | return eV * eV_Hertz * 1e-12 83 | 84 | 85 | @make_doc 86 | def cm2THz(cm): 87 | return 1 / fs2cm(cm) / 1e-3 88 | 89 | 90 | @make_doc 91 | def THz2cm(THz): 92 | return cm2fs(1e3 / THz) 93 | 94 | 95 | @make_doc 96 
def dichro2angle(d):
    """Dichroic ratio (para/perp) to relative angle between transition
    dipole moments in degrees."""
    return np.arccos(np.sqrt((2*d - 1) / (d+2))) / np.pi * 180


def angle2dichro(deg):
    """Relative angle between transition dipole moments in degrees to
    dichroic ratio (para/perp)."""
    rad = np.deg2rad(deg)
    return (1 + 2 * np.cos(rad)**2) / (2 - np.cos(rad)**2)


def angle2aniso(deg):
    """Relative angle between transition dipole moments in degrees to
    anisotropy (para-perp)/(para+2*perp)."""
    ang = np.deg2rad(deg)
    return 2 / 5 * (3 * np.cos(ang)**2 - 1) / 2


def aniso2angle(r):
    """Anisotropy (para-perp)/(para+2*perp) to relative angle between
    transition dipole moments in degrees."""
    return np.arccos(np.sqrt((r*10/2 + 1) / 3)) / np.pi * 180


def cm2kcal(cm):
    """Wavenumbers in 1/cm to energy in kcal/mol."""
    return cm * 2.859e-3


def kcal2cm(kcal):
    """Energy in kcal/mol to wavenumbers in 1/cm."""
    return kcal / 2.859e-3


def invps2cm(invps):
    """Frequency in inverse picoseconds to wavenumbers in 1/cm."""
    # f[Hz] = invps * 1e12; wavenumber = f / c_cm. The previous version used
    # c in m/s (yielding inverse meters) and returned the inverted ratio,
    # inconsistent with the frequency semantics used elsewhere (kubo_fitting).
    return invps * 1e12 / (c * 100)


def cm2invps(cm):
    """Wavenumbers in 1/cm to frequency in inverse picoseconds."""
    return cm * (c * 100) * 1e-12
    def eval(self, params=None, residual=True, t=None, wn=None):
        """
        Evaluate the summed PFID model for all peaks.

        Parameters
        ----------
        params : lmfit.Parameters, optional
            Parameter set to evaluate; defaults to ``self.params``.
        residual : bool
            If True, return the stacked residual (model minus data) for the
            parallel and perpendicular datasets, optionally extended by a
            regularization term. If False, return the model arrays
            ``(pa, pe)``.
        t : ndarray, optional
            Delay times; defaults to the dataset time axis. A fitted ``t0``
            parameter, if present, is subtracted.
        wn : ndarray, optional
            Probe wavenumbers; defaults to the dataset axis.
        """
        # print(params.values())
        if params is None:
            params = self.params

        if t is None:
            t = self.ds.t
        if 't0' in params:
            t = t - params['t0'].value
        if wn is None:
            wn = self.ds.wavenumbers
        # Flatten the parameter dict into a (num_peaks, 6) array; the final
        # entry is the global t0, dropped by the [:-1] slice.
        vals = np.array(list(params.valuesdict().values()), dtype="f")
        vals = vals[:-1].reshape(self.num_peaks, 6)

        dshape = self.ds.iso.data.shape
        out_pa = np.zeros((*dshape, self.num_peaks))
        out_pe = np.zeros_like(self.ds.iso.data)
        alpha = 0

        # for i in vals:
        # A, x0, T2, angle, B, shift = i

        # Vectorized over peaks: each column of `vals` holds per-peak values.
        A, x0, T2, angle, B, shift = vals.T
        # B is fitted as a fraction (0..1) of A; convert to an amplitude of
        # opposite sign for the shifted (r6) contribution.
        B = -B * A
        dichro = angle2dichro(angle)
        pe = A * pfid_r4(-t, wn, x0, T2)
        pe += B * pfid_r6(-t, wn, x0, shift, T2)
        # Parallel signal is the perpendicular one scaled by the dichroic ratio.
        pa = dichro * pe
        # Sum over the peak axis to get the total model per polarization.
        out_pe = pe.sum(-1)
        out_pa = pa.sum(-1)
        # Squared-amplitude penalty, used for the optional regularization below.
        alpha = np.sum(A * A) + np.sum(B * B)
        if residual:
            out_pa -= self.ds.para.data
            out_pe -= self.ds.perp.data
            out = np.hstack((out_pa, out_pe))
            if self.alpha > 0:
                out = np.hstack((out.ravel(), alpha / self.num_peaks * self.alpha))
            return out
        else:
            return pa, pe
def fast_erfc(x):
    """
    Approximate the complementary error function near zero.

    Uses a four-term rational approximation (Abramowitz & Stegun style),
    which is faster than the library erfc at the price of a larger --
    but for our purposes harmless -- absolute error.

    Parameters
    ----------
    x: float
        The argument.

    Returns
    -------
    ret: float
        The (approximate) erfc of x.
    """
    c1 = 0.278393
    c2 = 0.230389
    c3 = 0.000972
    c4 = 0.078108
    # The approximation is only valid for x >= 0; map negative arguments
    # via the symmetry erfc(-x) = 2 - erfc(x).
    negative = x < 0
    if negative:
        x = -x
    denom = 1 + x * (c1 + x * (c2 + x * (c3 + x * c4)))
    approx = 1. / denom**4
    if negative:
        approx = 2. - approx
    return approx
90 | 91 | """ 92 | ws = w 93 | k = 1 / tau 94 | k = k.reshape(tau.size, 1, 1) 95 | 96 | t = (tt + tz).T.reshape(1, tt.shape[1], tt.shape[0]) 97 | y = np.zeros_like(t) 98 | idx = (np.abs(t/w) < 3) 99 | y[idx] = np.exp(k * (ws * ws * k / (4.0) - t[idx])) * 0.5 * my_erfc(-t[idx] / ws + ws * k / (2.0)) 100 | #y = np.exp(k * (ws * ws * k / (4.0) - t)) 101 | #y *= 0.5 * my_erfc(-t / ws + ws * k / (2.0)) 102 | return y.T 103 | 104 | 105 | jit1 = nb.njit(_fold_exp, parallel=True, fastmath=True) 106 | jit2 = nb.njit(_fold_exp, parallel=False, fastmath=True) 107 | jit3 = nb.njit(_fold_exp, parallel=True, fastmath=False) 108 | jit4 = nb.njit(_fold_exp, parallel=False, fastmath=False) 109 | 110 | from base_functions_numba import _fold_exp as jit5 111 | 112 | plt.figure() 113 | for i, j in enumerate([jit1, jit2, jit3, jit4, jit5]): 114 | for N in np.geomspace(10, 1000, 5): 115 | res_jit = benchmark(j, ta_shape=(300, int(N)), N=30) 116 | plt.plot(np.ones_like(res_jit.all) * N, 117 | res_jit.all, 118 | 'x', 119 | c='C' + str(i)) 120 | 121 | plt.show() 122 | -------------------------------------------------------------------------------- /skultrafast/lifetimemap.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function 3 | import numpy as np 4 | from typing import Tuple, List, Iterable 5 | from sklearn import linear_model as lm 6 | from skultrafast.base_funcs.base_functions_np import _fold_exp, _coh_gaussian 7 | 8 | 9 | def _make_base( 10 | tup, 11 | taus: np.ndarray, 12 | w=0.1, 13 | add_coh: bool = True, 14 | add_const: bool = False, 15 | norm: bool = False, 16 | ): 17 | if add_const: 18 | taus = np.hstack((taus, 1000000)) # type: ignore 19 | out: np.ndarray = _fold_exp(tup.t.T[:, None], w, 0, taus[None, :]).squeeze() 20 | if add_const: 21 | out[:, -1] *= 1000 22 | if add_coh: 23 | out = np.hstack((out, _coh_gaussian(tup.t.T[:, None], w, 0).squeeze())) * 10 # type: ignore 24 | if 
norm: 25 | out = out / np.abs(out).max(0) 26 | 27 | return out.squeeze() 28 | 29 | 30 | def start_ltm( 31 | tup, 32 | taus, 33 | w=0.1, 34 | add_coh=False, 35 | use_cv=False, 36 | add_const=False, 37 | verbose=False, 38 | **kwargs, 39 | ): 40 | """Calculate the lifetime density map for given data. 41 | 42 | Parameters 43 | ---------- 44 | tup : datatuple 45 | tuple with wl, t, data 46 | taus : list of floats 47 | Used to build the basis vectors. 48 | w : float, optional 49 | Used sigma for calculating the , by default 0.1. 50 | add_coh : bool, optional 51 | If true, coherent contributions are added to the basis. 52 | By default False. 53 | use_cv : bool, optional 54 | Whether to use cross-validation, by default False 55 | add_const : bool, optional 56 | Whether to add an explict constant, by default False 57 | verbose : bool, optional 58 | Wheater to be verobse, by default False 59 | 60 | Returns 61 | ------- 62 | tuple of (linear_model, coefs, fit, alphas) 63 | The linear model is the used sklearn model. Coefs is the arrary 64 | of the coefficents, fit contains the resulting fit and alphas 65 | is an array of the applied alpha value when using cv. 
66 | """ 67 | 68 | X = _make_base(tup, taus, w=w, add_const=add_const, add_coh=add_coh) 69 | if not use_cv: 70 | mod = lm.ElasticNet(**kwargs, l1_ratio=0.98) 71 | 72 | else: 73 | mod = lm.ElasticNetCV(**kwargs, l1_ratio=0.98) 74 | 75 | mod.fit_intercept = not add_const 76 | mod.warm_start = True 77 | 78 | coefs = np.empty((X.shape[1], tup.data.shape[1])) 79 | fit = np.empty_like(tup.data) 80 | alphas = np.empty(tup.data.shape[1]) 81 | 82 | for i in range(tup.data.shape[1]): 83 | if verbose: 84 | print(i, "ha", end=";") 85 | mod.fit(X, tup.data[:, i]) 86 | coefs[:, i] = mod.coef_.copy() 87 | fit[:, i] = mod.predict(X) 88 | if hasattr(mod, "alpha_"): 89 | alphas[i] = mod.alpha_ 90 | return mod, coefs, fit, alphas 91 | 92 | 93 | def start_ltm_multi(tup, taus, w=0.1, alpha=0.001, **kwargs): 94 | X = _make_base(tup, taus, w=w) 95 | mod = lm.MultiTaskElasticNet(alpha=alpha, **kwargs) 96 | mod.max_iter = 50_000 97 | mod.verbose = True 98 | mod.fit_intercept = False 99 | mod.normalize = True 100 | mod.fit(X, tup.data) 101 | 102 | fit = mod.predict(X) 103 | coefs = mod.coef_ 104 | return mod, coefs, fit, None 105 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | 2 | .. image:: https://github.com/Tillsten/skultrafast/raw/master/docs/_static/skultrafast_logo_v1.svg 3 | :alt: ***skultrafast*** 4 | 5 | .. image:: https://readthedocs.org/projects/skultrafast/badge/?version=latest 6 | :target: https://skultrafast.readthedocs.io/en/latest/?badge=latest 7 | :alt: Documentation Status 8 | 9 | .. image:: https://github.com/Tillsten/skultrafast/workflows/Python%20package/badge.svg 10 | :target: https://github.com/Tillsten/skultrafast/actions?query=workflow%3A%22Python+package%22 11 | 12 | .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.5713589.svg 13 | :target: https://doi.org/10.5281/zenodo.5713589 14 | 15 | What is skultrafast? 
--------------------
skultrafast is a Python package for ultrafast spectroscopy data analysis. It provides tools
for analyzing various types of time-resolved spectroscopy data, including
pump-probe, transient absorption, and two-dimensional infrared (2D-IR)
spectroscopy. The package includes functionality for data import, visualization,
processing, and fitting. It is built on top of the scientific Python ecosystem,
including NumPy, SciPy, and Matplotlib.

The latest version of the package is available on `github
<https://github.com/Tillsten/skultrafast>`_. A build of the documentation can be found at `Read
the docs <https://skultrafast.readthedocs.io/en/latest/>`_. The documentation
includes `Installation notes
<https://skultrafast.readthedocs.io/en/latest/install.html>`_.


Funding
-------
The package was created and is maintained by *Till Stensitzki*. The package was
created while he was employed in the `Heyne group `_ and was therefore funded by the DFG via `SFB
1078 `_ and `SFB 1114 `_. Recent development
focused on 2D-spectroscopy is part of my stay in the `Ultrafast Structural Dynamics
Group `_ in Potsdam under Müller-Werkmeister.


Scope of the project
--------------------
I like to include any kind of algorithm or data structure which comes up in
ultrafast spectroscopy. I am also open to adding a graphical interface to the
package, but as experience shows, a GUI brings in a lot of maintenance burden.
Hence, the first target is an interactive data explorer for the Jupyter notebook.

This package also tries its best to follow modern software practices. This
includes version control using *git*, continuous integration testing via
GitHub actions and a decent documentation hosted on `Read the docs`.

Features
--------
The current release centers around working with time-resolved spectra:

* Generate publication-ready plots with minimal effort.
57 | * Perform global fitting of transient data, including DAS, SAS, and 58 | compartment modeling. 59 | * Analyze polarization-resolved datasets. 60 | * Easily process data by selecting, filtering, and recalibrating it. 61 | * Correct dispersion automatically in the case of chirped spectra. 62 | * Obtain accurate error estimates for fitting results using lmfit 63 | _. 64 | * Analyze lifetime-density using regularized regression. 65 | * Analyze 2D spectroscopy data, including CLS-decay, diagonal extraction, 66 | pump-slice-amplitude spectrum, integration, and Gaussian fitting. 67 | 68 | Users 69 | ----- 70 | 71 | At the moment it is mostly me and other people in my groups. I would be happy 72 | if anyone would like to join the project! 73 | 74 | Citiation 75 | --------- 76 | If you use this package in your research, please cite the zenodo entry at the top. 77 | 78 | 79 | License 80 | ======= 81 | Standard BSD-License. See the LICENSE file for more details. -------------------------------------------------------------------------------- /skultrafast/kubo_fitting/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | # %% 3 | import numba 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | 7 | from scipy.constants import c 8 | from sympy import OmegaPower 9 | 10 | from skultrafast.unit_conversions import invps2cm, cm2invps 11 | 12 | print(cm2invps(1700)) 13 | cm2invps = c*1e-12 14 | invps2cm = 1/cm2invps 15 | 1/(1700 * cm2invps) 16 | 17 | # %% 18 | Δω = 5 19 | τ = 1 20 | ω = 3 21 | Delta = 5 22 | 23 | t2 = 0.3 24 | dt = 0.1 25 | n_t = 256 26 | n_zp = n_t*2 27 | t = np.arange(0, n_t)*dt 28 | 29 | λ = 1/τ 30 | 31 | two_level = False 32 | 33 | print(f'Δω / λ = {Δω / λ}') 34 | print(f'Δω = {Δω} ps^-1') 35 | print(f'λ = {λ} ps^-1') 36 | 37 | 38 | # %% 39 | 40 | 41 | T1, T3 = np.meshgrid(t, t,) 42 | 43 | 44 | @numba.jit 45 | def g(t, dom, lam): 46 | return (dom / lam)**2 * (np.exp(-lam*t)-1 + lam*t) 47 | 48 | 49 | 
def response_functions(f, **kwargs):
    """
    Compute the rephasing (R_r) and non-rephasing (R_nr) third-order
    response functions on the (T1, T3) coherence-time grid for the fixed
    population time ``t2``, given a lineshape function ``f`` called as
    ``f(t, Δω, λ)`` (Kubo-type).

    NOTE(review): in the ``two_level`` branch both R_r and R_nr carry the
    rephasing phase exp(-iω(-T1+T3)), while the three-level branch uses
    exp(-iω(T1+T3)) for R_nr -- confirm the two-level R_nr phase is intended.
    """
    #coords = T1, t2, T3
    anh = 5  # anharmonic shift of the excited-state absorption (module units)
    def g(x): return f(x, Δω, λ)
    if two_level:
        R_r = np.exp(-1j*ω*(-T1+T3))*np.exp(-g(T1)+g(t2) -
                                            g(T3)-g(T1+t2)-g(t2+T3)+g(T1+t2+T3))
        R_nr = np.exp(-1j*ω*(-T1+T3))*np.exp(-g(T1)-g(t2) -
                                             g(T3)+g(T1+t2)+g(t2+T3)-g(T1+t2+T3))
    else:
        # The (T1, T3) grid is square, so symmetric terms are transposes
        # of each other and only need to be evaluated once.
        gT1 = g(T1)
        gt2 = g(t2)
        gT3 = gT1.T
        gT1t2 = g(T1+t2)
        gt2T3 = gT1t2.T
        ga = g(T1+t2+T3)
        # Population factor: bleach/stimulated emission minus the
        # anharmonically shifted excited-state absorption.
        pop = (2-2*np.exp(-1j*anh*T3))
        R_r = np.exp(-1j*ω*(-T1+T3))*np.exp(-gT1+gt2-gT3-gT1t2-gt2T3+ga)*pop
        R_nr = np.exp(-1j*ω*(T1+T3))*np.exp(-gT1-gt2-gT3+gT1t2+gt2T3-ga)*pop
    # Halve the t=0 edges so the later FFT weights them trapezoid-like.
    R_r[:, 0] *= 0.5
    R_r.T[:, 0] *= 0.5
    R_nr[:, 0] *= 0.5
    R_nr.T[:, 0] *= 0.5
    return R_r, R_nr
ax.contourf(np.fft.fftshift(np.fliplr(fR_r)+fR_nr).real.T, 20, cmap='bwr') 116 | plt.setp(ax, xlim=(t.size-m/2, t.size+m/2), ylim=(t.size-m/2, t.size+m/2)) 117 | 118 | # %% 119 | 120 | # # %% 121 | # import sympy as s 122 | # from sympy.functions import Heaviside 123 | # T1, T2, T3, t = s.symbols('T1 T2 T3 t', real=True) 124 | # dw, w, lam = s.symbols('domega omega lambda', positive=True) 125 | # g = s.Lambda(t, (dw/lam)**2 * (s.exp(-lam*t)-1+lam*t)) 126 | 127 | # R_r = s.exp(-g(T1)+g(T2)-g(T3)-g(T1+T2)-g(T2+T3)+g(T1+T2+T3)) 128 | # R_nr = s.exp(-g(T1)-g(T2)-g(T3)+g(T1+T2)+g(T2+T3)-g(T1+T2+T3)) 129 | # rot = s.exp(-1j*w*(-T1+T3)) 130 | # s.simplify(R_r+R_nr) 131 | # 3#.integrals.fourier_transform(Heaviside(T1)*R_r) 132 | # # %% 133 | # from sympy.integrals.transforms import fourier_transform 134 | # fourier_transform(Heaviside(T2)*R_r, T2, w) 135 | # # %% 136 | 137 | # %% 138 | 139 | # %% 140 | -------------------------------------------------------------------------------- /skultrafast/pseudo_zernike/poly.py: -------------------------------------------------------------------------------- 1 | import dataclasses as dc 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | import numba as nb 6 | 7 | from math import factorial 8 | 9 | from functools import lru_cache 10 | 11 | @lru_cache(maxsize=None) 12 | def factor(n, l, k): 13 | nummerator = (-1) ** k * factorial(2*n + 1 - k) 14 | denummerator = ( 15 | factorial(k) 16 | * factorial(n + abs(l) + 1 - k) 17 | * factorial(n - abs(l) - k) 18 | ) 19 | return nummerator // denummerator 20 | 21 | 22 | @nb.njit(fastmath=True) 23 | def _build_cache(r, om, n): 24 | poly = r[None, :, :] ** np.arange(n + 1)[:, None, None] 25 | ex = np.exp(1j * np.arange(n + 1)[:, None, None] * om[None, :, :]) 26 | return poly, ex 27 | 28 | def S(n, l, rho): 29 | fac = np.math.factorial 30 | out = np.zeros_like(rho) 31 | for k in range((n - abs(l)) + 1): 32 | num = (-1) ** k * fac(2 * n - k) 33 | denum = fac(k) * fac(n + abs(l) + 1 - 
k) * fac(n - abs(l) - k) 34 | out += (num / denum)* rho ** (n - k) 35 | return out 36 | 37 | def W(n, m, rho, phi): 38 | return S(n, m, rho) * np.exp(1j * m * phi) 39 | 40 | @dc.dataclass 41 | class Polybase: 42 | """ 43 | Class for generating pseudo-Zernike polynomials. 44 | 45 | Parameters 46 | ---------- 47 | x : np.ndarray 48 | x-axis 49 | y : np.ndarray 50 | y-axis 51 | n : int 52 | Maximum order of the polynomials 53 | 54 | 55 | Attributes 56 | ---------- 57 | r : np.ndarray 58 | The radial coordinate 59 | om : np.ndarray 60 | The azimuthal coordinate 61 | polybase : np.ndarray 62 | The polynomial base 63 | 64 | """ 65 | x: np.ndarray 66 | y: np.ndarray 67 | n: int 68 | r: np.ndarray = dc.field(init=False) 69 | om: np.ndarray = dc.field(init=False) 70 | polybase: np.ndarray = dc.field(init=False) 71 | val_dict: dict[tuple[int, int], int] = dc.field(init=False) 72 | 73 | _polycache: np.ndarray = dc.field(init=False) 74 | _expcache: np.ndarray = dc.field(init=False) 75 | 76 | def __post_init__(self): 77 | x, y = np.meshgrid(self.x, self.y) 78 | self.r = np.sqrt(x**2 + y**2) 79 | self.r = np.where(self.r < 1, self.r, np.nan) 80 | self.om = np.arctan2(y, x) 81 | self._polycache, self._expcache = _build_cache(self.r, self.om, self.n) 82 | self._Rcache = dict() 83 | self.val_dict = dict() 84 | 85 | def make_base(self): 86 | l = [] 87 | l.append(np.where(self.r < 1, 1.0, np.nan)) 88 | self.val_dict[(0, 0)] = 0 89 | for n in range(1, self.n + 1): 90 | for m in range(0, n + 1): 91 | arr = self._V(n, m) 92 | l.append(arr.real) 93 | self.val_dict[(n, m)] = len(l) - 1 94 | if m > 0: 95 | l.append(arr.imag) 96 | self.val_dict[(n, -m)] = len(l) - 1 97 | return np.array(l) 98 | 99 | def _R(self, n, m): 100 | R = np.zeros(self.r.shape) 101 | hit, not_hit = 0, 0 102 | for s in range((n - abs(m)) + 1): 103 | if (n, m, s) not in self._Rcache: 104 | self._Rcache[(n, m, s)] = factor(n, m, s) * \ 105 | self._polycache[n - s] 106 | not_hit += 1 107 | else: 108 | hit += 1 109 | R += 
class Transition(object):
    """
    Represents a transition between two compartments of a kinetic model.

    Attributes
    ----------
    rate : sympy.Symbol
        Symbolic rate constant; named ``<from>_<to>`` unless an explicit
        name was supplied.
    from_comp, to_comp : str
        Source and target compartment names.
    qu_yield : number or sympy.Symbol
        Quantum yield of the transition, 1 by default.
    """
    def __init__(self, from_comp, to_comp, rate=None, qy=None):
        symbol_name = rate if rate is not None else from_comp + '_' + to_comp
        self.rate = sympy.Symbol(symbol_name, real=True, positive=True)
        self.from_comp = from_comp
        self.to_comp = to_comp
        if qy is None:
            self.qu_yield = 1
        elif isinstance(qy, str):
            self.qu_yield = sympy.Symbol(qy, real=True, positive=True)
        else:
            self.qu_yield = qy
    def build_matrix(self):
        """
        Builds the n x n k-matrix of the kinetic model.

        Diagonal entries collect the total (symbolic) outflow of each
        compartment, off-diagonal entries the inflow from other
        compartments. The matrix is also stored as ``self.mat``.
        """
        comp = get_comparments(self.transitions)
        idx_dict = dict(enumerate(comp))
        # Map compartment name -> row/column index.
        inv_idx = dict(zip(idx_dict.values(), idx_dict.keys()))
        mat = sympy.zeros(len(comp))
        for t in self.transitions:
            i = inv_idx[t.from_comp]
            # Effective rate (rate * yield) leaves the source compartment...
            mat[i, i] = mat[i, i] - t.rate * t.qu_yield
            if t.to_comp != 'zero':
                # ...and enters the target, unless it decays to the 'zero' sink.
                mat[inv_idx[t.to_comp], i] += t.rate * t.qu_yield

        self.mat = mat
        return mat
def get_comparments(list_trans):
    """
    Collect the compartment names referenced by a list of transitions.

    Names are returned in order of first appearance; the special sink
    'zero' is never reported as a target compartment.
    """
    found = []
    for trans in list_trans:
        if trans.from_comp not in found:
            found.append(trans.from_comp)
        target = trans.to_comp
        if target != 'zero' and target not in found:
            found.append(target)
    return found
28 | 29 | # Get the file location first 30 | 31 | fname = data_io.get_example_path('messpy') 32 | print("Tutorial MessPy-file located at %s" % fname) 33 | mpf = messpy.MessPyFile( 34 | fname, 35 | invert_data=True, # Changes the sign of the data 36 | is_pol_resolved=True, # If the data was recored polarization resolved. 37 | pol_first_scan='perp', # Polarisation of the first scan 38 | valid_channel=1, # Which channel to use, recent IR data always uses 1 39 | # Recent visible data uses 0 40 | ) 41 | 42 | print(mpf.data.shape) 43 | # %% 44 | # Simlar to TimeResSpec the MessPyFile class has a plotter subclass with various 45 | # plot methods. For example, the `compare_spec` method plots a averaged spectrum 46 | # for each central channel recored. 47 | 48 | mpf.plot.compare_spec() 49 | 50 | # %% 51 | # As we can see, the applied wavelength calibration used by messpy was not correct. 52 | # Let's change that. 53 | 54 | mpf.recalculate_wavelengths(8.8) 55 | mpf.plot.compare_spec() 56 | 57 | # %% 58 | # Note that `MessPyFile` uses sigma clipping when averaging the scans. If you 59 | # want more control over the process, use the `average_scans` method. For 60 | # example here we change the clipping range and use only the first 5 scans. 61 | 62 | mpf.average_scans(sigma=2, max_scan=20); 63 | # %% 64 | # The indivudal datasets, for each polarization and each spectral window can be 65 | # found in a dict belonging to the class. 66 | 67 | for key, ds in mpf.av_scans_.items(): 68 | print(key, ds) 69 | 70 | # %% 71 | # Now we can work with them directly. For example datasets can be combined manually 72 | 73 | iso_merge = mpf.av_scans_['iso0'].concat_datasets(mpf.av_scans_['iso1']) 74 | all_iso = iso_merge.concat_datasets(mpf.av_scans_['iso2']) 75 | 76 | # %% 77 | # Since this is quite common, this is also automated by the `avg_and_concat` 78 | # method. 
79 | 80 | para, perp, iso = mpf.avg_and_concat() 81 | iso.plot.spec(1, 3, 10, n_average=5); 82 | 83 | # %% 84 | # The spectrum looks a little bit janky now, since after merging the datasets 85 | # the points in the overlapping regions were seperataly recorded and the noise 86 | # within a recording is correlated. Hence, while the spectrum looks kind of 87 | # smooth within a window, the noise difference between the windows makes it 88 | # unsmooth. There is also a second issue with the merged spectrum: The point 89 | # density suggests a larger spectral resolution than available. To mitigate both 90 | # issues, we have to bin down the spectrum. We can either bin uniformly or only 91 | # merge channels that are too close together. 92 | 93 | fig, (ax0, ax1) = plt.subplots(2, figsize=(3, 4), sharex=True) 94 | 95 | bin_iso = iso.bin_freqs(30) 96 | bin_iso.plot.spec(1, 3, 10, n_average=5, marker='o', ax=ax0, ms=3) 97 | 98 | merge_iso = iso.merge_nearby_channels(8) 99 | merge_iso.plot.spec(1, 3, 10, n_average=5, marker='o', ax=ax1, ms=3) 100 | 101 | # Remove Legend and correct ylabel 102 | ax0.legend_ = None 103 | ax0.yaxis.label.set_position((0, 0.0)) 104 | ax1.legend_ = None 105 | ax1.set_ylabel(''); 106 | 107 | # %% 108 | # The prefered way to work with are polarisation resolved transient spectra is 109 | # to use `PolTRSpec`, which takes the two datasets we get from avg_and_concat. 110 | 111 | pol_ds = dataset.PolTRSpec(para, perp) 112 | merged_ds = pol_ds.merge_nearby_channels(8) 113 | merged_ds.plot.spec(1, n_average=4); 114 | 115 | 116 | 117 | # %% 118 | -------------------------------------------------------------------------------- /docs/zero-finding.rst: -------------------------------------------------------------------------------- 1 | Dispersion Handling 2 | =================== 3 | It is common to use chirped probe pulses in visible fs-pump-probe spectroscopy. 
4 | This results in a wavelength dependent time-zero, since the different probe 5 | wavelengths arrive at different times at the sample. The chirp is generally well 6 | modeled by a low-order polynomial. Therefore, to work with dispersion affected 7 | spectra, it is necessary to know the dispersion curve. 8 | 9 | There are various ways to measure the dispersion directly. Alternatively, one 10 | can estimate the dispersion directly from the data. The estimate can be used to 11 | correct the spectrum directly, or, if the dispersion is explicitly part of the 12 | fitted model, used as the starting guess. 13 | 14 | Estimation of the dispersion from the data 15 | ------------------------------------------ 16 | *skultrafast* can estimate the dispersion by first using a heuristic which 17 | estimates the time-zero for each wavelength. The resulting curve is then fitted 18 | with a polynomial using a robust fitting method. The robust method is necessary 19 | since the heuristics are error-prone. 20 | 21 | Heuristics 22 | ^^^^^^^^^^ 23 | *skultrafast* comes with different heuristics to estimate the time-zero from a 24 | single trace. The heuristic functions are located in the :py:mod:`zero_finding` 25 | module. Additionally, user-given heuristics are supported. Working directly with 26 | the functions should not be necessary when using the 27 | :py:obj:`~dataset.TimeResSpec` class. 28 | 29 | 1. `use_diff`, returns the index where the largest signal change is located. 30 | 2. `use_max`, returns the index with the largest positive signal. This is 31 | useful when the cross-phase modulation or two-photon contributions are 32 | strong, e.g. when measuring solvent signals. 33 | 3. `use_gaussian`, returns the index where the convolution with the first 34 | derivative of a Gaussian is the largest. This works like an averaged 35 | version of `use_diff`. 36 | 4. `use_first_abs`, returns the earliest index where the absolute value of the signal is 37 | above a given threshold.
The method works best when the threshold is 38 | small, therefore indicating the first signal contributions above the noise 39 | floor. This captures the dispersion curve quite well, minus a slight 40 | offset, since it captures the beginning of the pump-probe interaction. 41 | 42 | The image below compares the results of the different heuristics on a 43 | real signal. The signal has strong coherent contributions. 44 | 45 | .. plot:: zero_finding_lot.py 46 | :include-source: false 47 | :width: 75% 48 | :align: center 49 | 50 | The *top* figure shows a signal and the resulting estimates for the different 51 | heuristics. The horizontal lines mark the value used by the ``use_first_abs`` 52 | method. The *middle* figure shows the difference between sequential points; 53 | ``use_diff`` just returns the absolute argmax of these differences. The *bottom* 54 | figure shows the convolution of the first derivative of a gaussian with the 55 | signal. Again, ``use_gaussian`` just returns the absolute argmax of the 56 | convolution. The sigma is given in data-points. 57 | 58 | Since the signal shows strong coherent effects, the ``use_max`` method is the 59 | method of choice for this trace. But on other wavelengths, the non-coherent 60 | signal is stronger and that method may fail. Since both methods based on 61 | derivatives differ only by one point and are less susceptible to noise, they may be the 62 | better choice. Looking at the figure, one might think that the 63 | ``use_first_abs`` heuristic is useless, because it is clearly giving a too 64 | early estimation. But since this happens for almost all signals, the heuristic is 65 | very useful, since it reproduces the (offset) dispersion curve quite well. 66 | 67 | Robust fitting 68 | `````````````` 69 | 70 | In the second step, the resulting :math:`t_0(\omega)` data is approximated 71 | with a polynomial of low degree, using a robust fitting method from 72 | statsmodels.
def extract_freqs_from_gaussianlog(fname):
    """
    Extract vibrational data from a Gaussian log-file.

    Parameters
    ----------
    fname : str
        Path to the Gaussian output file.

    Returns
    -------
    ndarray
        Stacked rows: frequencies, and - when present in the file -
        IR intensities and Raman activities.
    """
    # One number pattern for all three line types. This keeps the sign of
    # imaginary (negative) frequencies, which the old character-class based
    # re.sub/split approach silently dropped; the old IR branch also had a
    # stray 'd' in its character class.
    number = re.compile(r"-?\d+\.\d+")
    fr, ir, raman = [], [], []

    # Context manager so the file handle is always closed (the original
    # leaked it).
    with open(fname) as f:
        for line in f:
            stripped = line.lstrip()
            if stripped.startswith("Frequencies --"):
                fr += map(float, number.findall(line))
            elif stripped.startswith("IR Inten"):
                ir += map(float, number.findall(line))
            elif stripped.startswith("Raman Activities"):
                raman += map(float, number.findall(line))

    # Bug fix: the original rebuilt ``arrs`` from [fr, ir] right before
    # returning, which discarded the Raman data and included an empty IR
    # list. Keep only the populated arrays, as the conditional
    # construction intends.
    arrs = [fr]
    if ir:
        arrs.append(ir)
    if raman:
        arrs.append(raman)
    return np.vstack([np.asarray(a).flatten() for a in arrs])
def get_processed_twodim_dataset(force_processing=False):
    """
    Return the example MeSCN 2D dataset, processed and cached as a pickle.

    On the first call (or when `force_processing` is True) the raw 2D data
    is downloaded via pooch, turned into a dataset, background corrected
    and cropped; the result is pickled into the pooch cache directory.
    Subsequent calls load the pickle directly.

    Parameters
    ----------
    force_processing : bool
        If True, reprocess the raw data even if a cached pickle exists.

    Returns
    -------
    The processed 2D dataset (the "iso" component).
    """
    cache = pooch.os_cache("skultrafast")
    import pickle
    if not (cache / "MeSCN_2D_data.pickle").exists() or force_processing:
        # Download and unzip the raw data, then locate the measurement
        # info file next to it.
        data = POOCH.fetch("MeSCN_2D_data.zip", processor=pooch.Unzip())
        p = Path(data[0]).parent
        info = list(Path(p).glob('*320.info'))
        # Imported lazily to avoid a circular import at module load time.
        from skultrafast.quickcontrol import QC2DSpec
        qc = QC2DSpec(info[0], probe_filter=1, upsampling=4)
        ds = qc.make_ds()["iso"]
        # Background correct and restrict to the spectral region of interest
        # (values in wavenumbers, matching the ranges used elsewhere in
        # this module's processing of this dataset).
        ds.background_correction((2170, 2100), 2)
        ds = ds.select_range((2130, 2200), (2100, 2200))
        with open(cache / "MeSCN_2D_data.pickle", "wb") as f:
            pickle.dump(ds, f)
    else:
        with open(cache / "MeSCN_2D_data.pickle", "rb") as f:
            ds = pickle.load(f)
    return ds
def gaussian_filter(tup, sigma=(2, 2)):
    """
    Apply a Gaussian filter to the data.

    (The original docstring said "uniform filter" - a copy-paste from
    `uniform_filter` above; this function calls `gaussian_filter`.)

    Parameters
    ----------
    tup:
        Data object with wl, t and data attributes.
    sigma:
        Standard deviations of the Gaussian kernel, one per data axis.

    Returns
    -------
    A new tup with the filtered data.
    """
    wl, t, d = tup.wl, tup.t, tup.data
    f = nd.gaussian_filter(d, sigma=sigma, mode="nearest")
    return dv.tup(wl, t, f)
def weighted_binner(n, wl, dat, std):
    """
    Bin the data into n wavelength bins using inverse-variance weights.

    Parameters
    ----------
    n : int
        Number of bins.
    wl : (m,) ndarray
        Wavelengths of the channels; need not be sorted.
    dat : (k, m) ndarray
        Data, channels along the second axis.
    std : (k, m) ndarray
        Standard deviations of `dat`; the weights are 1/std.

    Returns
    -------
    (binned, binned_wl) : tuple of ndarrays
        The weighted binned data with shape (k, n) and the mean
        wavelength of each bin.
    """
    i = np.argsort(wl)
    wl = wl[i]
    dat = dat[:, i]
    # Bug fix: the standard deviations must be reordered together with the
    # data, otherwise the weights belong to the wrong channels whenever the
    # input wavelengths are unsorted.
    std = std[:, i]
    idx = np.searchsorted(wl, np.linspace(wl.min(), wl.max(), n + 1))
    binned = np.empty((dat.shape[0], n))
    binned_wl = np.empty(n)
    for i in range(n):
        data = dat[:, idx[i]:idx[i + 1]]
        weights = 1 / std[:, idx[i]:idx[i + 1]]
        binned[:, i] = np.average(data, 1, weights)
        binned_wl[i] = np.mean(wl[idx[i]:idx[i + 1]])
    return binned, binned_wl
131 | 132 | Returns 133 | ------- 134 | : arr of bool 135 | The resulting boolean array 136 | """ 137 | lower, upper = sorted([a,b]) 138 | idx = (lower= 1] = 0 55 | data /= np.abs(data).max() 56 | # coeffs = np.linalg.lstsq( 57 | # nbase.reshape(nbase.shape[0], -1).T, data.flatten(), 58 | # )[0] 59 | # coeffs = np.sum(nbase.reshape((pb.n+1)**2, -1).T * data.flatten()[:, None], 0) 60 | mod = Ridge(alpha=1e-3, fit_intercept=False) 61 | res = mod.fit(nbase.reshape(nbase.shape[0], -1).T, data.flatten()) 62 | # fit = (coeffs[:, None, None] * nbase).sum(axis=0) 63 | fit = res.predict(nbase.reshape( 64 | nbase.shape[0], -1).T).reshape(data.shape) 65 | coeffs = res.coef_ 66 | rms = ((fit - data)**2).mean() 67 | if t_idx == 0: 68 | rmsl.append(rms) 69 | c.append(coeffs) 70 | 71 | plt.plot(rmsl, ' o') 72 | # %% 73 | plt.pcolormesh(x, y, fit, cmap="twilight_r") 74 | 75 | # %% 76 | cls_result = ds.cls(pu_range=7) 77 | # %% 78 | 79 | 80 | def cos_sim(a, b): 81 | return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)) 82 | 83 | 84 | cs = np.array([cos_sim(c[0], c[i]) for i in range(len(c))]) 85 | cd = 1-np.array([cos_sim(c[i], c[-1]) for i in range(len(c))]) 86 | 87 | # plt.plot(ds.t, cs, lw=2, c="r") 88 | # plt.plot(ds.t, cd) 89 | pzs = np.cos(np.pi/4)*cs + (1-np.cos(np.pi/4))*cd 90 | 91 | plt.plot(ds.t, pzs) 92 | plt.xscale('log') 93 | plt.twinx() 94 | cls_result.plot_cls() 95 | 96 | # %% 97 | for i in range(4): 98 | fig, ax = plt.subplots(subplot_kw={"aspect": "equal"}, figsize=(3, 3)) 99 | plt.pcolormesh(x, y, base[i]) 100 | plt.colorbar() 101 | # %% 102 | plt.plot(c[0]) 103 | plt.plot(c[1]) 104 | plt.plot(c[2]) 105 | plt.plot(c[3]) 106 | # %% 107 | 108 | cls_result.plot_cls() 109 | # %% 110 | 111 | # %% 112 | 113 | 114 | # %% 115 | x = np.linspace(-1, 1, 100) 116 | y = np.linspace(-1, 1, 100) 117 | 118 | ptest = Polybase(x, y, 5) 119 | nbase = ptest.make_base() 120 | ptest 121 | inv_val_dict = {v: k for k, v in ptest.val_dict.items()} 122 | nbase = 
def zernike_radial_polynomials(n, r):
    """
    Evaluate Zernike radial polynomials at the radii ``r``.

    Parameters
    ----------
    n : int
        Maximal radial order; must be non-negative.
    r : ndarray
        Radii, all in the closed interval [0, 1].

    Returns
    -------
    ndarray
        For ``n == 0`` a 1-D array of ones; otherwise an ``(n + 1, len(r))``
        array with one polynomial per row.

    Raises
    ------
    ValueError
        If any radius lies outside [0, 1] or ``n`` is negative.
    """
    if np.any((r > 1) | (r < 0) | (n < 0)):
        raise ValueError('r must be between 0 and 1, and n must be non-negative.')

    if n == 0:
        # Base case: constant polynomial.
        # NOTE(review): this returns a 1-D array while the recursive branch
        # below indexes the result like a 2-D array (``R1[m, :]``) - confirm
        # the recursion never reaches that combination, or the shapes clash.
        return np.ones(len(r))

    R = np.zeros((n + 1, len(r)))
    r_sqrt = np.sqrt(r)
    # Mask of radii whose odd power is non-zero; r == 0 ends up False.
    r0 = r_sqrt ** (2 * n + 1) != 0

    if np.any(r0):
        R[0, r0] = 1

    if np.any(~r0):
        # Recurse on the square roots of the remaining radii.
        # NOTE(review): ``R1 / r_sqrt[:, None]`` divides an
        # (len(m), k)-shaped array by a (k, 1) column - this only broadcasts
        # when len(m) == k; verify against the intended recurrence.
        r_sqrt = r_sqrt[~r0]
        R1 = zernike_radial_polynomials(n - 1, r_sqrt)
        m = np.arange(2, 2 * n + 2, 2)
        R1 = R1[m, :]
        R1 = R1 / r_sqrt[:, None]
        R[:, ~r0] = R1

    return R
    def add_decaying(self,
                     A: float,
                     Ac: float,
                     xc: float,
                     w: float,
                     tau: float,
                     angle: float,
                     peak_type: str = 'lor') -> int:
        """
        Add an exponentially decaying peak to the model.

        Parameters
        ----------
        A : float
            Starting value of the decaying amplitude.
        Ac : float
            Starting value of the constant amplitude offset.
        xc : float
            Starting value of the peak position.
        w : float
            Starting value of the peak width.
        tau : float
            Starting value of the decay time.
        angle : float
            Starting value of the angle used for the dichroic ratio.
        peak_type : str
            Peak shape, 'lor' for a Lorentzian or 'gauss' for a Gaussian.

        Returns
        -------
        int
            Number of peak functions in the model after adding this one.
        """
        names = "A Ac xc w tau angle"
        all_sym = [self.wl, self.t]
        for name in names.split(' '):
            # Create a numbered symbol per parameter and remember its
            # starting value; locals() maps the name to the argument value.
            sym = '%s_%d' % (name, self.n)
            all_sym.append(sympy.Symbol(sym))
            self.values[sym] = locals()[name]

        if peak_type == 'lor':
            self.funcs.append(lorentz(*all_sym))
        elif peak_type == 'gauss':
            self.funcs.append(gauss(*all_sym))
        self.n += 1
        # Record the free symbols (everything except the wl/t coordinates).
        self.args += all_sym[2:]
        return len(self.funcs)
perp.append(self.funcs[i][0]) 109 | para.append(self.funcs[i][1]) 110 | all_para = sum(para) 111 | all_perp = sum(perp) 112 | return all_para, all_perp 113 | 114 | def make_params(self): 115 | pa, pe = self.make_model() 116 | expr = sympy.Tuple(pa, pe) 117 | 118 | free = expr.free_symbols 119 | func = sympy.lambdify(list(free), expr) 120 | 121 | mod = lmfit.Model(func, ['wl', 't']) 122 | params = mod.make_params() 123 | for pn in params: 124 | p = params[pn] 125 | p.value = self.values[pn] 126 | if pn.startswith('angle'): 127 | p.min = 0 128 | p.max = 90 129 | elif pn.startswith('w'): 130 | p.min = 2 131 | y = mod.eval(wl=self.wl_arr[:, None], t=self.t_arr, **params) 132 | 133 | return params, mod 134 | 135 | def plot_peaks(self, params=None): 136 | if params is None: 137 | params, mod = self.make_params() 138 | 139 | para = sympy.Tuple(*(i[1] for i in self.funcs)) 140 | perp = sympy.Tuple(*(i[0] for i in self.funcs)) 141 | 142 | pa_func = sympy.lambdify(self.coords + list(params.keys()), para) 143 | pe_func = sympy.lambdify(self.coords + list(params.keys()), perp) 144 | 145 | pa = pa_func(t=self.t_arr, wl=self.wl_arr[:, None], **params) 146 | pe = pe_func(t=self.t_arr, wl=self.wl_arr[:, None], **params) 147 | for a, b in zip(pa, pe): 148 | l1, = plt.plot(a[:, 0], lw=2) 149 | plt.plot(b[:, 0], c=l1.get_color()) 150 | y = mod.eval(wl=self.wl_arr[:, None], t=self.t_arr, **params) 151 | -------------------------------------------------------------------------------- /skultrafast/fit_spectrum.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Module to fit the whole spektrum by peak functions. 4 | """ 5 | 6 | from __future__ import print_function 7 | from scipy.special import wofz 8 | from skultrafast.utils import sigma_clip 9 | from . 
def lorentz_peaks(x, A, x0, w):
    """
    Evaluate several Lorentzian peaks on a common x-axis.

    Parameters
    ----------
    x : (m,) ndarray
        Positions at which the peaks are evaluated.
    A, x0, w : array-like, each of length k
        Amplitudes, center positions and widths of the peaks.

    Returns
    -------
    (k, m) ndarray
        One row per peak.
    """
    amps, centers, widths = (np.asarray(v) for v in (A, x0, w))
    # Reduced coordinate of every (peak, position) pair via broadcasting.
    u = (x[None, :] - centers[:, None]) / widths[:, None]
    return amps[:, None] / (1 + u * u)
def bin_every_n(x, start_idx, n=10, reduction_func=lambda x: np.mean(x, 0)):
    """
    Reduce blocks of n consecutive rows to a single row each.

    Starting at `start_idx`, each block of up to `n` rows is sigma-clipped
    (sigma=2.5) along the first axis and then reduced with
    `reduction_func`.

    Parameters
    ----------
    x : ndarray
        Data to bin; a 1D array is treated as a single column.
    start_idx : int
        Row index at which binning starts.
    n : int
        Number of rows per block.
    reduction_func : callable
        Function reducing a clipped block along axis 0. Defaults to the
        mean, reproducing the previous hard-coded behavior.

    Returns
    -------
    ndarray
        Array with one row per block.
    """
    out = []
    if x.ndim == 1:
        x = x[:, None]
    for i in range(start_idx, x.shape[0], n):
        end_idx = min(i + n, x.shape[0])
        # Bug fix: ``reduction_func`` was accepted but never used; the mean
        # was hard-coded after the sigma clipping.
        out.append(reduction_func(sigma_clip(x[i:end_idx, :], sigma=2.5, axis=0)))
    return np.array(out)
@dv.add_to_cls(est)
def use_gaussian(dat, sigma=1):
    """
    Estimate a time-zero index per channel by convolving each trace with
    the first derivative of a Gaussian.

    Parameters
    ----------
    dat : (t, channel) ndarray
        Data with the time axis first.
    sigma : float
        Width of the Gaussian, in data points along the time axis.

    Returns
    -------
    ndarray
        For each channel, the index of the largest absolute response.
    """
    # order=1 along axis 0 yields a smoothed first derivative in time;
    # the channel axis (sigma 0) is left untouched.
    derivate = nd.gaussian_filter(dat, (sigma, 0), 1)
    return np.argmax(np.abs(derivate), 0)
def robust_fit_tz(wl, tn, degree=3, t=1):
    """
    Robustly fit a polynomial to the estimated time-zeros.

    An ordinary least-squares solution serves as the starting point for a
    robust refinement using a Cauchy loss, which suppresses outliers from
    failed heuristics.

    Parameters
    ----------
    wl : (m,) ndarray
        Wavelengths (or wavenumbers) of the channels.
    tn : (m,) ndarray
        Estimated time-zero for each channel.
    degree : int
        Degree of the fitted polynomial.
    t : float
        Soft margin (``f_scale``) of the robust loss.

    Returns
    -------
    (zeros, coefs) : tuple of ndarrays
        The polynomial evaluated at ``wl`` and its coefficients in
        highest-power-first order (suitable for ``np.polyval``).
    """
    exponents = np.arange(degree + 1)
    vander = wl[:, None] ** exponents[None, :]
    # Plain least squares as the starting guess for the robust solver.
    start = np.linalg.lstsq(vander, tn, rcond=1e-10)[0]

    def residual(coefs):
        return tn - vander @ coefs

    sol = least_squares(residual, start, loss='cauchy', f_scale=t)
    return vander @ sol.x, sol.x[::-1]
def get_tz_cor(tup, method=use_diff, deg=3, plot=False, **kwargs):
    """
    Fully automatic time-zero correction.

    First a per-channel time-zero is estimated with `method`, then a
    polynomial of degree `deg` is robustly fitted to those estimates,
    and finally the data is interpolated onto the corrected time axis.

    Parameters
    ----------
    tup :
        Data object with wl, t and data attributes.
    method : callable
        Heuristic returning a time-index estimate per channel.
    deg : int
        Degree of the polynomial fitted to the estimates.
    plot : bool
        If True, plot a diagnostic figure of the zero-finding.
    **kwargs :
        Passed through to `method`.

    Returns
    -------
    (cor, fit) : tuple
        The time-zero corrected data and the fitted dispersion curve.
    """
    idx = method(tup.data, **kwargs)
    raw_tn = tup.t[idx]
    # Channels containing NaNs are excluded from the robust fit.
    no_nan = ~np.any(np.isnan(tup.data), 0)
    fit, p = robust_fit_tz(tup.wl[no_nan], raw_tn[no_nan], deg)
    # Re-evaluate the fitted polynomial on *all* channels, including the
    # NaN channels excluded above.
    fit = np.polyval(p, tup.wl)
    cor = interpol(tup, fit)
    if plot:
        from . import plot_funcs as pl
        pl._plot_zero_finding(tup, raw_tn, fit, cor)
    return cor, fit
dsb.data 22 | dsl2.append(ds) 23 | dsbl.append(dsb) 24 | 25 | iso, para, perp = dsl 26 | pol = dataset.PolTRSpec(para,perp) 27 | both = para.copy() 28 | both.data = np.hstack((both.data, perp.data)) 29 | both.wavenumbers = np.hstack((both.wn, both.wn)) 30 | 31 | # %% 32 | nt = dsbl[0].estimate_dispersion('abs',(1,), shift_result=0.03) 33 | 34 | # %% 35 | 36 | fit = FitterTorch(dsbl[0], zero_func=nt.polynomial, disp_poly_deg=3, model_coh=True, 37 | use_cuda=True) 38 | paras, mini = fit.start_lmfit([0.001, 0.01], [ 0.1], False, False, False) 39 | mini.params['w1'].max = 0.02 40 | res1 = mini.leastsq() 41 | 42 | mini.userfcn(res1.params) 43 | res1 44 | # %% 45 | fig, ax = proplot.subplots() 46 | for wn in [11000, 14500, 18000]: 47 | ax.plot(fit.tt.T[:, iso.wn_idx(wn)], fit.dataset.wn_d(wn), lw=2) 48 | ax.plot(fit.tt.T[:, iso.wn_idx(wn)], fit.model.cpu().T[:, iso.wn_idx(wn)], lw=1, c='k') 49 | ax.format(xlim=(-.3, .3)) 50 | # %% 51 | fig, ax = proplot.subplots() 52 | for wn in [11000, 12000,16000, 18000]: 53 | ax.plot(dsb.t, fit.dataset.wn_d(wn), lw=2) 54 | ax.plot(dsb.t, fit.model.cpu().T[:, iso.wn_idx(wn)], lw=1, c='k') 55 | ax.format(xlim=(-.3, .3)) 56 | 57 | 58 | 59 | 60 | # %% 61 | para, perp, iso = dsl2 62 | 63 | both2 = para.copy() 64 | both2.data = np.hstack((both2.data, perp.data)) 65 | both2.wavenumbers = np.hstack((both.wn, both.wn)) 66 | 67 | base = both.data.copy()*0 68 | coh = np.hstack((dsbl[1].data, dsbl[2].data)) 69 | base[ds.t_idx(-.15):ds.t_idx(-.15)+dsb.t.size, :] = coh 70 | 71 | fit2 = FitterTorch(both2, zero_func=nt.polynomial, disp_poly_deg=3, 72 | model_coh=False, use_cuda=True)#, extra_base=base.T[..., None]) 73 | 74 | paras2, mini2 = fit2.start_lmfit([0.001, 0.4], [ 0.1, 0.3, 2, 4, 10000], 75 | True, 76 | False, 77 | False, 78 | ) 79 | res2 = mini2.leastsq() 80 | mini2.userfcn(res2.params) 81 | res2 82 | # %% 83 | fig, (ax, ax2) = proplot.subplots(nrows=2, axwidth=4, axheight=3, sharex=0) 84 | off = 0 85 | for wn in [11000, 12300, 14500, 
for wn in [11000, 12300, 14500, 18000]:
    # Model (black) and data for this wavenumber, shifted by `off`.
    ax.plot(fit2.tt.T[:, iso.wn_idx(wn)], fit2.model.cpu().T[:, iso.wn_idx(wn)]+off,
            lw=1, c='k')
    ax.plot(fit2.tt.T[:, iso.wn_idx(wn)], both2.wn_d(wn)+off, lw=1)

    # Buffer signal on its time-zero-shifted axis for comparison.
    ax.plot(dsbl[1].t - fit2.t_zeros[both.wn_idx(wn)], dsbl[1].wn_d(wn)+off, lw=1)

    ax2.plot(fit2.tt.T[:, iso.wn_idx(wn)], fit2.model.cpu().T[:, iso.wn_idx(wn)]+off,
             lw=1, c='k')
    ax2.plot(fit2.tt.T[:, iso.wn_idx(wn)], both2.wn_d(wn)+off, lw=1)

    ax2.plot(dsbl[1].t - fit2.t_zeros[both.wn_idx(wn)], dsbl[1].wn_d(wn)+off, lw=1)
    off += 15


ax.format(xlim=(-.1, 1.5), xscale='linear')
ax2.format(xlim=(.1, 15), xscale='log')

# %%
# Plot the decay-associated amplitudes: first half of the stacked axis is
# para, second half is perp (same colors for matching components).
from skultrafast import plot_helpers
fig, ax = proplot.subplots(width='4in', height='3in')
proplot.rc['axes.labelsize'] = 'small'
k = iso.wn.size
print(fit.c.shape, iso.wn.size)
# Drop the three coherent-artifact columns if they were modelled.
end = -3 if fit2.model_coh else None
fit2.c = fit2.c.cpu()
l = ax.plot(both.wavenumbers[:k], fit2.c[:k, :end], lw=1)
l2 = ax.plot(both.wavenumbers[:k], fit2.c[k:, :end], lw=1)
for i, j in zip(l, l2):
    j.set_color(i.get_color())
leg = ['%.2f ps' % v for i, v in res2.params.items() if i[0] == 't']
if fit2.extra_base is not None:
    leg += ['Solvent']
ax.legend(l, leg, ncol=2)
ax.format(xlim=(both.wn.min(), both.wn.max()),
          xlabel=plot_helpers.freq_unit,
          ylabel=plot_helpers.sig_label)
d = (lambda x: 1e7 / x, lambda x: 1e7 / x)
ax[0].dualx(d, label='Wavelength [nm]')
# %%
# %%
# Confidence intervals for the second fit.
# NOTE(review): `lmfit` is not imported in this file directly — presumably
# it arrives via the star import from pytorch_fitter; verify.
ci, ci2 = lmfit.conf_interval(mini2, res2, trace=True)
# %%
ci

# %%
# %%
# Residual / data inspection plots.
from matplotlib import pyplot as plt, use
plt.pcolormesh(fit.tt, iso.wn, fit.dev_data, cmap='turbo')
plt.xlim(-0.2, 4)
# %%
polf = pol
polf.plot.trans(12000)
polf.plot.trans(12000)
nt = polf.cut_freq(12000, 14000).iso.estimate_dispersion('diff', (2,))
# %%

@pytest.fixture(scope='session')
def datadir2d(tmp_path_factory):
    """Download (or reuse) the example 2D dataset and return its path."""
    return get_twodim_dataset()


@pytest.fixture(scope='session')
def two_d(datadir2d) -> TwoDim:
    """Build the isotropic TwoDim dataset from the quickcontrol info file."""
    info_files = list(Path(datadir2d).glob('*320.info'))
    qc = QC2DSpec(info_files[0])
    return qc.make_ds()["iso"]


def test_saveload(two_d, tmp_path_factory):
    fname = tmp_path_factory.mktemp('data') / 'test.npz'
    two_d.save_numpy(fname)
    TwoDim.load_numpy(fname)


def test_select(two_d):
    ds = two_d.copy().select_range((2030, 2200), (2030, 2200))
    for axis in (ds.pump_wn, ds.probe_wn):
        assert axis.size > 0
        assert axis.min() > 2030
        assert axis.max() < 2200
    ds = ds.select_t_range(1)
    assert ds.t.min() > 1


@pytest.fixture(scope='session')
def two_d_processed(two_d) -> TwoDim:
    """A copy restricted to the spectral region used by most tests."""
    return two_d.copy().select_range((2030, 2200), (2030, 2200))


def test_integrate(two_d_processed):
    two_d_processed.integrate_pump()


def test_cls(two_d_processed):
    ds = two_d_processed.copy()
    ds.single_cls(3)
    for method in ('quad', 'fit', 'log_quad', 'skew_fit', 'nodal'):
        ds.single_cls(3, method=method)


def test_cls_subrange(two_d_processed):
    ds = two_d_processed.copy()
    ds.single_cls(3, pr_range=(2140, 2169), pu_range=(2140, 2169))


def test_all_cls(two_d_processed: TwoDim):
    cls_result = two_d_processed.cls()
    for with_const in (True, False):
        for with_weights in (True, False):
            cls_result.exp_fit([1], use_const=with_const,
                               use_weights=with_weights)
            cls_result.exp_fit([1, 10], use_const=with_const,
                               use_weights=with_weights)
            assert cls_result.exp_fit_result_ is not None
    cls_result.plot_cls()


def test_diag(two_d_processed: TwoDim):
    two_d_processed.diag_and_antidiag(3)
    two_d_processed.diag_and_antidiag(1, offset=0)


def test_savetext(two_d_processed, tmp_path_factory):
    two_d_processed.save_txt(tmp_path_factory.mktemp('data'))


def test_twodplot_contour(two_d_processed):
    plotter = two_d_processed.plot
    plotter.contour(1)
    plotter.contour(1, 3, 5, )
    plotter.elp(1)


def test_contour_with_cls(two_d_processed):
    cls_result = two_d_processed.cls()
    two_d_processed.plot.contour(1, 10, 30, cls_result=cls_result)


def test_bg_correct(two_d_processed: TwoDim):
    ds = two_d_processed.copy()
    ds.background_correction((2130, 2160))


def test_psa(two_d_processed: TwoDim):
    two_d_processed.pump_slice_amp(3)
    two_d_processed.plot.psa(3)


def test_integrate_reg(two_d_processed: TwoDim):
    two_d_processed.integrate_reg((2130, 2160), (2130, 2160))


def test_exp_fit(two_d_processed: TwoDim):
    res = two_d_processed.fit_das([1, 10])
    assert two_d_processed.fit_exp_result_ is not None
    two_d_processed.fit_das([1, 10], fix_last_decay=True)
    assert two_d_processed.fit_exp_result_ is not None
    assert res.taus.size == 2
def test_min_max(two_d_processed: TwoDim):
    mm = two_d_processed.get_minmax(3)
    assert mm['ProbeMin'] > mm['ProbeMax']
    assert mm['PumpMin'] > mm['PumpMax']


def test_mark_minmax(two_d_processed: TwoDim):
    two_d_processed.plot.mark_minmax(3)
    two_d_processed.plot.mark_minmax(3, marker='o')


def test_data_at(two_d_processed: TwoDim):
    ds = two_d_processed
    # Fixing two of (t, pump_wn, probe_wn) yields a slice along the third.
    assert ds.data_at(t=1, pump_wn=2160).size == ds.probe_wn.size
    assert ds.data_at(t=1, probe_wn=2160).size == ds.pump_wn.size
    assert ds.data_at(probe_wn=2160, pump_wn=2160).size == ds.t.size
    assert ds.data_at(t=1, probe_wn=2160, pump_wn=2160).size == 1


def test_plot_trans(two_d_processed: TwoDim):
    plotter = two_d_processed.plot
    plotter.trans(2160, 2160, color='k')
    plotter.trans([2160, 2180], [2160, 2160])
    lines = plotter.trans(2160, [2160, 2160])
    assert len(lines) == 2


def test_gaussfit(two_d_processed: TwoDim):
    result = two_d_processed.fit_gauss()
    result.plot_cls()


def test_slice_plots(two_d_processed: TwoDim):
    assert len(two_d_processed.plot.probe_slice(2150, 0.2, 0.5)) == 2
    assert len(two_d_processed.plot.pump_slice(2150, 0.2, 0.5)) == 2
def test_methods():
    ds = TimeResSpec(wl, t, data)
    binned = ds.bin_freqs(300)
    ds_wn = TimeResSpec(1e7 / wl, t, data, freq_unit='cm', disp_freq_unit='cm')
    binned_wn = ds_wn.bin_freqs(50)
    assert np.all(np.isfinite(binned_wn.data))

    assert len(binned.wavelengths) == 300
    cut = ds.cut_freq(400, 600)
    assert np.all(cut.wavelengths > 600)
    cut = ds.cut_time(-100, 1)
    assert np.all(cut.t > .99)
    cut = ds.bin_times(5)
    assert cut.t.size == np.ceil(ds.t.size / 5)
    ds.mask_freqs([(400, 600)])
    assert np.all(ds.data.mask[:, ds.wl_idx(550)])
    shifted = ds.scale_and_shift(2, t_shift=1, wl_shift=10)
    assert_almost_equal(2 * ds.data, shifted.data)
    assert_almost_equal(ds.t + 1, shifted.t)
    assert_almost_equal(ds.wavelengths + 10, shifted.wavelengths)
    assert_almost_equal(1e7 / shifted.wavelengths, shifted.wavenumbers)


def test_est_disp():
    ds = TimeResSpec(wl, t, data)
    ds.auto_plot = False
    for heuristic in ('abs', 'diff', 'gauss_diff', 'max'):
        ds.estimate_dispersion(heuristic=heuristic)


def test_fitter():
    ds = TimeResSpec(wl, t, data)
    ds.fit_exp([0.1, 0.1, 1, 1000])


def test_error_calc():
    ds = TimeResSpec(wl, t, data)
    result = ds.fit_exp([0.1, 0.1, 1, 1000])
    result.calculate_stats()


def test_das_plots():
    ds = TimeResSpec(wl, t, data)
    ds.fit_exp([0.1, 0.1, 1, 1000])
    ds.plot.das()
    ds.plot.edas()


def test_das_pol_plots():
    ds = TimeResSpec(wl, t, data)
    pds = PolTRSpec(ds, ds)  # fake pol: reuse the same dataset twice
    pds.fit_exp([0.1, 0.1, 1, 1000])

    pds.plot.das()
    pds.plot.edas()
def test_sas_pol_plots():
    from skultrafast.kinetic_model import Model
    ds = TimeResSpec(wl, t, data)
    pds = PolTRSpec(ds, ds)  # fake pol: reuse the same dataset twice
    pds.fit_exp([0.1, 0.1, 1, 1000])
    model = Model()
    model.add_transition('S1', 'S1*', 'k1')
    model.add_transition('S1', 'zero', 'k2')
    pds.plot.sas(model)


def test_sas():
    from skultrafast.kinetic_model import Model
    ds = TimeResSpec(wl, t, data)
    result = ds.fit_exp([0.1, 0.1, 1, 1000])
    model = Model()
    model.add_transition('S1', 'S1*', 'k1')
    model.add_transition('S1', 'zero', 'k2')
    result.make_sas(model, {})

    # Same scheme but with an explicit quantum yield on the first step.
    model_qy = Model()
    model_qy.add_transition('S1', 'S1*', 'k1', 'qy1')
    model_qy.add_transition('S1', 'zero', 'k2')
    result.make_sas(model_qy, {'qy1': 0.5})


def test_merge():
    ds = TimeResSpec(wl, t, data)
    merged = ds.merge_nearby_channels(10)
    assert merged.wavelengths.size < ds.wavelengths.size


def test_pol_tr():
    para = TimeResSpec(wl, t, data)
    perp = TimeResSpec(wl, t, data)
    ps = PolTRSpec(para=para, perp=perp)
    binned = ps.bin_freqs(10)
    assert binned.para.wavenumbers.size == 10
    assert binned.perp.wavenumbers.size == 10
    assert_almost_equal(binned.perp.data, binned.para.data)
    ps.subtract_background()
    ps.mask_freqs([(400, 550)])

    assert ps.para.data.mask[1, ps.para.wl_idx(520)]
    cut = ps.cut_freq(400, 550)
    assert np.all(cut.para.wavelengths >= 550)
    assert np.all(cut.perp.wavelengths >= 550)
    ps.bin_times(6)
    ps.scale_and_shift(1, 0.5)
    ps.copy()


def test_plot():
    ds = TimeResSpec(wl, t, data).bin_freqs(50)
    ds.plot.trans([550])
    ds.plot.spec([2, 10])
    ds.plot.trans_integrals((1e7 / 550, 1e7 / 600))
    ds.plot.trans_integrals((1e7 / 600, 1e7 / 500))
    ds.plot.trans([550], norm=1)
    ds.plot.trans([550], norm=1, marker='o')
    ds.plot.map(plot_con=0)
    ds.plot.svd()
def test_concat():
    ds = TimeResSpec(wl, t, data).bin_freqs(50)
    mid = ds.wavelengths.size // 2
    # Split at the middle channel and check the halves re-join losslessly.
    upper = ds.cut_freq(ds.wavelengths[mid], np.inf)
    lower = ds.cut_freq(-np.inf, ds.wavelengths[mid])
    joined = upper.concat_datasets(lower)
    assert np.allclose(joined.data, ds.data)
    pol_full = PolTRSpec(ds, ds)
    pol_upper = PolTRSpec(upper, upper)
    pol_lower = PolTRSpec(lower, lower)
    pol_joined = pol_upper.concat_datasets(pol_lower)
    for attr in ('para', 'perp', 'iso'):
        assert np.allclose(getattr(pol_joined, attr).data,
                           getattr(pol_full, attr).data)


def test_pol_plot():
    ds = TimeResSpec(wl, t, data).bin_freqs(50)
    pds = PolTRSpec(para=ds, perp=ds).bin_freqs(50)
    pds.plot.trans([550])
    pds.plot.spec([2, 10])
    pds.plot.trans([550], norm=1)
    pds.plot.trans([550], norm=1, marker='o')
31 | """ 32 | 33 | w = w / 1.4142135623730951 34 | n, m = ta.shape 35 | y = np.zeros((n, m, 3)) 36 | 37 | if tz != 0: 38 | ta = ta - tz 39 | 40 | _coh_loop(y, ta, w, n, m) 41 | #y_n = y / np.max(np.abs(y), 0) 42 | return y 43 | 44 | 45 | exp_half = np.exp(0.5) 46 | 47 | 48 | @njit(parallel=True, cache=True) 49 | def _coh_loop(y, ta, w, n, m): 50 | for i in prange(n): 51 | for j in prange(m): 52 | tt = ta[i, j] 53 | if tt / w < 3.: 54 | y[i, j, 55 | 0] = np.exp(-0.5 * (tt/w) * (tt/w)) # / (w * np.sqrt(2 * 3.14159265)) 56 | y[i, j, 1] = y[i, j, 0] * (-tt * exp_half / w) 57 | y[i, j, 2] = y[i, j, 0] * (tt*tt/w/w - 1) 58 | #y[i, j, 2] = y[i, j, 0] * (-tt ** 3 / w ** 6 + 3 * tt / w ** 4) 59 | 60 | 61 | @njit(parallel=True, fastmath=True, cache=True) 62 | def _fold_exp_and_coh(t_arr, w, tz, tau_arr): 63 | a = _fold_exp(t_arr, w, tz, tau_arr) 64 | b = _coh_gaussian(t_arr, w, tz) 65 | return a, b 66 | 67 | 68 | @njit 69 | def fast_erfc(x, cache=True): 70 | """ 71 | Calculates the erfc near zero faster than 72 | the libary function, but has a bigger error, which 73 | is not a problem for us. 74 | 75 | Parameters 76 | ---------- 77 | x: float 78 | The array 79 | 80 | Returns 81 | ------- 82 | ret: float 83 | The erfc of x. 84 | """ 85 | a1 = 0.278393 86 | a2 = 0.230389 87 | a3 = 0.000972 88 | a4 = 0.078108 89 | smaller = x < 0 90 | if smaller: 91 | x = x * -1. 92 | bot = 1 + a1*x + a2*x*x + a3*x*x*x + a4*x*x*x*x 93 | ret = 1. / (bot*bot*bot*bot) 94 | 95 | if smaller: 96 | ret = -ret + 2. 97 | 98 | return ret 99 | 100 | 101 | @njit(fastmath=True, parallel=True, cache=True) 102 | def folded_fit_func(t, tz, w, k): 103 | """ 104 | Returns the value of a folded exponentials. 105 | Employs some domain checking for making the calculation. 106 | 107 | Parameters 108 | ---------- 109 | t: float 110 | The time. 111 | tz: float 112 | Timezero. 113 | w: 114 | Width of the gaussian system response. 115 | k: 116 | rate of the decay. 117 | """ 118 | t = t - tz 119 | if t < -5. 
@njit(fastmath=True, parallel=True, cache=True)
def folded_fit_func(t, tz, w, k):
    """
    Returns the value of a folded exponential (exponential decay convolved
    with a gaussian IRF). Employs domain checking to avoid overflow.

    Parameters
    ----------
    t : float
        The time.
    tz : float
        Time zero.
    w : float
        Width of the gaussian system response.
    k : float
        Rate of the decay.
    """
    t = t - tz
    if t < -5. * w:
        return 0.
    elif t < 5. * w:
        return np.exp(k * (w*w*k/4.0 - t)) * 0.5 * fast_erfc(-t / w + w*k/2.)
    else:
        # Fix: was `elif t > 5. * w:`, which fell through and returned
        # None for t == 5*w exactly.
        return np.exp(k * (w * w * k / (4.0) - t))


@njit(cache=True)
def _fold_exp(t_arr, w, tz, tau_arr):
    """
    Returns the values of the folded exponentials for given parameters.

    Parameters
    ----------
    t_arr : ndarray(N, M)
        Array containing the time-coordinates.
    w : float
        The assumed width/sq2.
    tz : float
        The assumed time zero.
    tau_arr : ndarray(K)
        The K decay times.

    Returns
    -------
    y : ndarray(N, M, K)
        Folded exponentials for the given taus.
    """
    n, m = t_arr.shape
    if w != 0:
        l = tau_arr.size
        out = np.empty((l, m, n))
        _fold_exp_loop(out, tau_arr, t_arr, tz, w, l, m, n)
        return out.T
    else:
        # No IRF: plain exponentials.
        k = -1 / tau_arr
        out = np.exp((t_arr.reshape(n, m, 1) - tz) * k.reshape(1, 1, -1))
        return out


@njit(fastmath=True, cache=True)
def _fold_exp_loop(out, tau_arr, t_arr, tz, w, l, m, n):
    """Scalar loop behind _fold_exp; same branches as folded_fit_func."""
    for tau_idx in range(l):
        k = 1 / tau_arr[tau_idx]
        for j in range(m):
            for i in range(n):
                t = t_arr[i, j] - tz
                if t < -5. * w:
                    ret = 0.
                elif t < 5. * w:
                    ret = np.exp(k * (w*w*k/4.0 - t)) * 0.5 * fast_erfc(-t / w + w*k/2.)
                else:
                    # Fix: was `elif t > 5. * w:`, which left `ret` unbound
                    # for t == 5*w exactly.
                    ret = np.exp(k * (w * w * k / (4.0) - t))
                out[tau_idx, j, i] = ret
192 | 193 | Returns 194 | ------- 195 | y: ndarray(N,M, K) 196 | Exponentials for given taus and t_array. 197 | """ 198 | rates = 1 / tau_arr[:, None, None] 199 | if not tz == 0: 200 | t_arr -= tz 201 | return np.exp(-rates * t_arr.T[None, ...]).T 202 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. linkcheck to check all external links for integrity 37 | echo. 
doctest to run all doctests embedded in the documentation if enabled 38 | goto end 39 | ) 40 | 41 | if "%1" == "clean" ( 42 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 43 | del /q /s %BUILDDIR%\* 44 | goto end 45 | ) 46 | 47 | if "%1" == "html" ( 48 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 49 | if errorlevel 1 exit /b 1 50 | echo. 51 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 52 | goto end 53 | ) 54 | 55 | if "%1" == "dirhtml" ( 56 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 57 | if errorlevel 1 exit /b 1 58 | echo. 59 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 60 | goto end 61 | ) 62 | 63 | if "%1" == "singlehtml" ( 64 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 65 | if errorlevel 1 exit /b 1 66 | echo. 67 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 68 | goto end 69 | ) 70 | 71 | if "%1" == "pickle" ( 72 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 73 | if errorlevel 1 exit /b 1 74 | echo. 75 | echo.Build finished; now you can process the pickle files. 76 | goto end 77 | ) 78 | 79 | if "%1" == "json" ( 80 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 81 | if errorlevel 1 exit /b 1 82 | echo. 83 | echo.Build finished; now you can process the JSON files. 84 | goto end 85 | ) 86 | 87 | if "%1" == "htmlhelp" ( 88 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 89 | if errorlevel 1 exit /b 1 90 | echo. 91 | echo.Build finished; now you can run HTML Help Workshop with the ^ 92 | .hhp project file in %BUILDDIR%/htmlhelp. 93 | goto end 94 | ) 95 | 96 | if "%1" == "qthelp" ( 97 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 98 | if errorlevel 1 exit /b 1 99 | echo. 
100 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 101 | .qhcp project file in %BUILDDIR%/qthelp, like this: 102 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\skultrafast.qhcp 103 | echo.To view the help file: 104 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\skultrafast.ghc 105 | goto end 106 | ) 107 | 108 | if "%1" == "devhelp" ( 109 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 110 | if errorlevel 1 exit /b 1 111 | echo. 112 | echo.Build finished. 113 | goto end 114 | ) 115 | 116 | if "%1" == "epub" ( 117 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 118 | if errorlevel 1 exit /b 1 119 | echo. 120 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 121 | goto end 122 | ) 123 | 124 | if "%1" == "latex" ( 125 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 129 | goto end 130 | ) 131 | 132 | if "%1" == "text" ( 133 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 134 | if errorlevel 1 exit /b 1 135 | echo. 136 | echo.Build finished. The text files are in %BUILDDIR%/text. 137 | goto end 138 | ) 139 | 140 | if "%1" == "man" ( 141 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 142 | if errorlevel 1 exit /b 1 143 | echo. 144 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 145 | goto end 146 | ) 147 | 148 | if "%1" == "texinfo" ( 149 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 150 | if errorlevel 1 exit /b 1 151 | echo. 152 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 153 | goto end 154 | ) 155 | 156 | if "%1" == "gettext" ( 157 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 158 | if errorlevel 1 exit /b 1 159 | echo. 160 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 
161 | goto end 162 | ) 163 | 164 | if "%1" == "changes" ( 165 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 166 | if errorlevel 1 exit /b 1 167 | echo. 168 | echo.The overview file is in %BUILDDIR%/changes. 169 | goto end 170 | ) 171 | 172 | if "%1" == "linkcheck" ( 173 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 174 | if errorlevel 1 exit /b 1 175 | echo. 176 | echo.Link check complete; look for any errors in the above output ^ 177 | or in %BUILDDIR%/linkcheck/output.txt. 178 | goto end 179 | ) 180 | 181 | if "%1" == "doctest" ( 182 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 183 | if errorlevel 1 exit /b 1 184 | echo. 185 | echo.Testing of doctests in the sources finished, look at the ^ 186 | results in %BUILDDIR%/doctest/output.txt. 187 | goto end 188 | ) 189 | 190 | :end 191 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
16 | 17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 18 | 19 | help: 20 | @echo "Please use \`make ' where is one of" 21 | @echo " html to make standalone HTML files" 22 | @echo " dirhtml to make HTML files named index.html in directories" 23 | @echo " singlehtml to make a single large HTML file" 24 | @echo " pickle to make pickle files" 25 | @echo " json to make JSON files" 26 | @echo " htmlhelp to make HTML files and a HTML help project" 27 | @echo " qthelp to make HTML files and a qthelp project" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 31 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 32 | @echo " text to make text files" 33 | @echo " man to make manual pages" 34 | @echo " texinfo to make Texinfo files" 35 | @echo " info to make Texinfo files and run them through makeinfo" 36 | @echo " gettext to make PO message catalogs" 37 | @echo " changes to make an overview of all changed/added/deprecated items" 38 | @echo " linkcheck to check all external links for integrity" 39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 40 | 41 | clean: 42 | -rm -rf $(BUILDDIR)/* 43 | 44 | html: 45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 48 | 49 | dirhtml: 50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 51 | @echo 52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 53 | 54 | singlehtml: 55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 56 | @echo 57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 
58 | 59 | pickle: 60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 61 | @echo 62 | @echo "Build finished; now you can process the pickle files." 63 | 64 | json: 65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 66 | @echo 67 | @echo "Build finished; now you can process the JSON files." 68 | 69 | htmlhelp: 70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 71 | @echo 72 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 73 | ".hhp project file in $(BUILDDIR)/htmlhelp." 74 | 75 | qthelp: 76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 77 | @echo 78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/skultrafast.qhcp" 81 | @echo "To view the help file:" 82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/skultrafast.qhc" 83 | 84 | devhelp: 85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 86 | @echo 87 | @echo "Build finished." 88 | @echo "To view the help file:" 89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/skultrafast" 90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/skultrafast" 91 | @echo "# devhelp" 92 | 93 | epub: 94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 95 | @echo 96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 97 | 98 | latex: 99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 100 | @echo 101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 103 | "(use \`make latexpdf' here to do that automatically)." 104 | 105 | latexpdf: 106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 107 | @echo "Running LaTeX files through pdflatex..." 
108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 110 | 111 | text: 112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 113 | @echo 114 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 115 | 116 | man: 117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 118 | @echo 119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 120 | 121 | texinfo: 122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 123 | @echo 124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 125 | @echo "Run \`make' in that directory to run these through makeinfo" \ 126 | "(use \`make info' here to do that automatically)." 127 | 128 | info: 129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 130 | @echo "Running Texinfo files through makeinfo..." 131 | make -C $(BUILDDIR)/texinfo info 132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 133 | 134 | gettext: 135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 136 | @echo 137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 138 | 139 | changes: 140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 141 | @echo 142 | @echo "The overview file is in $(BUILDDIR)/changes." 143 | 144 | linkcheck: 145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 146 | @echo 147 | @echo "Link check complete; look for any errors in the above output " \ 148 | "or in $(BUILDDIR)/linkcheck/output.txt." 149 | 150 | doctest: 151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 152 | @echo "Testing of doctests in the sources finished, look at the " \ 153 | "results in $(BUILDDIR)/doctest/output.txt." 
def fftshift2d(x: ndarray) -> ndarray:
    """
    Apply an FFT shift over the last two axes of *x*.

    Parameters
    ----------
    x : ndarray
        The input array, at least two dims.

    Returns
    -------
    ndarray
        The array with its last two axes fft-shifted.

    """
    return fft.fftshift(x, axes=(-1, -2))


def g(t: ndarray, dom: ndarray = 5, lam: ndarray = array([1])) -> ndarray:
    """
    Generalized Kubo function of the form:

    .. math::
        g(t) = \\sum_i \\frac{\\mathrm{dom}_i^2}{\\lambda_i^2}
               \\left( e^{-\\lambda_i t} - 1 + \\lambda_i t \\right)

    Parameters
    ----------
    t : ndarray
        The time points to evaluate the kubo function.
    dom : ndarray or float, optional
        Amplitude of the decay. Has to be the same shape as lam,
        by default 5.
    lam : ndarray, optional
        The decay rates, by default ``array([1])``.

    Returns
    -------
    ndarray
        The correlation function evaluated at the given time points,
        summed over all (dom, lam) components.

    """
    # reshape(-1) also promotes scalars to 1-d; the original 0-d
    # `asarray(5)` default crashed on the fancy indexing below.
    lam = asarray(lam).reshape(-1)
    dom = asarray(dom).reshape(-1)
    # Put the component axis in front of the time axes so several
    # (dom, lam) pairs broadcast against t and can be summed at once.
    if t.ndim == 1:
        lam = lam[:, None]
        dom = dom[:, None]
    else:
        lam = lam[:, None, None, None]
        dom = dom[:, None, None, None]
        t = t[None, ...]
    s = (dom / lam)**2 * (exp(-lam * t) - 1 + lam*t)
    return s.sum(0)
66 | 67 | Parameters 68 | ---------- 69 | T1 : ndarray 70 | The time points for the 1D IR. 71 | omega : float, optional 72 | The frequency shift, by default 0. 73 | dom : ndarray, optional 74 | The spectral density domain parameter, by default 5. 75 | lam : ndarray, optional 76 | The spectral density values, by default array([1]). 77 | 78 | Returns 79 | ------- 80 | ndarray 81 | The response function for 1D IR. 82 | 83 | """ 84 | lam = asarray(lam) 85 | osc = exp(-1j * omega * (-T1)) 86 | FID = osc * exp(-g(T1, dom, lam)) 87 | FID[0] *= 0.5 88 | return FID 89 | 90 | 91 | def response_functions_2D( 92 | T1: ndarray, 93 | T2: ndarray, 94 | T3: ndarray, 95 | omega: float, 96 | dom: ndarray = array([5]), 97 | anh: float = 5, 98 | lam: ndarray = array([1]), 99 | ) -> tuple[ndarray, ndarray]: 100 | """ 101 | Generate the response function for 2D IR. 102 | We assume T1 and T3 are the same for performance reasons. 103 | 104 | Parameters 105 | ---------- 106 | T1 : ndarray 107 | The time points for the first dimension of 2D IR (pulse seperation). 108 | T2 : ndarray 109 | The time points for waiting times. 110 | T3 : ndarray 111 | The time points for the second dimension of 2D IR (spectrum). 112 | omega : float 113 | The frequency shift. 114 | dom : float, optional 115 | Delta omega, the 116 | anh : float, optional 117 | The anharmonicity parameter, by default 5. 118 | lam : ndarray, optional 119 | The kubo decay rates, by default np.array([1]). 120 | 121 | Returns 122 | ------- 123 | tuple[ndarray, ndarray] 124 | The two response functions for 2D IR. 
125 | 126 | """ 127 | lam = asarray(lam) 128 | T1 = T1[None, :, :] 129 | T2 = T2[:, None, None] 130 | T3 = T3[None, :, :] 131 | 132 | gT1 = g(T1, dom, lam) 133 | gt2 = g(T2, dom, lam) 134 | gT3 = gT1.transpose(0, 2, 1) 135 | gT1t2 = g(T1 + T2, dom, lam) 136 | gt2T3 = gT1t2.transpose(0, 2, 1) 137 | ga = g(T1 + T2 + T3, dom, lam) 138 | pop = (2 - 2 * exp(-1j * anh * T3)) 139 | if omega != 0: 140 | osc = exp(-1j * omega * (-T1 + T3)) 141 | osc2 = exp(-1j * omega * (T1 + T3)) 142 | else: 143 | osc = 1 144 | osc2 = 1 145 | 146 | # Lines fromt the 2D IR book example 147 | # R_r=exp(-1i*w_0.*(-T1+T3)).*exp(-g(T1)+g(t2)-g(T3)-g(T1+t2)-g(t2+T3)+g(T1+t2+T3)).*(2-2.*exp(-sqrt(-1)*Delta.*T3)); 148 | # R_nr=exp(-1i*w_0.*(T1+T3)).*exp(-g(T1)-g(t2)-g(T3)+g(T1+t2)+g(t2+T3)-g(T1+t2+T3)).*(2-2.*exp(-sqrt(-1)*Delta.*T3)); 149 | # Reordered for better numerical stability. 150 | 151 | R_r = osc * exp(-gT1 - gT3 - gT1t2 - gt2T3 + gt2 + ga) * pop 152 | R_nr = osc2 * exp(-(gT1 + gt2 + gT3 + ga) + gT1t2 + gt2T3) * pop # type: ignore 153 | 154 | # Multiply by 0.5 to account for the fact that we are only using half of the 155 | # response function. This is because we are only using half of the spectrum 156 | # and half of the pulse seperation. 
157 | 158 | R_r[:, :, 0] *= 0.5 159 | R_r[:, 0, :] *= 0.5 160 | R_nr[:, :, 0] *= 0.5 161 | R_nr[:, 0, :] *= 0.5 162 | return R_r, R_nr 163 | 164 | 165 | def response_to_spec_2D(R_r, R_nr): 166 | # R_nr, R_r = response_functions(T1, T2, T3, ω) 167 | fR_nr, fR_r = fft.fft2(R_nr), fft.fft2(R_r) 168 | fR_nr, fR_r = fftshift2d(fR_nr), fftshift2d(fR_r) 169 | R = rot90(fR_nr, 2, axes=(1, 2)) 170 | R += flip(roll(fR_r, -1, axis=1), axis=(2, )) 171 | R = R.real 172 | return R 173 | 174 | 175 | def response_to_spec_1D(R): 176 | fR = fft.fft(R) 177 | fR = fft.fftshift(fR) 178 | R = R.real 179 | return R 180 | 181 | 182 | @dataclass 183 | class KuboBackend(Protocol): 184 | T1: ndarray 185 | T2: ndarray 186 | T3: ndarray 187 | 188 | def g(self, t, dom, lam) -> ndarray: 189 | ... 190 | 191 | def response_functions_1D(self, omega=0, dom=5, lam=array([1])) -> ndarray: 192 | ... 193 | 194 | def response_functions_2D(self, 195 | omega, 196 | dom=5, 197 | anh=5, 198 | lam=array([1])) -> tuple[ndarray, ndarray]: 199 | ... 200 | 201 | def response_to_spec_2D(self, R_r, R_nr): 202 | ... 203 | -------------------------------------------------------------------------------- /docs/pub_figures.rst: -------------------------------------------------------------------------------- 1 | .. role:: bi 2 | :class: bolditalic 3 | 4 | 5 | Creating publication-ready figures 6 | ================================== 7 | 8 | While the ad-hoc settings of skultrafast are generally fine, some 9 | additional adjustment may be necessary to create figures for an article. 10 | Here, in this tutorial will the most common steps. If you think anything 11 | is missing or wrong, don't hesitate to submit an issue or pull-request. 12 | If you looking for way to do something, please also have a look at the 13 | `matplotlib documentation `__. 14 | It contains many examples and multiple tutorials. 15 | 16 | \*\ **tl;dr** 17 | 18 | Call ``plot_helpers.enable_style()`` before creating your figures. 
As a rule of thumb, a single-column figure should have a width of around 3 in, a two-column figure about 6.5 in.
There are two ways to create
such figures: either we create the single figures first and do the composition
of the figures with a vector graphics program like *Inkscape*, or we create
the multi-panel graphic directly in matplotlib. So which one should be used?

Using a vector graphics program has several advantages: *wysiwyg*, easy
fine-tuning and mouse support. But this is bought with some severe drawbacks: if
you need to change one of the sub-figures, you need to adjust the rest of the
figure as well. Also, if you have to change fonts due to a resubmission, you have
to apply it to both the single figures and the later-added graphical elements.
Additionally, version control is not commonly supported for graphics formats, and
exactly recreating a figure requires a lot of manual steps.
While this may
lead to a boring figure, it also looks professional.
# %%
from lmfit.parameter import Parameters
# numpy.lib.function_base is no longer a public namespace in NumPy >= 2.0;
# trim_zeros is available from the top-level namespace.
from numpy import trim_zeros
from skultrafast import plot_helpers

import torch
import attr
import math
import numpy as np
from skultrafast.dataset import TimeResSpec
from scipy.optimize import least_squares
import lmfit
from typing import Optional, Callable

# e^(1/2); normalization constant for the first derivative of the
# coherence Gaussian in `make_base`.
exp_half = math.exp(1 / 2.)


@torch.jit.script
def lstsq(b, y, alpha: float = 0.1):
    """
    Batched linear least-squares for pytorch with optional Tikhonov (L2)
    regularization, solved via the normal equations.

    Parameters
    ----------
    b : torch.Tensor
        Basis tensor of shape (L, M, N).
    y : torch.Tensor
        Data tensor of shape (L, M).
    alpha : float
        Regularization strength added to the diagonal of the normal matrix.
        Note this is ridge (L2) regularization, not L1 as previously
        documented.

    Returns
    -------
    tuple of (coefficients, model, residuals)
    """
    bT = b.transpose(-1, -2)
    AA = torch.bmm(bT, b)

    if alpha != 0:
        # torch.diagonal returns a view, so the in-place add regularizes AA.
        diag = torch.diagonal(AA, dim1=1, dim2=2)
        diag += alpha
    RHS = torch.bmm(bT, y[:, :, None])
    # torch.solve(B, A) was removed in PyTorch 1.13; torch.linalg.solve(A, B)
    # solves AA @ X = RHS and returns only the solution.
    X = torch.linalg.solve(AA, RHS)
    fit = torch.bmm(b, X)[..., 0]
    res = y - fit
    return X[..., 0], fit, res


@torch.jit.script
def make_base(tt, w, tau, model_coh: bool = False) -> torch.Tensor:
    """
    Calculates the basis for the linear least-squares problem.

    Parameters
    ----------
    tt : torch.Tensor
        2D tensor of (dispersion-shifted) delay times, one row per channel.
    w : torch.Tensor
        IRF width(s); broadcastable against the channel axis of `tt`.
    tau : torch.Tensor
        1D tensor of decay times.
    model_coh : bool
        If True, appends a Gaussian and its first two derivatives to model
        coherent contributions around time-zero.

    Returns
    -------
    torch.Tensor
        Basis tensor; last axis enumerates the decays (+3 coherence columns
        if `model_coh`).
    """
    k = 1 / (tau[None, None, ...])
    t = tt[..., None]
    nw = w[:, None]
    # Exponential decay convolved with a Gaussian IRF of width `nw`.
    A = 0.5 * torch.erfc(-t / nw + nw * k / 2.0)
    A *= torch.exp(k * (nw * nw * k / 4.0 - t))

    if model_coh:
        # Bugfix: torch.exp(0.5) is invalid (torch.exp requires a tensor);
        # math.exp is supported under TorchScript.
        exp_half = math.exp(0.5)
        scaled_tt = tt / w
        coh = torch.exp(-0.5 * scaled_tt * scaled_tt)
        coh = coh[:, :, None].repeat((1, 1, 3))
        coh[..., 1] *= (-scaled_tt * exp_half)
        coh[..., 2] *= (scaled_tt - 1)
        A = torch.cat((A, coh), dim=-1)

    # The erfc/exp product can overflow to nan for extreme arguments;
    # clean the basis in place.
    torch.nan_to_num(A, out=A)
    return A


@attr.s(auto_attribs=True)
class FitterTorch:
    """
    Global exponential fitter: pytorch solves the linear part (amplitudes,
    via `lstsq`), while lmfit/scipy optimize the nonlinear parameters
    (dispersion polynomial, decay times, IRF width).
    """
    # The dataset to fit; its transposed data is uploaded once in
    # __attrs_post_init__.
    dataset: TimeResSpec = attr.ib()
    buf_dataset: Optional[TimeResSpec] = None
    # Heuristic for initial time-zeros as function of wavenumber.
    zero_func: Callable = attr.ib(lambda x: np.zeros_like(x))
    done_eval: bool = attr.ib(False)
    use_cuda: Optional[bool] = attr.ib(None)
    # Number of coefficients of the dispersion polynomial.
    disp_poly_deg: int = attr.ib(2)
    # Number of coefficients of the width polynomial (1 = constant width).
    sigma_deg: int = attr.ib(1)
    model_coh: bool = attr.ib(False)
    # Optional additional basis columns appended to the decay basis.
    extra_base: Optional[np.ndarray] = attr.ib(None)

    def __attrs_post_init__(self):
        ds = self.dataset
        self.dev_data = torch.from_numpy(ds.data.T)
        if self.extra_base is not None:
            self.extra_base = torch.from_numpy(self.extra_base)

        if self.use_cuda:
            self.dev_data = self.dev_data.cuda()
            if self.extra_base is not None:
                # Bugfix: Tensor.cuda() returns a new tensor; the result was
                # previously discarded, leaving extra_base on the CPU.
                self.extra_base = self.extra_base.cuda()

    def eval(self, tt, w, tau, buffer=False):
        """
        Evaluates the model for given arrays.

        Parameters
        ----------
        tt : ndarray
            Contains the delay-times, should have the same shape as the data.
        w : ndarray
            The IRF width(s), one entry per channel or a single entry.
        tau : ndarray
            Contains the decay times.
        buffer : bool
            Currently unused; kept for interface compatibility.
        """
        tt = torch.from_numpy(tt)
        tau = torch.from_numpy(tau)
        w = torch.from_numpy(w)[:, None]
        if self.use_cuda:
            tt = tt.cuda()
            tau = tau.cuda()  # previously duplicated
            w = w.cuda()

        A = make_base(tt, w, tau, self.model_coh)
        if self.extra_base is not None:
            if self.use_cuda:
                self.extra_base = self.extra_base.cuda()
            A = torch.cat((A, self.extra_base), dim=2)
        X, fit, res = lstsq(A, self.dev_data)
        self.A = A
        self.done_eval = True
        self.c = X
        self.model = fit
        self.residuals = res
        return X, fit, res

    def fit_func(self, x):
        """
        Residual function for the nonlinear optimizer.

        The flat parameter vector `x` is split, in order, into dispersion
        coefficients, decay times and IRF-width coefficients.
        """
        ds = self.dataset
        self.disp_coefs = x[:self.disp_poly_deg]
        taus = x[self.disp_poly_deg:-self.sigma_deg]
        w_coefs = x[-self.sigma_deg:]
        x = ds.wavenumbers
        # Normalize the frequency axis to [-1, 1] for stable polynomial
        # evaluation. ndarray.ptp was removed in NumPy 2.0, hence np.ptp.
        xn = 2*(x - x.min()) / np.ptp(x) - 1
        self.t_zeros = np.poly1d(self.disp_coefs)(xn)
        if len(w_coefs) > 1:
            self.w = np.poly1d(w_coefs)(xn)
            # Clamp to a small positive width to keep make_base well-defined.
            self.w = np.maximum(self.w, 0.01)
        else:
            self.w = w_coefs
        self.tt = np.subtract.outer(ds.t, self.t_zeros).T
        c, model, res = self.eval(self.tt, self.w, taus, True)
        return res.cpu().numpy().ravel()

    def start_lmfit(self,
                    w,
                    taus,
                    fix_last_tau=False,
                    fix_width=False,
                    fix_disp=False,
                    disp_params=None,
                    least_squares_kw=None):
        """
        Build the lmfit Parameters and a Minimizer for the fit.

        Parameters
        ----------
        w : sequence
            Starting width coefficient(s); its length sets `sigma_deg`.
        taus : sequence
            Starting decay times.
        fix_last_tau, fix_width, fix_disp : bool
            Fix the respective parameter group during the fit.
        disp_params : sequence, optional
            Explicit starting dispersion coefficients; if None they are
            estimated from `zero_func`.
        least_squares_kw : dict, optional
            Currently unused; kept for interface compatibility.
        """
        ds = self.dataset
        if disp_params is None:
            time_zeros = self.zero_func(ds.wavenumbers)
            x = ds.wavenumbers
            # Use the same [-1, 1] normalization as `fit_func`; previously the
            # guess was fitted on [0, 1] but evaluated on [-1, 1].
            xn = 2*(x - x.min()) / np.ptp(x) - 1
            disp_guess = np.polyfit(xn, time_zeros, self.disp_poly_deg - 1)
        else:
            disp_guess = disp_params

        paras = lmfit.Parameters()
        for i, p in enumerate(disp_guess):
            paras.add('p%d' % i, value=p, vary=not fix_disp)
        for i, p in enumerate(taus):
            fixed = fix_last_tau and (i == len(taus) - 1)
            paras.add('tau_%d' % i, min=0.01, value=p, vary=not fixed)

        self.sigma_deg = len(w)
        for i, p in enumerate(w):
            # Bugfix: the width parameters were gated by `fix_disp`, leaving
            # the `fix_width` argument without any effect.
            paras.add('w%d' % i, value=p, vary=not fix_width)

        def fix_func(params: lmfit.Parameters):
            # lmfit passes a Parameters (dict-like) object; np.array() on it
            # yields a useless 0-d object array. Flatten the values in the
            # insertion order (dispersion, taus, widths), which is exactly
            # what `fit_func` expects.
            x = np.array([p.value for p in params.values()])
            return self.fit_func(x)

        mini = lmfit.Minimizer(fix_func, paras)
        return paras, mini
    t1 = f["t1"][:]  # Inter Pump-pump delay axis
    t2 = f["t2"][:]  # Pump-Probe delay axis, also called waiting time
    wl = f["wl"][:]  # Probe Wavelength axis, given by the spectrometer and the detector


# %%
# Our setup uses three detector lines, two probe lines and one reference line.
# Here, we will only use one Probe line. Each sub-group contains the data for
# a single waiting time. Each single waiting time contains the data for all
# scans.

with h5.File(fpath, "r") as f:
    frames = f["frames/Probe1"]  # The data itself from the first probe.
    n_wt = len(frames)

    # Let's look at the data of one waiting time and the first scan.
    # This should correspond to 0.3 ps.
    # NOTE(review): group "10" is indexed although the text says "second
    # waiting time" -- confirm the group naming against the file layout.
    data = frames["10"]["0"][:]
    n_scans = len(frames["2"])

# %%
# Let's compare the shape of the data with the shape of the axes.
print("data", data.shape, "t1", t1.shape, "wl", wl.shape)

# %% Notice that the second axis of the data is four times the length of the t1
# axis. This is due to the 4-step phase cycle, where we record the signal for
# each delay in four different phase cycles. Currently, the data are the raw
# pixel values averaged over some repeated measurements. Looking at the data, we
# do not see much, since the pump-induced change is very small compared to the
# probe.

fig, ax = plt.subplots()
ax.imshow(data, aspect="auto")

fig, (ax, ax2) = plt.subplots(2, figsize=(4, 3), constrained_layout=True)
n = 58
ax.plot(data[n], label="Channel %d, %d nm" % (n, wl[n]))

ax.legend()
# NOTE(review): all three labels below use t1[0]; the analogous plot further
# down uses t1[0], t1[1], t1[2] -- these labels are probably meant to do the
# same.
ax2.plot(wl, data[:, 0], label="Channel 1, t1 = %.1f fs" % t1[0])
ax2.plot(wl, data[:, 1], label="Channel 1, t1 = %.1f fs" % t1[0])
ax2.plot(wl, data[:, 2], label="Channel 1, t1 = %.1f fs" % t1[0])
ax2.axvline(wl[58], color="k", ls="--")
ax2.legend()
ax2.set_xlabel("Wavelength [nm]")
fig.supylabel("Pixel Count [a.u.]")

# %%
# In the used cycling scheme, the relative phase of the pump pulse is changed in
# each cycle, hence the signal is given by the difference of the signal in the
# two phases. We also modulate the phase relative to the probe pulse by pi.
# So the signal is given by the difference of the signal in the two phases and
# summed up over the two phases. This gives us the interferogram.

# Sig = (S1 - S2) + (S3 - S4)

# Note that this is the linear approximation of the signal. The actual signal
# is the log of the ratio of the signal in the two phases.
sig = np.log(data[:, ::4] / data[:, 1::4]) #+ np.log1p((data[:, 2::4] / data[:, 3::4]))

# This also has to be divided by the mean, since the signal is the relative change.
sig = sig / np.mean(sig, axis=1)[:, None]



fig, ax = plt.subplots()
ax.pcolormesh(t1, wl, sig, shading="gouraud")

# %%
# Since this is a little bit noisy, let's do the same for the average of all scans.
# First we average over the scans and then calculate the signal.

with h5.File(fpath, "r") as f:
    frames = f["frames/Probe1"]  # The data itself from the first probe.
    n_scans = len(frames)

    # Let's look at the data of the fifth waiting time and the first scan.
    # This should correspond to 0.4 ps.
    # NOTE(review): the loop below varies the outer group index and keeps the
    # inner index fixed at "2", while above the outer index was treated as the
    # waiting time -- verify which level is scan and which is waiting time.
    data_mean = np.mean([frames["%d" % i]["2"][:] for i in range(n_scans)], axis=0)

# If the optical resolution is less than 1 pixel, we can smooth the data along
# the wavelength axis to reduce noise. Here we use the savgol filter.
from scipy.signal import savgol_filter
data_mean = savgol_filter(data_mean, 11, 5, axis=0)

diff1 = (data_mean[:, ::4] - data_mean[:, 1::4])
diff2 = (data_mean[:, 2::4] - data_mean[:, 3::4])
diffm = data_mean.mean(1)

sig_mean = np.log1p(diff1 / diffm[:, None]) + np.log1p(diff2 / diffm[:, None])

fig, ax = plt.subplots()
ax.pcolormesh(t1, wl, sig_mean, shading="gouraud", cmap="RdBu_r")

fig, (ax, ax2) = plt.subplots(2, figsize=(4, 3), constrained_layout=True)
n = 58
ax.plot(sig_mean[n], label="Channel %d, %d nm" % (n, wl[n]))
ax.legend()
ax2.plot(wl, sig_mean[:, 0], label="Channel 1, t1 = %.1f fs" % t1[0])
ax2.plot(wl, sig_mean[:, 1], label="Channel 1, t1 = %.1f fs" % t1[1])
ax2.plot(wl, sig_mean[:, 2], label="Channel 1, t1 = %.1f fs" % t1[2])
ax2.legend()

# %%
# Now we have a nice interferogram. The next step is to do a Fourier transform.
# We will use the numpy fft module for this.
# The Fourier transform is done along the t1 axis, since this is the axis that
# is modulated by the pump pulse.

zero_pad = 2
# Half weight for the first (t1 = 0) point before the half-sided FFT.
sig_mean[:, 0] *= 0.5
sig_ft = np.fft.fftshift(np.fft.fft(sig_mean, axis=1, n=zero_pad * sig_mean.shape[1]),
                         axes=1)

# The frequency axis is given by the inverse of the time axis.
# The zero frequency is in the middle of the axis.

freq = np.fft.fftshift(np.fft.fftfreq(sig_ft.shape[1], np.diff(t1)[0]))

# We want to plot the frequency in cm^-1, so we convert the frequency axis.
# Currently the frequency is in rad/ps, so we convert it to cm-1.
"""
Compartment Modelling
=====================

Here in this tutorial, we will use skultrafast to do global analysis. We are
using the approach as presented by van Stokkum
(doi:10.1016/j.bbabio.2004.04.01).

The general outline is as follows:

1. Generate a decay associated spectrum
2. Build the transfer matrix K, choosing starting concentrations j
3. Generate transformation matrices from K and j
4. Apply them to the basis vectors of the DAS

Here we assume step 1 is already done. skultrafast has a module to help with step 2.
For that we need the `Model` class. Please note that the module is quite barebones
and may still have bugs.
"""
# sphinx_gallery_thumbnail_number = 3
# %%
from skultrafast import dataset, data_io, plot_helpers
import numpy as np
from matplotlib import pyplot as plt
from skultrafast.kinetic_model import Model

model = Model()

# %%
# To build the transfer matrix, we have to add transitions. If one of the
# compartments is not part of the model it will be added automatically. The
# following line means that the model has a transition from S2 to S1 with a
# rate k2. Since no yield is given, it defaults to 1.

model.add_transition('S2', 'S1', 'k2')

# %%
# Let's also assume that S1 decays to zero. The compartment zero is special,
# since it is not modelled explicitly and the name `zero` is reserved.

model.add_transition('S1', 'zero', 'k1')

# %%
# To get the corresponding matrix from the model we have to call the
# `build_matrix`-function

mat = model.build_matrix()
mat

# %%
# To fill the matrix with numeric values we can use the `build_mat_func` method.
# This gives us a function which takes all free parameters, first the rates and
# then the yields.

f = model.build_mat_func()
f

# %%
# Now, what can we do with that matrix? We can use it to project the DAS to the
# described model. For that, we first evaluate the matrix numerically by
# supplying values for the parameters

num_mat = f(k2=2, k1=1)
num_mat

# %%
# Next, we need the eigenvectors of the matrix. Note that the presented approach
# assumes that the eigenvalues are simple eigenvalues. If this is not the case,
# one has to use the Jordan normal form. As we see, the eigenvalues of the
# matrix are the negative rates. That basically means that the eigenbasis of the
# problem is given by a diagonal transfer matrix, which is the parallel model
# described by a DAS. Hence, the eigenvectors allow us to transform the DAS to
# a SAS and vice versa by using the inverse.

vals, vecs = np.linalg.eig(num_mat)
vals

# %%
# To continue, we also need to choose the starting concentrations.

j = np.zeros(len(vals))
j[0] = 1

j
# %%
# The transformation matrix is then given by

A = vecs @ np.diag(np.linalg.inv(vecs) @ j)
A_inv = np.linalg.inv(A)

# %%
# Now `DAS @ A_inv` should give the SAS, while `A_inv @ basis_vecs` should
# return the time-dependence of the concentrations. Let's test that on some
# data. Load test data and correct the dispersion.

plot_helpers.enable_style()

wl, t, d = data_io.load_example()
ds = dataset.TimeResSpec(wl, t, d)
dsb = ds.bin_freqs(50)  # Bin dataset to save computation while building docs
res = dsb.estimate_dispersion(heuristic_args=(1.5, ), deg=3, shift_result=.15)

# %%
# Fit the DAS first.

ids = res.correct_ds
fr = ids.fit_exp([0.0, 0.08, 1, 500000],
                 model_coh=True, fix_sigma=False, fix_t0=False)
ids.plot.das()

# %%
# Make a sequential model.

m = Model()
m.add_transition('S1hot', 'S1', 'k1')
m.add_transition('S1', 'zero', 'const')
m.build_matrix()

# %%
# Make the transformation matrices. Notice that we reverse the order of the
# eigenvectors. Initially, they are sorted by their eigenvalue, therefore the
# slowest component comes first. Since this order is the inverse of the
# skultrafast order, we reverse it so we don't have to reverse our spectra and
# basis vectors later.

func = m.build_mat_func()
num_mat = func(1 / fr.lmfit_res.params['t0'],
               1/fr.lmfit_res.params['t1'])
vals, vecs = np.linalg.eig(num_mat)

# Reverse order
vecs = vecs[:, ::-1]

j = np.zeros(len(vals))
j[0] = 1

A = (vecs @ np.diag(np.linalg.inv(vecs) @ j)).T
A_inv = np.linalg.inv(A)

# %%
# The DAS are members of the fitter object. Since we are also modeling coherent
# contributions, we only take the first two components. Also, the eigenvalue
# routine sorts the eigenvectors after their eigenvalue, which is the opposite
# of our usual sorting, where the fast component comes first.

fig, ax = plt.subplots(2, figsize=(3, 4))

das = fr.fitter.c[:, :2]
ax[0].plot(dsb.wn, das)
ax[0].set_title('DAS')
plot_helpers.lbl_spec(ax[0])

sas = A_inv @ das.T
edas = np.cumsum(das, axis=1)
ax[1].plot(dsb.wn, sas.T)
ax[1].set_title('SAS')
plot_helpers.lbl_spec(ax[1])
# %%
# As we can see, we successfully get the SAS, which in this case are just EDAS.
# Let's also look at the concentrations.

fig, ax = plt.subplots(2, figsize=(3, 4))
ax[0].plot(dsb.t, fr.fitter.x_vec[:, :2])
ax[0].set_title('DAS')
plot_helpers.lbl_trans(ax[0], use_symlog=False)
ct = fr.fitter.x_vec[:, :2] @ A
ax[1].plot(dsb.t, ct)
ax[1].set_title('SAS')
plot_helpers.lbl_trans(ax[1], use_symlog=False)
# %%
# So why does it work? The dataset is given by the outer product of the
# concentrations and the spectrum

C = fr.fitter.x_vec[:, :2]
S = das[:, :2]

fit = C @ S.T
fit - fr.fitter.model

# %%
# Now we can insert `1 = A @ A_inv`. As expected this does not change the
# product.

((C@A) @ (A_inv @ S.T)) - (C @ S.T)

# %%
# All steps above are done in the `make_sas` method of the fit result, taking
# a `Model` as a parameter. If yields are required, they also have to be
# supplied.

sas, ct = fr.make_sas(model, {})

fig, ax = plt.subplots(2, figsize=(3, 4))
ax[0].plot(fr.fitter.wl, sas.T)
ax[0].set_title('SAS')
plot_helpers.lbl_trans(ax[0], use_symlog=False)
ct = fr.fitter.x_vec[:, :2] @ A
ax[1].plot(dsb.t, ct)
ax[1].set_title('Convoluted Concentrations')
plot_helpers.lbl_trans(ax[1], use_symlog=False)
43 | # Since the `freq_unit` defaults to 'nm' we don't need to supply this argument. 44 | 45 | ds = TimeResSpec(wavelengths, t_ps, data_mOD) 46 | 47 | 48 | # %% 49 | # The TimeResSpec object simply consists of data itself and methods using that 50 | # data. The attributes containing the data can be accessed under `ds.data`, 51 | # `ds.wavenumbers`, `ds.wavelengths` and `ds.t`. 52 | 53 | print(ds.data.shape, ds.t.shape, ds.wavelengths.shape) 54 | 55 | # %% 56 | # The TimeResSpec object also has some helper methods to work with the data. 57 | # These functions find the index of the nearest value for a given number, e.g. to find 58 | # the position in the time array where the time is zero we can call the `t_idx` 59 | # method 60 | 61 | print(ds.t_idx(0), ds.t[ds.t_idx(0)]) 62 | 63 | # %% 64 | # Hence the spectrum at t = 0 is given by 65 | 66 | y_0 = ds.data[ds.t_idx(0), :]; 67 | 68 | # %% 69 | # In addition, there is also a shorthand to return the data at these indices directly. 70 | 71 | assert(sum(ds.t_d(0) - y_0) == 0) 72 | 73 | # %% 74 | # Overview map 75 | # ------------ 76 | # To get an general idea of the transient spectra we need to see it. 77 | # All plotting functions are in the `TimeResSpec.plot` object, which is 78 | # an instance of `TimeResSpecPlotter`. The plotting functions are using 79 | # the `disp_freq_unit` of the dataset as frequency scale by default. This can be 80 | # changed by changing the `disp_freq_unit` of the `TimeResSpecPlotter` object. 81 | 82 | # %% 83 | ds.plot.disp_freq_unit = 'nm' # does nothing, since 'nm' is the default 84 | # ds.plot.disp_freq_unit = 'cm' would use wavenumbers 85 | 86 | # %% 87 | # First, we want to check if the dataset is corrected for dispersion. For that 88 | # we plot a colormap around the time-zero. 89 | 90 | ds.plot.map(symlog=0, con_step=10., con_filter=(3, 10)) 91 | plt.ylim(-2, 2) 92 | 93 | # %% 94 | # Evidently, the dataset is not corrected for dispersion. 
Since it is easier to 95 | # work with a dispersion corrected dataset, we try to estimate the 96 | # dispersion using the data directly. 97 | # 98 | # Dispersion estimation and correction 99 | # ------------------------------------ 100 | # *skultrafast* does this by first using a simple heuristic for determining the time- 101 | # zero for each transient. The resulting dispersion curve is then fitted with a poly- 102 | # nomial, using a robust fitting method. More details are given in the documentation. 103 | # 104 | # To estimate the dispersion just call the function. It will plot two colormaps, one 105 | # with the original dataset, the time-zeros found by the heuristic and the robust 106 | # polynomial fit of these values. The bottom color map shows the dispersion corrected 107 | # data. 108 | 109 | res = ds.estimate_dispersion(heuristic_args=(1.5,), deg=3) 110 | 111 | # %% 112 | # By default, *skultrafast* uses a very simple heuristic to find the time-zero. 113 | # It looks for the earliest value above a given limit in each transient, and 114 | # therefore underestimates the time-zero systematically. Therefore we slightly 115 | # shift the time-zero. 116 | # 117 | # This generally works surprisingly well. But if the exact time-zero is 118 | # necessary, I recommend trying other methods or measuring the dispersion 119 | # directly. 120 | # 121 | # **WARNING**: The cell below changes the dataset in place. Therefore repeated 122 | # calls to the cell will shift the time-zero again and again. The shifting 123 | # can also be applied by setting the `shift_result` parameter in the call 124 | # to `ds.estimate_dispersion`. 125 | 126 | new_ds = res.correct_ds # Warning, this does not copy the dataset! 127 | new_ds.t -= 0.2 128 | 129 | # %% 130 | # Plotting spectra and transients 131 | # ------------------------------- 132 | # A major part of *skultrafast* are convenience functions for generating 133 | # figures.
Starting with the colormap from above, we see now that our 134 | # dataset looks correct: 135 | 136 | new_ds.plot.map(con_step=10., con_filter=(3, 5)) 137 | 138 | # %% 139 | # To plot spectra at given delay times: 140 | 141 | lines = res.correct_ds.plot.spec(-.2, 0.05, 0.3, 1, 2, 150) 142 | 143 | # %% 144 | # Or plot transients for given wavelengths: 145 | 146 | lines = res.correct_ds.plot.trans(500, 550, 620, 680) 147 | 148 | # %% 149 | # All these function offer a number of options. More information can be found in 150 | # their docstrings. 151 | # 152 | # Exponential fitting 153 | # ------------------- 154 | # Fitting a decay-associated spectra (DAS) is a one-liner in skultrafast. If the 155 | # dataset is dispersion corrected, only a starting guess is necessary. Please 156 | # look at the docstring to see how the starting guess is structured. 157 | # _Note_, the the fitting interface may change in the future. 158 | 159 | fit_res = new_ds.fit_exp([-0.0, 0.05, 0.2, 2, 20, 10000], 160 | model_coh=True, fix_sigma=False, fix_t0=False) 161 | fit_res.lmfit_res.params 162 | 163 | # %% 164 | # Lets plot the DAS 165 | new_ds.plot.das() 166 | 167 | # %% 168 | # We can always work with the results directly to make plots manually. 
Here, 169 | # the `t_idx`, `wl_idx` and `wn_idx` methods of the dataset are very useful: 170 | for wl in [500, 580, 620]: 171 | t0 = fit_res.lmfit_res.params['p0'].value 172 | idx = new_ds.wl_idx(wl) 173 | plt.plot(new_ds.t - t0, fit_res.fitter.data[:, idx], 'o', color='k', ms=4, 174 | alpha=0.4) 175 | plt.plot(new_ds.t - t0, fit_res.fitter.model[:, idx], lw=2, label='%d nm' % wl) 176 | plt.xlim(-1, 10) 177 | plot_helpers.lbl_trans(use_symlog=False) 178 | plt.legend(loc='best', ncol=1) 179 | -------------------------------------------------------------------------------- /skultrafast/examples/tutorial_spectrometer_calibration.py: -------------------------------------------------------------------------------- 1 | """ 2 | Spectrograph calibration 3 | ======================== 4 | 5 | In this tutorial we look at the calibration of a spectrograph. So what we are 6 | looking for is a function which is mapping the index of our detector element to 7 | a wavelength. 8 | 9 | We discuss two cases: 10 | 11 | 1. The spectrograph grating is fixed. 12 | 2. The spectrograph grating is rotatable. 13 | 14 | For the first case, we need to have several identifiable features with known 15 | spectral positions. These can be a calibration pen lamp (ideal), a sample with 16 | known absorption peaks or known filters. The calibration then is done by fitting 17 | the resulting index vs. wavelength points with a function. For a typical grating 18 | spectrometer the function should be a linear function. Depending on the imaging 19 | geometry, a higher polynomial can be used. 20 | 21 | For the second case, we can get away with one known feature. For this, we must 22 | assume that grating information and stepper settings are correct, e.g. the 23 | spectrograph moves the grating the correct distance. This can be checked by 24 | first adjusting the zero-order correctly and then moving the grating to a known 25 | peak. The peak-position should then coincide with the zero-order position.
If the 26 | zero-order can not be aligned, it may be that the zero-offsets of the 27 | spectrograph are incorrect. These can often be modified, the easies way is to 28 | use the software of the spectrograph. We then record the spectrum by scanning 29 | the feature in a way, that it moves from one side of the detector to the other. 30 | Now by comparing the wavelength which has been set to feature position, we can 31 | directly see the dispersion per pixel. This, again, should be linear for most 32 | spectrographs. 33 | 34 | Notice that tracking a feature over the whole range manually is not efficent, if 35 | possible it should be automated, e.g. by always looking for the lowest intensity. 36 | 37 | Lets look at an example. Here, we are looking at the calibartion of an 38 | 128 channels spectrometer in the mid-IR. 39 | """ 40 | # %% 41 | # Imports we will use later. 42 | 43 | import numpy as np 44 | import matplotlib.pyplot as plt 45 | import scipy.ndimage as nd 46 | from scipy.stats import linregress 47 | from skultrafast import data_io 48 | 49 | 50 | # %% 51 | # Nicer plots. 52 | 53 | from skultrafast.plot_helpers import enable_style 54 | enable_style() 55 | 56 | # %% 57 | # The file contains the measured intensities of the recored lines, 58 | # the set wavelength and the calculated wavelengths, which we want to 59 | # check and adjust. 60 | 61 | p = data_io.get_example_path('ir_polyfilm') 62 | a = np.load(p) 63 | list(a.keys()) 64 | 65 | # %% 66 | # Use some helper variables 67 | 68 | wl = a['wl'] 69 | N = 63 # center channel 70 | cwl = a['wl'][:, N] 71 | pr = a['probe'] 72 | 73 | # %% 74 | # Lets plot the spectrum of the center channel. 75 | 76 | fig, ax = plt.subplots(figsize=(5, 2.4)) 77 | ax.plot(cwl, pr[:, 64], lw=1) 78 | #ax.secondary_xaxis('top', functions=(lambda x: 1e7 / x, lambda x: 1e7 / x)) 79 | plt.setp(ax, xlabel='Wavelength', ylabel='Couts') 80 | 81 | # %% 82 | # The spectrum consists of the probe, with absorption lines showing up as dips. 
83 | # The sharp lines are caused by water-vapor and the wider lines are from a 84 | # polystyrene calibration film. 85 | # 86 | # To extract the real absorption spectrum we can measure the spectrum without 87 | # the film and calculate the absorption. It is also possible to subtract the 88 | # baseline. This approach also allows us to use water-vapor lines for 89 | # calibration. Notice that latter also depend on the humidity and temperature, 90 | # also the presented baseline approach is just an approximation. 91 | # 92 | # We approximate the baseline by taking the local maxima and interpolate 93 | # inbetween. 94 | 95 | from scipy.interpolate import interp1d 96 | for ch in [63, 58]: 97 | fig, ax = plt.subplots() 98 | back = nd.maximum_filter1d(pr[:, ch], 15) 99 | idx = back == pr[:, ch] 100 | 101 | idx[:200] = False 102 | idx[-100:] = False 103 | touching = back[idx] 104 | f = interp1d(cwl[idx], touching, bounds_error=False, kind='cubic') 105 | plt.plot(cwl, f(cwl)) 106 | plt.plot(cwl, pr[:, ch]) 107 | plt.plot(cwl, np.interp(cwl, cwl[idx], touching) - pr[:, ch] + 5000, lw=1) 108 | 109 | # Load water vapor data 110 | p = data_io.get_example_path('vapor') 111 | ftir_x, ftir_vapor= np.load(p).T 112 | 113 | # Convolve vapor spectrum with a gaussian 114 | ftir_vapor = nd.gaussian_filter(ftir_vapor, 5) * 52000 115 | 116 | ax.plot(1e7 / ftir_x, ftir_vapor + 5000, scaley=0, lw=1, zorder=1, alpha=0.5, label='Water Vapor') 117 | plt.plot(cwl, pr[:, ch] + np.interp(cwl, 1e7 / ftir_x, ftir_vapor), lw=2, color='C5') 118 | ax.axhline(5000, lw=1, c='0.3') 119 | 120 | ax.vlines(1e7 / np.array([1493, 1452, 1601]), 121 | 5000, 122 | 8000, 123 | color='C4', 124 | zorder=3, 125 | label='Polysteren peak pos.') 126 | 127 | ax.set(xlim=(6000, 7000), 128 | ylim=(0, 10000), 129 | xlabel='Wavelenght / nm', 130 | title=f'Channel: {ch}') 131 | ax.legend(ncol=4) 132 | #ax.secondary_xaxis('top', functions=(lambda x: 1e7 / x, lambda x: 1e7 / x)) 133 | 134 | # %% 135 | # Using the initial 
channel 63 clearly leads to an offset, indicating the 136 | # zero-order position was not correct. Using instead 58 as the center channel we 137 | # get an good agreement. The peak at 1601 is isolated from water vapor lines, 138 | # hence we will use it to calibrate the dispersion. For that we will look at 139 | # three spectra at once: One where the peak is at the center channel and one for 140 | # each side. We will try to find a suitable dispersion factor to get some 141 | # reasonable overlap. As seen below, a factor of 7.7 nm/pixel gives us really 142 | # good fit. 143 | 144 | fig, ax = plt.subplots() 145 | i = np.argmin(abs(cwl - 1e7/1601)) 146 | disp = 7.7 147 | new_x = disp * (np.arange(128) - 58) 148 | ax.plot(new_x + cwl[i], pr[i, :]) 149 | ax.plot(new_x + cwl[i - 130], 0.58 * pr[i - 130, :]) 150 | ax.plot(new_x + cwl[i + 130], 1.4 * pr[i + 130, :]) 151 | 152 | new_wl = disp * (np.arange(128) - 58)[:, None] + cwl[None, :] 153 | # %% 154 | # Using that factor we can extract the region around the peak for multiple spectra. 155 | # In this region, we just look for the minimum. 
156 | 157 | fig, (ax, ax2) = plt.subplots(2, sharex=True, figsize=(3, 4)) 158 | 159 | mask = (abs(new_wl - 1e7/1601) < 80).T 160 | ax.plot(np.arange(128) - 58, cwl[np.argmax(mask, 0)], lw=1, c='k', ls='--') 161 | ax.plot(np.arange(128) - 58, cwl[800 - np.argmax(mask[::-1], 0)], lw=1, c='k', ls='--') 162 | ax.pcolormesh(np.arange(128) - 58, cwl, pr, rasterized=True, shading='auto') 163 | 164 | tmp = np.where(mask, pr, np.inf) 165 | from scipy.stats import linregress 166 | 167 | x = np.arange(128) - 58 168 | y = cwl[np.argmin(tmp, 0)] 169 | res 170 | # %% 171 | -------------------------------------------------------------------------------- /skultrafast/referencing.py: -------------------------------------------------------------------------------- 1 | # This file contains function to apply adavanced referencing to raw dataset 2 | 3 | import h5py 4 | import numpy as np 5 | from collections import defaultdict 6 | from scipy.ndimage import gaussian_filter1d 7 | from attr import dataclass 8 | 9 | 10 | def get_stats(grp: h5py.Group) -> np.ndarray: 11 | keys = list(grp.keys()) 12 | n = len(keys) 13 | dim = grp[keys[0]].shape 14 | out = np.zeros((n, *dim), dtype=np.float32) 15 | for i in range(n): 16 | out[i, :] = grp[str(i)][:].astype(np.float32) 17 | return out 18 | 19 | 20 | def get_scans(f: h5py.File) -> dict[str, list[np.ndarray]]: 21 | out = defaultdict(list) 22 | for p in 'Probe1', 'Probe2': 23 | for i in range(len(f['frames'][p])): 24 | out[p].append(get_stats(f['frames'][p][str(i)])) 25 | return out 26 | 27 | 28 | def get_ref_stats(f: h5py.File) -> list[np.ndarray]: 29 | out = [] 30 | for i in range(len(f['ref_data'])): 31 | out.append(get_stats(f['ref_data'][str(i)])) 32 | return out 33 | 34 | 35 | def get_all_scans(f: h5py.File, filter_val=0.8) -> np.ndarray: 36 | data = get_scans(f) 37 | max_scan = min([arr.shape[0] for arr in data['Probe1']]) 38 | _, n_wl, num_frames = data['Probe1'][0].shape 39 | n_t2 = len(data['Probe1']) 40 | if 'ref_data' in f: 41 | 
has_ref = f['ref_data/0/0'].shape[1] == num_frames 42 | ref_data = get_ref_stats(f) 43 | else: 44 | has_ref = False 45 | ref_data = None 46 | lines = 3 if has_ref else 2 47 | 48 | all_scans = np.empty((lines, max_scan, n_t2, n_wl, num_frames)) 49 | for i in range(n_t2): 50 | all_scans[0, :, i, ...] = data['Probe1'][i][:max_scan, ...] 51 | all_scans[1, :, i, ...] = data['Probe2'][i][:max_scan, ...] 52 | if has_ref: 53 | assert ref_data is not None 54 | all_scans[2, :, i, ...] = ref_data[i][:max_scan, ...] 55 | if filter_val > 0: 56 | all_scans = gaussian_filter1d(all_scans, filter_val, axis=-2) 57 | return all_scans 58 | 59 | 60 | def build_basis(all_scans): 61 | pass 62 | 63 | 64 | def use_edge_ref(probe_wn, all_scan, low, high, ref_scans=2): 65 | """ 66 | This function takes the whole 2D dataset and applies edge referencing to it. 67 | After referencing, the 2D-signal is calculated and returned along with the 68 | corresponding wavenumbers. 69 | 70 | Assumes 4 phase cycling steps. 71 | """ 72 | 73 | data_idx = (probe_wn < high) & (probe_wn > low) 74 | data = all_scan[:, :, :, data_idx, :] 75 | edge = all_scan[:, :, :, ~data_idx, :] 76 | data_diffs = np.diff(data, axis=-1) 77 | 78 | edge_diffs = np.diff(edge, axis=-1) 79 | num_frames = data.shape[-1] 80 | out = np.empty_like(data_diffs)[..., ::4] 81 | max_scan = data.shape[1] 82 | for k in [0, 1]: 83 | for i in range(data.shape[1]): 84 | zero0 = np.concatenate(data_diffs[k, i, :ref_scans, ...], axis=-1) 85 | zero_base0 = np.concatenate(edge_diffs[k, i, :ref_scans, ...], axis=-1) 86 | if i != max_scan - 1: 87 | zero_tmp = np.concatenate(data_diffs[k, i+1, :ref_scans, ...], axis=-1) 88 | zero_base_tmp = np.concatenate( 89 | edge_diffs[k, i+1, :ref_scans, ...], axis=-1) 90 | zero0 = np.concatenate((zero0, zero_tmp), axis=-1) 91 | zero_base0 = np.concatenate((zero_base0, zero_base_tmp), axis=-1) 92 | 93 | coeffs = np.linalg.lstsq(zero_base0.T, zero0.T, rcond=None)[0] 94 | for j in range(ref_scans, data_diffs.shape[2]): 
95 | sig = data_diffs[k, i, j, ...] 96 | means = data[k, i, j, ...].mean(axis=-1) 97 | reffed = (edge_diffs[k, i, j, ...].T @ coeffs).T 98 | sig = (sig - reffed)[:, ::2] 99 | sig = (sig[:, :-1:2] + sig[:, 1::2])/means[:, None] 100 | fac = -1000/np.log(10) 101 | sig = np.log1p(sig)*fac 102 | sig[..., 0] *= 0.5 103 | sig *= np.hamming(num_frames/2)[sig.shape[1]:] 104 | out[k, i, j, ...] = sig 105 | out2d = np.fft.rfft(out.mean(1), axis=-1, n=2*num_frames).real 106 | return out2d, probe_wn[data_idx] 107 | 108 | 109 | def use_edge_ref_full(all_scan, probe_wn, low, high, add_higher_degree): 110 | """ 111 | This function takes the whole 2D dataset and applies edge referencing to it. 112 | After referencing, the 2D-signal is calculated and returned along with the 113 | corresponding wavenumbers. Assumes 4 phase cycling steps. 114 | 115 | 116 | Parameters 117 | ---------- 118 | all_scan : np.ndarray 119 | The 2D dataset. 120 | probe_wn : np.ndarray 121 | The wavenumbers of the dataset. 122 | low : float 123 | The lower bound of the wavenumber range to exclude. 124 | high : float 125 | The upper bound of the wavenumber range to exclude. 126 | add_higher_degree : bool 127 | Whether to add higher degree polynomials to the referencing. 
128 | """ 129 | 130 | data_idx = (probe_wn < high) & (probe_wn > low) 131 | data = all_scan[:, :, :, data_idx, :] 132 | edge = all_scan[:, :, :, ~data_idx, :] 133 | data_diffs = np.diff(data, axis=-1) 134 | edge_diffs = np.diff(edge, axis=-1) 135 | num_frames = data.shape[-1] 136 | 137 | zero_a = np.concatenate(data_diffs[0, :, :2, ...], axis=-1) 138 | zero_a = np.concatenate(zero_a, axis=-1) 139 | zero_b = np.concatenate(data_diffs[1, :, :2, ...], axis=-1) 140 | zero_b = np.concatenate(zero_b, axis=-1) 141 | zero_base_a = np.concatenate(edge_diffs[0, :, :2, ...], axis=-1) 142 | zero_base_a = np.concatenate(zero_base_a, axis=-1) 143 | zero_base_b = np.concatenate(edge_diffs[1, :, :2, ...], axis=-1) 144 | zero_base_b = np.concatenate(zero_base_b, axis=-1) 145 | 146 | if add_higher_degree: 147 | zero_base_a = np.vstack((zero_base_a, zero_base_a**2)) 148 | zero_base_b = np.vstack((zero_base_b, zero_base_b**2)) 149 | print(zero_base_a.shape, zero_base_b.shape) 150 | coefs_a = np.linalg.lstsq(zero_base_a.T, zero_a.T, rcond=None)[0] 151 | coefs_b = np.linalg.lstsq(zero_base_b.T, zero_b.T, rcond=None)[0] 152 | 153 | sig_a = data_diffs.mean(1)[0] 154 | base = np.swapaxes(edge_diffs[0].mean(0), -1, -2) 155 | 156 | if add_higher_degree: 157 | base = np.dstack((base, base**2)) 158 | 159 | reffed_a = (base @ coefs_a).swapaxes(-1, -2) 160 | 161 | sig_b = data_diffs.mean(1)[1] 162 | 163 | base = np.swapaxes(edge_diffs[1].mean(0), -1, -2) 164 | if add_higher_degree: 165 | base = np.dstack((base, base**2)) 166 | reffed_b = (base @ coefs_b).swapaxes(-1, -2) 167 | 168 | sig = np.stack((sig_a, sig_b), axis=0) 169 | reffed = np.stack((reffed_a, reffed_b), axis=0) 170 | sig.shape, reffed.shape 171 | 172 | means = data[:2].mean(1).mean(-1) 173 | sig = (sig - reffed)[..., ::2] 174 | # If not using 4 phase cycling steps, change the indexing here 175 | sig = (sig[..., :-1:2] + sig[..., 1::2])/means[..., None] 176 | fac = 1000/np.log(10) 177 | sig = np.log1p(sig)*fac 178 | sig[..., 0] *= 
0.5 179 | sig *= np.hamming(num_frames/2)[sig.shape[-1]:] 180 | 181 | out2d = np.fft.rfft(sig, axis=-1, n=4*num_frames//4).real 182 | return out2d, probe_wn[data_idx] 183 | -------------------------------------------------------------------------------- /skultrafast/examples/tutorial_figures.py: -------------------------------------------------------------------------------- 1 | """ 2 | Creating publication-ready figures 3 | ================================== 4 | 5 | While the ad-hoc settings of skultrafast are generally fine, some 6 | additional adjustment may be necessary to create figures for an article. 7 | Here, in this tutorial will the most common steps. If you think anything 8 | is missing or wrong, don't hesitate to submit an issue or pull-request. 9 | If you looking for way to do something, please also have a look at the 10 | `matplotlib documentation `__. 11 | It contains many examples and multiple tutorials. 12 | 13 | \*\ **tl;dr** 14 | 15 | Call ``plot_helpers.enable_style()`` before creating your figures. 
16 | 17 | Checklist 18 | --------- 19 | 20 | - Figure size set according to journal guidelines 21 | - Font 22 | - Selected according to journal guidelines 23 | - (optionally) mathfont selected accordingly to the main font 24 | - Distinguishable colors 25 | - Vector format used 26 | - Set rasterized ``True`` for large images and colormaps in the figure 27 | - No part of the figure is clipped 28 | - Consistent energy axes 29 | - File creating the figure is under version control 30 | 31 | 32 | Throughout the document we work with some example data 33 | """ 34 | # %% 35 | import matplotlib.pyplot as plt 36 | from skultrafast import data_io, dataset 37 | wl, t, d = data_io.load_example() 38 | ds = dataset.TimeResSpec(wl, t, d) 39 | ds.auto_plot = False 40 | cds = ds.estimate_dispersion("gauss_diff").correct_ds 41 | wls = (500, 5550, 590, 620, 630, 670) 42 | 43 | # %% 44 | # Figure size 45 | # ----------- 46 | # _Use the journal supplied figure size_ 47 | # 48 | # The most important step is to get the figure size correct. By default, 49 | # matplotlib creates a 4 in by 3 in figure (1 in = 2.54 cm) which is a reasonable 50 | # size for the screen but unsuitably large on paper. If the figure is just 51 | # manually scaled down after creation, the font-size is too small and 52 | # other small details like ticks may be not recognizable anymore. 53 | 54 | 55 | fig, ax = plt.subplots() 56 | cds.plot.trans(*wls, ax=ax) 57 | 58 | # %% 59 | # Instead, most journals author guidelines give values for the maximum width of 60 | # figures, use them. It is not always necessary to use the full 61 | # width, sometimes its okay to leave some free space. As a rule of thump, a 62 | # single column figure should have width of around 3.5 in, a two column figure 63 | # about 7.5". 
64 | 65 | fig, ax = plt.subplots(figsize=(3.5, 3.5*(3/4))) # Figure with the same 4:3 ratio 66 | cds.plot.trans(*wls, ax=ax) 67 | 68 | # %% 69 | # If the figure is too small on screen, which is often the case in the 70 | # Jupyter-notebook, resist the urge to make the figure larger. Instead, increase 71 | # the dpi of the figure. The matplotlib defaults of 100 dpi are quite low 72 | # compared to modern screens. Desktop-screens often have around 144 dpi and 73 | # laptop screens can get up 230 dpi. 74 | 75 | fig, ax = plt.subplots(figsize=(3.5, 3.5*(3/4)), dpi=144) # Figure with the same 4:3 ratio 76 | cds.plot.trans(*wls, ax=ax) 77 | 78 | # %% 79 | # Still the figure looks a little bit toy-like. This is partly caused by the 80 | # default font-size of matplotlib, which is to large for most publications. 81 | # Before we continue, we save the changes we did so far as a default and also 82 | # change the default font size. Most journals have guidelines for the font size 83 | # in figures, apply them by directly adjusting the rcParams. For additional fine 84 | # adjustment try to use relative font sizes. The suggested font-sizes for 85 | # figures are generally between 7 and 9. We also use the constrainted layout for 86 | # a better figure layout. 87 | 88 | plt.rcParams['figure.figsize'] = (3.5, 3.5*(3/4)) 89 | plt.rcParams['figure.dpi'] = 144 90 | plt.rcParams['figure.constrained_layout.use'] = True 91 | plt.rcParams['font.size'] = 8 92 | 93 | fig, ax = plt.subplots() # Figure uses rcParams as defaults 94 | cds.plot.trans(*wls, ax=ax) 95 | 96 | # %% 97 | # Font selection 98 | # -------------- 99 | # **Select a font identical or similar to the font of the publication. When in 100 | # doubt, choose Arial or Helvetica.** The default font of matplotlib is DejaVu 101 | # Sans. Its advantage it is free license and its great coverage of Unicode 102 | # glyphs. But it looks quite unique and hence conflicts with the rest of 103 | # document. 
Therefore I strongly advocate to change the font. Most journals have 104 | # their own preference. If the journal does not propose a font, I suggest to use 105 | # *Arial, Helvetica* or *TeX Gyre Heros*. While this may look boring, it 106 | # also looks professional. 107 | 108 | plt.rcParams['font.family'] = ["Arial", "Helvetica", "TeX Gyre Heros"] 109 | fig, ax = plt.subplots() # Figure uses rcParams as defaults 110 | cds.plot.trans(*wls, ax=ax) 111 | 112 | # %% 113 | # If the figure contains mathematical expressions, chaning the mathext font is 114 | # also required for an uniform look. 115 | 116 | plt.rcParams['mathtext.fontset'] = "stixsans" 117 | plt.rcParams['mathtext.default'] = 'regular' 118 | fig, ax = plt.subplots() # Figure uses rcParams as defaults 119 | cds.plot.trans(*wls, ax=ax) 120 | 121 | ax.text(0, -50, r"$A = \sum_{i=1}^n sin\left(-\frac{t}{\tau_i}\right)$") 122 | 123 | 124 | 125 | # %% 126 | # Figure composition 127 | # ------------------ 128 | # 129 | # **Try to create composite figures by code.** 130 | # 131 | # Most figures in journals are multi-panel figures. There are a two ways to 132 | # create such figures: either we create the single figures first and do the 133 | # composition of the figures with a vector graphics program like *Inkscape* or 134 | # we create the the multi-panal graphic directly in matplotlib. So which one 135 | # should be used? 136 | # 137 | # Using a vector graphics program has several advantages: wysiwyg, easy 138 | # fine-tuning and mouse support. But this bought with some serve drawbacks: if 139 | # you need change on of the sub-figures, you need to adjust the rest of the 140 | # figure as well. Also, if you have to change fonts due to a resubmission, you 141 | # have to apply it to both, the single figures and later added graphical 142 | # elements. Also, version control not commonly supported for graphics format and 143 | # exactly recreating a figure requires a lot manual steps. 
144 | # 145 | # Hence, if possible, do the whole figure in matplotlib. Initially, this can 146 | # result in a lot of manual adjustment for things like text labels. This can be 147 | # often circumvented by using the text alignment settings and changing the 148 | # transformation. 149 | # 150 | # So how to build a more complex layout? Matplotlib offers multiple ways to 151 | # layout figures. The most flexible way is to use a `gridspec 152 | # `__. For 153 | # simple cases, use the ``plt.subplots`` function. It supports the sharing of 154 | # axes. Avoid the ``plt.subplot`` function. 155 | # 156 | # Colors 157 | # ------ 158 | # 159 | # **Make your colors distinguishable.** 160 | # 161 | # The choice of colors is a matter of preference, hence different people like 162 | # different color-cycles. In general, the default matplotlib color-cycle works 163 | # quite well. Still many people prefer other colors, e.g. the matlab color 164 | # cycle, or you want to go for a more unique look. As long as you choose easily 165 | # distinguishable colors, you are fine. Remember that figures are quite often 166 | # used in presentations, hence to avoid remaking figures don't use bright colors 167 | # on a white background. In general, the color should be chosen with contrast in 168 | # mind. 169 | # 170 | # The color cycle is set via the axes cycler. See the `matplotlib documentation 171 | # `__. 172 | # Note that matplotlib also supports xkcd and CSS `color names 173 | # `__. 174 | # 175 | # File Format 176 | # ----------- 177 | # 178 | # **Use vector formats. Rasterize large artists.** 179 | # 180 | # Luckily, most publishers accept or require the figures in a vector format. 181 | # Therefore save the figure as an svg or pdf file. If you somehow must supply 182 | # a pixel format, use png and make sure the dpi is set to at least 300. 183 | # 184 | # In general, using a vector format also reduces the file size.
If the plot 185 | # contains complex colormaps or images, it is appropriate to rasterize the 186 | # corresponding artists. That means that these are embedded as an pixel graphic 187 | # within the vector figure, thus decreasing the file size and the rendering time 188 | # enormously. 189 | # 190 | # 191 | 192 | --------------------------------------------------------------------------------