├── .github
│   └── workflows
│       ├── codeql-analysis.yml
│       ├── python-package-conda.yml
│       └── python-publish.yml
├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── README.rst
├── Tutorial_Notebooks
│   ├── Data
│   │   ├── Compare
│   │   │   ├── Master
│   │   │   │   └── TA_Ru-dppz_400nm_ACN_paral.hdf5
│   │   │   ├── TA_Ru-dppz_400nm_DCM_paral.hdf5
│   │   │   ├── TA_Ru-dppz_400nm_H2O_paral.hdf5
│   │   │   └── UVvis_SEC_Rudppz_ACN.dat
│   │   ├── Fitting-1
│   │   │   ├── TA_Ru-dppz_400nm_ACN.SIA
│   │   │   ├── TA_Ru-dppz_400nm_ACN_chirp.dat
│   │   │   ├── TA_Ru-dppz_400nm_DCM.SIA
│   │   │   ├── TA_Ru-dppz_400nm_DCM_chirp.dat
│   │   │   ├── TA_Ru-dppz_400nm_H2O.SIA
│   │   │   └── TA_Ru-dppz_400nm_H2O_chirp.dat
│   │   ├── Fitting-2
│   │   │   ├── TA_Ru-dppz_400nm_ACN.SIA
│   │   │   └── TA_Ru-dppz_400nm_ACN_chirp.dat
│   │   ├── Introduction
│   │   │   ├── Fitted_Oscillations_with_confidence.hdf5
│   │   │   ├── catalysis1.SIA
│   │   │   ├── catalysis2.SIA
│   │   │   ├── con_1.SIA
│   │   │   ├── con_1_solved.hdf5
│   │   │   ├── con_2.SIA
│   │   │   ├── con_2_chirp.dat
│   │   │   ├── con_3.SIA
│   │   │   ├── con_4.SIA
│   │   │   ├── con_5.SIA
│   │   │   ├── con_6.SIA
│   │   │   ├── con_6_chirp.dat
│   │   │   ├── full_consecutive_fit.hdf5
│   │   │   ├── full_consecutive_fit_with_GS.hdf5
│   │   │   └── sample_1_chirp.dat
│   │   ├── MultiModal
│   │   │   ├── XES_on.SIA
│   │   │   └── combined_optical_spectrum.SIA
│   │   └── Scans
│   │       ├── ACN_001.SIA
│   │       ├── ACN_002.SIA
│   │       ├── ACN_003.SIA
│   │       ├── ACN_004.SIA
│   │       ├── ACN_005.SIA
│   │       ├── ACN_006.SIA
│   │       ├── ACN_007.SIA
│   │       ├── ACN_008.SIA
│   │       ├── ACN_009.SIA
│   │       ├── TA_Ru-dppz_400nm_ACN_mean.SIA
│   │       └── TA_Ru-dppz_400nm_ACN_mean_chirp.dat
│   ├── Function_library_overview.pdf
│   ├── KiMoPack_tutorial_0_Introduction.ipynb
│   ├── KiMoPack_tutorial_0_Introduction_Compact.ipynb
│   ├── KiMoPack_tutorial_1_Fitting-1_Colab.ipynb
│   ├── KiMoPack_tutorial_1_Fitting.ipynb
│   ├── KiMoPack_tutorial_2_Fitting-2_Colab.ipynb
│   ├── KiMoPack_tutorial_2_Fitting.ipynb
│   ├── KiMoPack_tutorial_3_CompareFit.ipynb
│   ├── KiMoPack_tutorial_3_CompareFit_Colab.ipynb
│   ├── KiMoPack_tutorial_4_ScanHandling.ipynb
│   ├── KiMoPack_tutorial_4_ScanHandling_Colab.ipynb
│   ├── KiMoPack_tutorial_5_MultiModal.ipynb
│   ├── function_library.py
│   ├── img
│   │   ├── Cor_Chirp.gif
│   │   ├── Fig1_parallel_model.png
│   │   ├── Fig2_consecutive_model.png
│   │   ├── Fig3_complex_model.png
│   │   ├── Intro_tutorial.png
│   │   └── Model_selection.jpg
│   └── import_library.py
├── Workflow_tools
│   ├── Data
│   │   ├── FeCM02-266nm-4mw-QB390-t6-G63-w450-s150-556ms-E100.dat
│   │   ├── FeCM02-266nm-4mw-QB390-t6-G63-w450-s150-556ms-E100_chirp.dat
│   │   ├── Sample_1.SIA
│   │   ├── Sample_2.SIA
│   │   ├── Sample_2_chirp.dat
│   │   ├── XES_diff.SIA
│   │   ├── XES_on.SIA
│   │   ├── sample_1.hdf5
│   │   ├── sample_1_chirp.dat
│   │   └── sample_2.hdf5
│   ├── Function_library_overview.pdf
│   ├── Streak_camera_analysis.ipynb
│   ├── TA_Advanced_Fit.ipynb
│   ├── TA_Raw_plotting.ipynb
│   ├── TA_Raw_plotting_and_Simple_Fit.ipynb
│   ├── TA_comparative_plotting_and_data_extraction.ipynb
│   ├── TA_single_scan_handling.ipynb
│   ├── XES_Raw_plotting_and_Simple_Fit.ipynb
│   ├── function_library.py
│   └── import_library.py
├── docs
│   ├── Makefile
│   ├── make.bat
│   └── source
│       ├── Changelog.rst
│       ├── Comparing.rst
│       ├── Fitting.rst
│       ├── Installation.rst
│       ├── Introduction.rst
│       ├── Main_tasks.rst
│       ├── Opening.rst
│       ├── Plotting.rst
│       ├── Saving.rst
│       ├── Shaping.rst
│       ├── _static
│       │   ├── KiMoPack_logo.png
│       │   ├── custom.css
│       │   └── structure.png
│       ├── conf.py
│       ├── genindex.rst
│       ├── index.rst
│       ├── modules.rst
│       ├── plot_func.rst
│       └── requirements.txt
├── environment.txt
├── pyproject.toml
└── src
    └── KiMoPack
        ├── __init__.py
        └── plot_func.py
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ "main" ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ "main" ]
20 | schedule:
21 | - cron: '22 15 * * 4'
22 |
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 | permissions:
28 | actions: read
29 | contents: read
30 | security-events: write
31 |
32 | strategy:
33 | fail-fast: false
34 | matrix:
35 | language: [ 'python' ]
36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
37 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
38 |
39 | steps:
40 | - name: Checkout repository
41 | uses: actions/checkout@v3
42 |
43 | # Initializes the CodeQL tools for scanning.
44 | - name: Initialize CodeQL
45 | uses: github/codeql-action/init@v2
46 | with:
47 | languages: ${{ matrix.language }}
48 | # If you wish to specify custom queries, you can do so here or in a config file.
49 | # By default, queries listed here will override any specified in a config file.
50 | # Prefix the list here with "+" to use these queries and those in the config file.
51 |
52 | # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
53 | # queries: security-extended,security-and-quality
54 |
55 |
56 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
57 | # If this step fails, then you should remove it and run the build manually (see below)
58 | - name: Autobuild
59 | uses: github/codeql-action/autobuild@v2
60 |
61 | # ℹ️ Command-line programs to run using the OS shell.
62 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
63 |
64 | # If the Autobuild fails above, remove it and uncomment the following three lines.
65 | # modify them (or add more) to build your code; please refer to the example below for guidance.
66 |
67 | # - run: |
68 | # echo "Run, Build Application using script"
69 | # ./location_of_script_within_repo/buildscript.sh
70 |
71 | - name: Perform CodeQL Analysis
72 | uses: github/codeql-action/analyze@v2
73 |
--------------------------------------------------------------------------------
/.github/workflows/python-package-conda.yml:
--------------------------------------------------------------------------------
1 | name: Python Package using Conda
2 |
3 | on: [push]
4 |
5 | jobs:
6 | build-linux:
7 | runs-on: ubuntu-latest
8 | strategy:
9 | max-parallel: 5
10 |
11 | steps:
12 | - uses: actions/checkout@v2
13 | - name: Set up Python 3.10
14 | uses: actions/setup-python@v2
15 | with:
16 | python-version: "3.10"
17 | - name: Add conda to system path
18 | run: |
19 | # $CONDA is an environment variable pointing to the root of the miniconda directory
20 | echo $CONDA/bin >> $GITHUB_PATH
21 | - name: Install dependencies
22 | run: |
23 | conda env update --file environment.yml --name base
24 | - name: Lint with flake8
25 | run: |
26 | conda install flake8
27 | # stop the build if there are Python syntax errors or undefined names
28 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
29 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
30 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
31 | - name: Test with pytest
32 | run: |
33 | conda install pytest
34 | pytest
35 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package to PyPI
10 |
11 | on:
12 | push:
13 | tags:
14 | - '*'
15 |
16 | jobs:
17 | build:
18 |
19 | runs-on: ubuntu-latest
20 |
21 | steps:
22 | - uses: actions/checkout@v2
23 | - name: Set up Python
24 | uses: actions/setup-python@v2
25 | with:
26 | python-version: 3.8
27 | - name: Install dependencies
28 | run: |
29 | python -m pip install --upgrade pip
30 | python -m pip install setuptools wheel twine
31 | pip install build
32 | - name: Build package
33 | run: python -m build
34 | - name: Publish package
35 | uses: pypa/gh-action-pypi-publish@release/v1
36 | with:
37 | password: ${{ secrets.PIPYKIMOPACKDEPLOY }}
38 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 | create_and_upload_pypi.bat
131 | conda/create_build_Anaconda.bat
132 | conda/Untitled.ipynb
133 |
134 | pip_upload_token.txt
135 | conda/
136 | alt/
137 | video use/
138 |
139 | _version.py
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Set the version of Python and other tools you might need
9 | build:
10 | os: ubuntu-20.04
11 | tools:
12 | python: "3.8"
13 |
14 |
15 | # Build documentation in the docs/ directory with Sphinx
16 | sphinx:
17 | configuration: docs/source/conf.py
18 |
19 | # If using Sphinx, optionally build your docs in additional formats such as PDF
20 | formats: all
21 |
22 | submodules:
23 | include: all
24 |
25 | python:
26 | install:
27 | - requirements: docs/source/requirements.txt
28 | - method: pip
29 | path: .
30 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | .. image:: https://readthedocs.org/projects/kimopack/badge/?version=latest
2 | :target: https://kimopack.readthedocs.io/en/latest/?badge=latest
3 | :alt: Documentation Status
4 |
5 | .. image:: https://img.shields.io/badge/License-GPL%20v3-blue.svg
6 | :target: http://www.gnu.org/licenses/gpl-3.0
7 | :alt: License: GPL v3
8 |
9 | .. image:: https://anaconda.org/erdzeichen/kimopack/badges/version.svg
10 | :target: https://conda.anaconda.org/erdzeichen
11 |
12 | .. image:: https://badge.fury.io/py/KiMoPack.svg
13 | :target: https://badge.fury.io/py/KiMoPack
14 |
15 | .. image:: https://anaconda.org/erdzeichen/kimopack/badges/latest_release_date.svg
16 | :target: https://anaconda.org/erdzeichen/kimopack
17 |
18 | .. image:: https://colab.research.google.com/assets/colab-badge.svg
19 | :target: https://colab.research.google.com/github/erdzeichen/KiMoPack/blob/main/Tutorial_Notebooks/KiMoPack_tutorial_0_Introduction.ipynb
20 |
21 | .. image:: https://mybinder.org/badge_logo.svg
22 | :target: https://mybinder.org/v2/gh/erdzeichen/KiMoPack/HEAD
23 |
24 | .. image:: https://zenodo.org/badge/400527965.svg
25 | :target: https://zenodo.org/badge/latestdoi/400527965
26 |
27 | KiMoPack
28 | ==========
29 |
30 | KiMoPack is a project for the handling of spectral data measured at
31 | multiple time-points. The current design is optimised for use with
32 | optical transient absorption data, but it has been successfully adapted
33 | for use with transient X-ray emission and spectro-electrochemistry
34 | data.
35 |
36 | It focuses on the main tasks an experimentalist has:
37 | loading and shaping of experiments, plotting of experiments, comparing of experiments,
38 | analysing experiments with fast and/or advanced fitting routines, and saving/exporting/presenting
39 | the results.
40 |
41 | For typical use, a series of Jupyter notebooks is provided that guides
42 | through a number of different use scenarios and suggests the
43 | parameters that are typically set.
44 |
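    | A minimal sketch of a typical session, assembled from calls used in the
    | tutorial notebooks of this repository (the file choice, parameter values and
    | save name are placeholders, not a fixed recipe):
    | 
    | .. code-block:: python
    | 
    |     import lmfit
    |     import KiMoPack.plot_func as pf
    | 
    |     ta = pf.TA('gui')                        # choose a measurement file in a dialog
    |     ta.Filter_data(value=20)                 # remove artificial values
    |     ta.Background(uplimit=-0.5)              # subtract background before time zero
    |     ta.Plot_RAW()                            # inspect the raw data
    | 
    |     ta.par = lmfit.Parameters()              # define the rate constants to fit
    |     ta.par.add('k0', value=1/2, vary=True)
    |     ta.mod = 'exponential'                   # built-in independent parallel decay model
    |     ta.Fit_Global()                          # global lifetime analysis
    |     ta.Save_project(filename='my_analysis')  # store the results
    | 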
45 | Installation
46 | --------------
47 |
48 | The basis of the program is a module called "plot_func.py" that contains all the necessary functions and classes.
49 | We recommend using a package manager to install the program.
50 |
51 | Install using "pip":
52 |
53 | .. code-block:: text
54 |
55 | $ pip install KiMoPack
56 |
57 | Install and update using "conda" from the channel erdzeichen:
58 |
59 | .. code-block:: text
60 |
61 | $ conda install -c erdzeichen kimopack
62 |
63 | Hint: the pip version is usually more recent than the conda version.
64 | The files can also be downloaded from the GitHub repository https://github.com/erdzeichen/KiMoPack or from Zenodo (see below).
65 |
66 | These commands install only KiMoPack and the strictly required dependencies. However, several modules work better if additional packages are installed, and in general it is best to install all packages at the same time. Additional packages that are generally recommended are h5py and tables (for saving files), python-pptx (for saving PowerPoint slides) and keyboard (Windows only, for interrupting fits; leave it out on Linux). Also useful is nbopen, which lets you open the notebook files with a double click; nbopen requires an activation step at the end.
67 | 
68 | 
69 | .. code-block:: text
70 |
71 | $ pip install KiMoPack h5py tables nbopen python-pptx
72 | (windows) python -m nbopen.install_win
73 | (Linux) python3 -m nbopen.install_xdg
74 | (MacOS) Clone the repository and run ./osx-install.sh
75 |
76 | Upgrade if already installed:
77 |
78 | .. code-block:: text
79 |
80 | $ pip install KiMoPack -U
81 |
82 |
83 | In general it is a good idea to create a local environment for the installation if you are using Python for many tasks. In a local environment only the packages that are needed are installed, which usually avoids conflicts. It is very easy to do:
84 |
85 | Under Windows: open the Anaconda command prompt or PowerShell (type "anaconda" in the Windows start menu).
86 | Under Linux: open a console.
87 |
88 | .. code-block:: text
89 |
90 | $ conda create --name kimopack
91 | $ conda activate kimopack
92 | $ pip install KiMoPack h5py tables keyboard nbopen python-pptx
93 |
94 | Or, if you also want to make sure to have a recent version of Python:
95 |
96 | .. code-block:: text
97 |
98 | $ conda create --name kimopack python=3.11 ipython jupyterlab jupyter
99 | $ conda activate kimopack
100 | $ pip install KiMoPack h5py tables keyboard nbopen python-pptx
101 |
102 |
103 | Error: insufficient rights: If one of the installs complains that the user does not have sufficient rights, the installation can be done by appending "--user":
104 |
105 | .. code-block:: text
106 |
107 | $ conda create --name kimopack
108 | $ conda activate kimopack
109 | $ pip install KiMoPack h5py tables keyboard nbopen python-pptx --user
110 |
111 | Error: pytables:
112 | In some versions I have been running into a problem with pytables when loading saved data.
113 | Using the conda-forge version solved this problem for me:
114 |
115 | .. code-block:: text
116 |
117 | conda install -c conda-forge pytables
118 |
119 | Best usage
120 | -----------
121 | While KiMoPack is a Python library, we facilitate its use with Jupyter notebooks. For the typical analysis tasks we have developed a series of notebooks that guide through the tasks.
122 | These notebooks can be downloaded from https://github.com/erdzeichen/KiMoPack/tree/main/Workflow_tools or via the command line.
123 |
124 | You can try any of these "lazy" one-liners:
125 |
126 | .. code-block:: text
127 |
128 | ipython -c "import KiMoPack; KiMoPack.download_notebooks()"
129 | python -c "import KiMoPack; KiMoPack.download_notebooks()"
130 | python3 -c "import KiMoPack; KiMoPack.download_notebooks()"
131 |
132 | If none of these work, start any console (under Windows, e.g., type "cmd" and hit enter). In the console, start Python by typing "python" and hitting enter, then import KiMoPack and run a function that downloads the files for you by typing "import KiMoPack; KiMoPack.download_all()". This downloads the notebooks and tutorials from GitHub for you. If you instead use "import KiMoPack; KiMoPack.download_notebooks()" then only the workflow tools are downloaded.
133 | Please copy one of these notebooks into your data analysis folder and rename it to create an analysis log of your session. For more information please see the publication https://doi.org/10.1021/acs.jpca.2c00907, the tutorial videos, or the tutorial notebooks under https://github.com/erdzeichen/KiMoPack/tree/main/Tutorial_Notebooks_for_local_use.
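    | 
    | The same download can also be run from an interactive Python session; a minimal sketch using the two download functions named above:
    | 
    | .. code-block:: text
    | 
    |     $ python
    |     >>> import KiMoPack
    |     >>> KiMoPack.download_all()        # notebooks and tutorials
    |     >>> KiMoPack.download_notebooks()  # workflow tools only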
134 |
135 | Citation
136 | ------------
137 | We have published a paper introducing the toolbox under https://doi.org/10.1021/acs.jpca.2c00907
138 |
139 | Links
140 | -----
141 | * Overview talk: I gave a recent overview talk at the LaserLab Europe meeting: https://youtu.be/z9QqVLFWYrs
142 | * Publication: https://pubs.acs.org/doi/10.1021/acs.jpca.2c00907
143 | * Documentation: https://kimopack.readthedocs.io/
144 | * PyPI Releases: https://pypi.org/project/KiMoPack/
145 | * Source Code: https://github.com/erdzeichen/KiMoPack
146 | * Issue Tracker: https://github.com/erdzeichen/KiMoPack/issues
147 | * Website: https://www.chemphys.lu.se/research/projects/kimopack/
148 | * Zenodo: https://zenodo.org/badge/latestdoi/400527965
149 | * Tutorial videos: https://www.youtube.com/channel/UCmhiK0P9wXXjs_PJaitx8BQ
150 |
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Compare/Master/TA_Ru-dppz_400nm_ACN_paral.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/Data/Compare/Master/TA_Ru-dppz_400nm_ACN_paral.hdf5
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Compare/TA_Ru-dppz_400nm_DCM_paral.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/Data/Compare/TA_Ru-dppz_400nm_DCM_paral.hdf5
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Compare/TA_Ru-dppz_400nm_H2O_paral.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/Data/Compare/TA_Ru-dppz_400nm_H2O_paral.hdf5
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Fitting-1/TA_Ru-dppz_400nm_ACN_chirp.dat:
--------------------------------------------------------------------------------
1 | -1.5237612469552094e-10,3.9345348234469423e-07,-0.00038608129048966724,0.17317879035701106,-29.8521315319899
2 |
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Fitting-1/TA_Ru-dppz_400nm_DCM_chirp.dat:
--------------------------------------------------------------------------------
1 | -1.0822751464325822e-10,2.853569552533645e-07,-0.0002881630557680664,0.1353701266975376,-24.523744364266484
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Fitting-1/TA_Ru-dppz_400nm_H2O_chirp.dat:
--------------------------------------------------------------------------------
1 | -1.2553473635922694e-10,3.2440687984857845e-07,-0.00032057470415350044,0.1472966821796405,-26.22763451458251
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Fitting-2/TA_Ru-dppz_400nm_ACN_chirp.dat:
--------------------------------------------------------------------------------
1 | -2.884956467514378e-11,1.0427830900492817e-07,-0.00013723067900582616,0.0804558285401515,-17.325192834047602
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Introduction/Fitted_Oscillations_with_confidence.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/Data/Introduction/Fitted_Oscillations_with_confidence.hdf5
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Introduction/con_1_solved.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/Data/Introduction/con_1_solved.hdf5
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Introduction/con_2_chirp.dat:
--------------------------------------------------------------------------------
1 | -1.2978149063579588e-11,4.725466178832376e-08,-6.364211327422314e-05,0.03773962948644208,-8.087836205247715
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Introduction/con_6_chirp.dat:
--------------------------------------------------------------------------------
1 | -4.011870787443943e-12,1.5227604920136978e-08,-2.203584504747727e-05,0.014551940057522323,-3.368366583891265
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Introduction/full_consecutive_fit.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/Data/Introduction/full_consecutive_fit.hdf5
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Introduction/full_consecutive_fit_with_GS.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/Data/Introduction/full_consecutive_fit_with_GS.hdf5
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Introduction/sample_1_chirp.dat:
--------------------------------------------------------------------------------
1 | -9.24533938810069e-12,3.475419190026255e-08,-4.841521982194385e-05,0.029760489437088835,-6.4626798980861
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Data/Scans/TA_Ru-dppz_400nm_ACN_mean_chirp.dat:
--------------------------------------------------------------------------------
1 | -9.379688715409935e-11,2.5186029114927185e-07,-0.0002588262218091004,0.1238382854321507,-23.000059559103793
--------------------------------------------------------------------------------
/Tutorial_Notebooks/Function_library_overview.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/Function_library_overview.pdf
--------------------------------------------------------------------------------
/Tutorial_Notebooks/KiMoPack_tutorial_2_Fitting-2_Colab.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Adavanced Kinetic Modeling\n",
8 | "## Target analysis $-$ Defining an own kinetic model"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "!pip install kimopack\n",
18 | "!pip install python-pptx\n",
19 | "!git clone https://github.com/erdzeichen/KiMoPack.git"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": null,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "import os,sys\n",
29 | "import pandas as pd\n",
30 | "import numpy as np\n",
31 | "import matplotlib,lmfit\n",
32 | "import matplotlib.pyplot as plt\n",
33 | "try:\n",
34 | " import KiMoPack.plot_func as pf\n",
35 | "except:\n",
36 | " print(\"General installation did not work, import from the same folder as a workaround\")\n",
37 | " import plot_func as pf"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "## 1) Import data"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "solvent = 'ACN' #'DCM' or 'H2O'\n",
54 | "\n",
55 | "filename = 'TA_Ru-dppz_400nm_'+str(solvent) # set name of the file to fit\n",
56 | "filepath = os.path.join('KiMoPack','Tutorial_Notebooks','Data', 'Fitting-2')# set path to file to fit\n",
57 | "ta=pf.TA(filename=filename+'.SIA', # title of the measurement file\n",
58 | " path=filepath) # path to measuremnt file\n",
59 | "\n",
60 | "#Alternative:\n",
61 | "# ta=pf.TA('gui') #and navigate to the corresponding file"
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "metadata": {},
67 | "source": [
68 | "## 2) Standard corrections"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "ta.Filter_data(value=20) # remove artificial values\n",
78 | "ta.Background(uplimit=-0.5) # subtract background before time zero\n",
79 | "ta.Cor_Chirp(shown_window=[-2.3,1.8]) # choose time-window used in the active plot"
80 | ]
81 | },
82 | {
83 | "cell_type": "markdown",
84 | "metadata": {},
85 | "source": [
86 | "## 3) Plot pre-processed data\n",
87 | "\n",
88 | "In this example the pre-processed data is visualized in three plots (as indicated in the titles), \n",
89 | "\n",
90 | "1. as kinetic traces (x: $\\Delta{}t$, y: $\\Delta$Absorbance)\n",
91 | "2. transient spectra (x: $\\lambda_{probe}$, y: $\\Delta$Absorbance)\n",
92 | "3. 2D-contour plot (x: $\\lambda_{probe}$, y: $\\Delta{}t$, z: $\\Delta$Absorbance). \n",
93 | "\n",
94 | "Several features can be used to alter the appearance of those plots (see Documentation or type ```ta.Plot?``` in the notebook). \n",
95 | "\n",
96 | "* The parameters ```rel_time``` and ```rel_wave``` are used to pre-select interesting $\\Delta{}t$ and $\\lambda_{probe}$ values to show specific kinetic traces (```plotting=1```) or transient spectra (```plotting=2```) of the dataset. \n",
97 | "* The parameters ```timelimits```, ```bordercut``` and ```intensity_range``` are specified to control the displayed region, by specifying upper and lower limits of delay times, probe wavelengths and TA signal intensities, respectively. \n",
98 | "* The scale of the TA signals can be changed to a logarithmic scaling using \\code{log\\_scale=True}. \n",
99 | "* The ```scattercut``` argument takes a probe wavelength interval that is ignored (set to zero) in the plots, to suppress the plotting of scattered excitation light. Here the scatter region was found to be between 380 and 405 nm (excitation at 400 nm).\n",
100 | "* The ```time_width_percent``` variable is set to 5%, meaning that the transient spectra are shown at the given delay time plus/minus 5% of that value (e.g. 0.522 ps means 0.5 to 0.55 ps). The respective range is indicated in the legend of the transient spectra. In all plots the unfitted data is plotted as dots, interpolated with lines (Savitzky-Golay)."
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "ta.rel_time=[0.5,1.5,20,100,1000] # certain delay times for TA spectra plot\n",
110 | "ta.rel_wave=[350,440,520,600] # certain probe wavelengths for kinetic traces plot \n",
111 | "ta.timelimits=[-1,1400] # plotted delay time range\n",
112 | "ta.bordercut=[320,770] # plotted probe wavelength range\n",
113 | "ta.intensity_range=[-55e-3,55e-3] # plotted intensity range\n",
114 | "ta.scattercut=[378,407] # ignored probe wavelength region (set to zero)\n",
115 | "ta.time_width_percent=5 # number in percent defining a delay time region \n",
116 | " # plotted in the TA spectra\n",
117 | "\n",
118 | "ta.Plot_RAW(title='Kinetic traces at selected probe wavelengths', plotting=1)\n",
119 | "ta.Plot_RAW(title='TA spectra at selected delay-times', plotting=2)\n",
120 | "ta.Plot_RAW(title='2D-Plot', plotting=0)"
121 | ]
122 | },
123 | {
124 | "cell_type": "markdown",
125 | "metadata": {},
126 | "source": [
127 | "## 4) Fitting of the data \n",
128 | "\n",
129 | "### 4a) Global analysis $-$ parallel Model\n",
130 | "\n",
131 | "Upon photoexcitation of [(tbbpy)$_2$Ru(dppz)]$^{2+}$ (**Ru-dppz**) at 400 nm an ensemble of metal-to-ligand charge-transfer (MLCT) states localized in both ligand spheres, *i.e.*, $^1$MLCT$_{tbbpy}$ and $^1$MLCT$_{dppz}$ is populated. Extensive photophysical studies revealed that the subsequent excited state dynamics is determined by the polarity and hydrogen bond donor ability of the surrounding solvent molecules. It was found that long-lived emissive states are populated in polar aprotic solvents. However, this emission switches *off* when the molecules interact with water. This interesting property is based on a solvent sensitive excited state equilibrium between a non-emissive and an emissive state localized on the phenazine (phz) and phenanthroline (phen) moiety of the dppz ligand. \n",
132 | "\n",
133 | "Extensive photophysical studies in acetonitrile revealed that due to the stabiization of the charge-transfer excited states, the *dark* phz-centered state is formed from the *bright* $^3$MLCT$_{phen}$ state and decays back to the ground state on the sub-ns timescale. This formation of a long-lived long-lived $^3$MLCT$_{phen}$ state is manifested, *i.e.*, in the spectral changes at 340 and 580 nm, which can be quantitatively described by two characteristic time-constants: the first one associated with intersystem crossing, vibrational cooling and interligand hopping and a second one attributed to the non-radiative decay of a subset of $^3$MLCT states with excess electron density on the phenazine sphere of the dppz ligand ($^3$MLCT$_{phz}$), ultimately populating the long-lived $^3$MLCT$_{phen}$ state. Hence, the three kinetic parameters ```k0```, ```k1``` and ```k2``` are added to the the parameter object. The value for ```k2``` is fixed to 180 ns as obtained from nanosecond time-resolved studies.\n",
134 | "\n",
135 | ""
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": null,
141 | "metadata": {},
142 | "outputs": [],
143 | "source": [
144 | "# Define fit parameters\n",
145 | "ta.par=lmfit.Parameters()\n",
146 | "# rate constants\n",
147 | "ta.par.add('k0',value=1/2, min=1/10.0, max=1/0.25, vary=True) \n",
148 | "ta.par.add('k1',value=1/150, min=1/200.0, max=1/10.0, vary=True) \n",
149 | "ta.par.add('k2',value=1/100000, vary=False) \n",
150 | "\n",
151 | "# time-zero parameter fixed during fit\n",
152 | "ta.par.add('t0',value=0.0, min=-0.1, max=0.1, vary=False) \n",
153 | "\n",
154 | "# pump-pulse width parameter fixed during fit\n",
155 | "ta.par.add('resolution', value=0.12, min=0.04, max=0.20, vary=False) \n",
156 | "\n",
157 | "# Select a in-build model (here: independent parallel decay)\n",
158 | "ta.mod='exponential' \n",
159 | "\n",
160 | "# set delay-time range for fit\n",
161 | "ta.timelimits=[0.35,2000] \n",
162 | "\n",
163 | "# global lifetime analysis (kinetic modeling)\n",
164 | "ta.Fit_Global()"
165 | ]
166 | },
167 | {
168 | "cell_type": "markdown",
169 | "metadata": {},
170 | "source": [
171 | "### 4a) Repeat the global analysis to estimate the errors "
172 | ]
173 | },
174 | {
175 | "cell_type": "code",
176 | "execution_count": null,
177 | "metadata": {},
178 | "outputs": [],
179 | "source": [
180 | "# fit-error estimation in a confidence interval of 95%\n",
181 | "ta.Fit_Global(confidence_level=0.95) "
182 | ]
183 | },
184 | {
185 | "cell_type": "markdown",
186 | "metadata": {},
187 | "source": [
188 | "### Plotting of error Analysis (advanced handling)\n",
189 | "Plotting of the results of the error analysis is challenging and due to the potential large amount of combinations not possible to perform automatic. However here is an example on a single parameter"
190 | ]
191 | },
192 | {
193 | "cell_type": "code",
194 | "execution_count": null,
195 | "metadata": {},
196 | "outputs": [],
197 | "source": [
198 | "ta_listen=[ta.Copy(),ta.Copy()] #create a list for comparision\n",
199 | "#the Filename can be manipulated to use the automatic naming \n",
200 | "ta_listen[0].filename=\"upper confidence limit\"\n",
201 | "ta_listen[1].filename=\"lower confidence limit\"\n",
202 | "\n",
203 | "for i in range(2):\n",
204 | " #short name for the calculated results for reduced writing\n",
205 | " par=ta.re['fit_results_rates'].copy() \n",
206 | " if i == 0:\n",
207 | " #overwrite the value with the limits\n",
208 | " par.loc['k0','value']=par.loc['k0','upper_limit']\n",
209 | " else:\n",
210 | " par.loc['k0','value']=par.loc['k0','lower_limit']\n",
211 | " # Write the fit results as input parameter\n",
212 | " ta_listen[i].par=pf.pardf_to_par(par) \n",
213 | " for key in ta_listen[i].par.keys():\n",
214 | " # Lock the parameter so that only the spectra are calculated\n",
215 | " ta_listen[i].par[key].vary=False \n",
216 | " # Run the global fit to calculate the new spectra\n",
217 | " ta_listen[i].Fit_Global() "
218 | ]
219 | },
220 | {
221 | "cell_type": "markdown",
222 | "metadata": {},
223 | "source": [
224 | "### Shaping of the plot\n",
225 | "\n",
226 | "The plot could be re-shaped using the plotting GUI. Here we need to use a trick to achieve this automatically. Firstly, we get a handle to the drawn axis, so that we can directly manipulate the plot. Secondly, we set the ylimit of the plot."
227 | ]
228 | },
229 | {
230 | "cell_type": "code",
231 | "execution_count": null,
232 | "metadata": {},
233 | "outputs": [],
234 | "source": [
235 | "ta.Compare_at_wave(fitted=True, \n",
236 | " other=ta_listen, \n",
237 | " rel_wave=[450,590], \n",
238 | " width=50, \n",
239 | " linewidth=3)\n",
240 | " \n",
241 | "ax=plt.gca() # get a handle to the drawn axis \n",
242 | "ax.set_ylim(-50e-3,20e-3) # set the ylimit of the plot"
243 | ]
244 | },
245 | {
246 | "cell_type": "markdown",
247 | "metadata": {},
248 | "source": [
249 | "### 4b) Target analysis $-$ propose a model\n",
250 | "\n",
251 | "Several studies in acetonitrile reveal that the *dark* phz-centered excited state is populated from the initially excited $^3$MLCT states. Ultimately, a *bright* $^3$MLCT$_{phen}$ is formed from the tbbpy, phen and phz centered states. Herein, it is shown how to define an own model function based on such *a priori* knowledge.\n",
252 | "\n",
253 | "### 4b.1) Define own model function\n",
254 | "\n",
255 | "Based on the literature findings a model is proposed, where initially proximal MLCT (tbbpy and phen, A) and distal MLCT states (phz, B) are populated. Subsequently those states decay forming the *bright* MLCT$_{phen}$ state (C). This in turn decays back to the ground state (C'). The respective kinetic rate constants can be written as\n",
256 | "\n",
257 | "- $\\displaystyle \\frac{d[A]}{dt} = -k_0 \\cdot [A]$\n",
258 | "- $\\displaystyle \\frac{d[B]}{dt} = -k_0 \\cdot [B]$\n",
259 | "- $\\displaystyle \\frac{d[C]}{dt} = k_0 \\cdot ([A] + [B]) -k_1 \\cdot [C]$,\n",
260 | "\n",
261 | "with brackets indicating the concentration of A, B and C. Those rate equations are defined in the python function, like:\n",
262 | "\n",
263 | "```python\n",
264 | "# state: A, d[A]/dt\n",
265 | "dc[0] = -pardf['k0']*dt*c_temp[0] + g[i]*dt\n",
266 | "# state: B, d[B]/dt\n",
267 | "dc[1] = -pardf['k0']*dt*c_temp[1] + g[i]*dt\n",
268 | "# state: C, d[C]/dt\n",
269 | "dc[2] = pardf['k0']*dt*c_temp[0] + pardf['k0']*dt*c_temp[1] - pardf['k1']*dt*c_temp[2]\n",
270 | " \n",
271 | "```\n",
272 | "\n",
273 | ""
274 | ]
275 | },
276 | {
277 | "cell_type": "code",
278 | "execution_count": null,
279 | "metadata": {},
280 | "outputs": [],
281 | "source": [
282 | "FWHM=2.35482\n",
283 | "\n",
284 | "def gauss(t,sigma=0.1,mu=0):\n",
285 | " y=np.exp(-0.5*((t-mu)**2)/sigma**2)\n",
286 | " y/=sigma*np.sqrt(2*np.pi)\n",
287 | " return y\n",
288 | "\n",
289 | "def Rudppz(times,pardf):\n",
290 | " '''\n",
291 | " Define a model where initially A and B are populated and decay forming C. \n",
292 | " Subsequently, C decays back to the ground-state (C').\n",
293 | " \n",
294 | " args:\n",
295 | " pardf: pandas.DataFrame with the column 'value' named in 'pardf' (type: \n",
296 | " pandas.DataFrame)\n",
297 | " times: vector (type:list)\n",
298 | " \n",
299 | " returns:\n",
300 | " c: DataFrame with the times as index and in the columns the an expression\n",
301 | " of the relative concentrations of A, B and C (type: dictionary)\n",
302 | " '''\n",
303 | " # create an empty concentration matrix\n",
304 | " c=np.zeros((len(times),3),dtype='float')\n",
305 | " # create IRF \n",
306 | " g=gauss(times,sigma=pardf['resolution']/FWHM,mu=pardf['t0'])\n",
307 | " # defining how many extra steps are taken between the main time_points\n",
308 | " sub_steps=10\n",
309 | " for i in range(1,len(times)):\n",
310 | " # initial change for each concentration (3 refers to the number of states) \n",
311 | " dc=np.zeros((3,1),dtype='float')\n",
312 | " # adaption of the time-intervals to the sub_steps\n",
313 | " dt=(times[i]-times[i-1])/(sub_steps)\n",
314 | " # create a temporary concentration matrix\n",
315 | " c_temp=c[i-1,:]\n",
316 | " for j in range(int(sub_steps)):\n",
317 | " # state: A, d[A]/dt\n",
318 | " dc[0] = -pardf['k0']*dt*c_temp[0] + g[i]*dt\n",
319 | " # state: B, d[B]/dt\n",
320 | " dc[1] = -pardf['k0']*dt*c_temp[1] + g[i]*dt\n",
321 | " # state: C, d[C]/dt\n",
322 | " dc[2] = pardf['k0']*dt*c_temp[0] + pardf['k0']*dt*c_temp[1] - pardf['k1']*dt*c_temp[2]\n",
323 | " for b in range(c.shape[1]):\n",
324 | " #check that all concentrations are > 0\n",
325 | " c_temp[b] =np.nanmax([(c_temp[b]+dc[b]),0.])\n",
326 | " # store temporary concentrations into the main matrix\n",
327 | " c[i,:] =c_temp\n",
328 | " c=pd.DataFrame(c,index=times)\n",
329 | " c.index.name='time' # name the delay-times\n",
330 | " c.columns=['A','B','C'] # name the species\n",
331 | "\n",
332 | " return c\n"
333 | ]
334 | },
335 | {
336 | "cell_type": "markdown",
337 | "metadata": {},
338 | "source": [
339 | "### 4b.2) Define fitting parameters"
340 | ]
341 | },
342 | {
343 | "cell_type": "code",
344 | "execution_count": null,
345 | "metadata": {},
346 | "outputs": [],
347 | "source": [
348 | "ta.par=lmfit.Parameters()\n",
349 | "# rate constants\n",
350 | "ta.par.add('k0',value=1/2.0, min=1/10.0, max=1/0.1, vary=True)\n",
351 | "ta.par.add('k1',value=1/100000, vary=False)\n",
352 | "\n",
353 | "# time-zero parameter fixed during fit\n",
354 | "ta.par.add('t0',value=0.0, min=-0.1, max=0.1, vary=False) \n",
355 | "\n",
356 | "# pump-pulse width parameter fixed during fit\n",
357 | "ta.par.add('resolution', value=0.07, min=0.04, max=0.20, vary=False) "
358 | ]
359 | },
360 | {
361 | "cell_type": "markdown",
362 | "metadata": {},
363 | "source": [
364 | "### 4b.3) Fitting of the Data $-$ Kinetic Modeling (**Ru-dppz** Model)"
365 | ]
366 | },
367 | {
368 | "cell_type": "code",
369 | "execution_count": null,
370 | "metadata": {},
371 | "outputs": [],
372 | "source": [
373 | "ta.mod=Rudppz # model selection (own model)\n",
374 | "\n",
375 | "ta.timelimits=[0.35,2000] # set delay-time range for fit\n",
376 | "ta.log_fit=False # fitting on linear time scale\n",
377 | "\n",
378 | "ta.Fit_Global() # pass parameter object (par) to global fit "
379 | ]
380 | },
381 | {
382 | "cell_type": "markdown",
383 | "metadata": {},
384 | "source": [
385 | "## 5) Plot the fit results"
386 | ]
387 | },
388 | {
389 | "cell_type": "code",
390 | "execution_count": null,
391 | "metadata": {},
392 | "outputs": [],
393 | "source": [
394 | "plt.close('all')\n",
395 | "ta.Plot_fit_output(title='2D-Plots', plotting=4)"
396 | ]
397 | },
398 | {
399 | "cell_type": "code",
400 | "execution_count": null,
401 | "metadata": {},
402 | "outputs": [],
403 | "source": [
404 | "ta.Plot_fit_output(title='summed TA signals', plotting=1)\n",
405 | "ta.Plot_fit_output(title='Decay Associated Spectra', plotting=0)\n",
406 | "ta.Plot_fit_output(title='concentration profiles', plotting=5) #or: ta.re['c'].plot()"
407 | ]
408 | },
409 | {
410 | "cell_type": "markdown",
411 | "metadata": {},
412 | "source": [
413 | "## 6) Save results"
414 | ]
415 | },
416 | {
417 | "cell_type": "code",
418 | "execution_count": null,
419 | "metadata": {},
420 | "outputs": [],
421 | "source": [
422 | "savename = filename+'_own'\n",
423 | "ta.Save_project(filename=savename, # set save name\n",
424 | " path='results') # set name of save folder\n",
425 | "\n",
426 | "ta.Save_data(save_RAW=False, # do not save pre-processed data\n",
427 | " save_Fit=True, # save pre-processed and fitted data\n",
428 | " filename=savename, # set save name\n",
429 | " path='results') # set name of save folder"
430 | ]
431 | },
432 | {
433 | "cell_type": "code",
434 | "execution_count": null,
435 | "metadata": {},
436 | "outputs": [],
437 | "source": []
438 | }
439 | ],
440 | "metadata": {
441 | "ipub": {
442 | "titlepage": {
443 | "author": "Jens Uhlig",
444 | "email": "jens.uhlig@chemphys.lu.se",
445 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
446 | "subtitle": "Main Worksheet",
447 | "title": "Transient Absorption Worksheet"
448 | }
449 | },
450 | "kernelspec": {
451 | "display_name": "Python 3 (ipykernel)",
452 | "language": "python",
453 | "name": "python3"
454 | },
455 | "language_info": {
456 | "codemirror_mode": {
457 | "name": "ipython",
458 | "version": 3
459 | },
460 | "file_extension": ".py",
461 | "mimetype": "text/x-python",
462 | "name": "python",
463 | "nbconvert_exporter": "python",
464 | "pygments_lexer": "ipython3",
465 | "version": "3.11.5"
466 | }
467 | },
468 | "nbformat": 4,
469 | "nbformat_minor": 2
470 | }
471 |
--------------------------------------------------------------------------------
/Tutorial_Notebooks/KiMoPack_tutorial_2_Fitting.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Adavanced Kinetic Modeling\n",
8 | "## Target analysis $-$ Defining an own kinetic model"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "import os,sys\n",
18 | "import pandas as pd\n",
19 | "import numpy as np\n",
20 | "import matplotlib,lmfit\n",
21 | "import matplotlib.pyplot as plt\n",
22 | "try:\n",
23 | " import KiMoPack.plot_func as pf\n",
24 | "except:\n",
25 | " print(\"General installation did not work, import from the same folder as a workaround\")\n",
26 | " import plot_func as pf\n",
27 | "%matplotlib tk\n",
28 | "#change this line to \n",
29 | "# %matplotlib qt \n",
30 | "#if you have qt installed"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {},
36 | "source": [
37 | "## 1) Import data"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "metadata": {},
44 | "outputs": [],
45 | "source": [
46 | "solvent = 'ACN' #'DCM' or 'H2O'\n",
47 | "\n",
48 | "filename = 'TA_Ru-dppz_400nm_'+str(solvent) # set name of the file to fit\n",
49 | "filepath = os.path.join(os.getcwd(), 'Data', 'Fitting-2') # set path to file to fit\n",
50 | "\n",
51 | "ta=pf.TA(filename=filename+'.SIA', # title of the measurement file\n",
52 | " path=filepath) # path to measuremnt file\n",
53 | "\n",
54 | "#Alternative:\n",
55 | "# ta=pf.TA('gui') #and navigate to the corresponding file"
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 | "## 2) Standard corrections"
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": null,
68 | "metadata": {},
69 | "outputs": [],
70 | "source": [
71 | "ta.Filter_data(value=20,replace_bad_values=0) # remove artificial values\n",
72 | "ta.Background(uplimit=-0.5) # subtract background before time zero\n",
73 | "ta.Cor_Chirp(shown_window=[-2.3,1.8]) # choose time-window used in the active plot"
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {},
79 | "source": [
80 | "## 3) Plot pre-processed data\n",
81 | "\n",
82 | "In this example the pre-processed data is visualized in three plots (as indicated in the titles), \n",
83 | "\n",
84 | "1. as kinetic traces (x: $\\Delta{}t$, y: $\\Delta$Absorbance)\n",
85 | "2. transient spectra (x: $\\lambda_{probe}$, y: $\\Delta$Absorbance)\n",
86 | "3. 2D-contour plot (x: $\\lambda_{probe}$, y: $\\Delta{}t$, z: $\\Delta$Absorbance). \n",
87 | "\n",
88 | "Several features can be used to alter the appearance of those plots (see Documentation or type ```ta.Plot?``` in the notebook). \n",
89 | "\n",
90 | "* The parameters ```rel_time``` and ```rel_wave``` are used to pre-select interesting $\\Delta{}t$ and $\\lambda_{probe}$ values to show specific kinetic traces (```plotting=1```) or transient spectra (```plotting=2```) of the dataset. \n",
91 | "* The parameters ```timelimits```, ```bordercut``` and ```intensity_range``` are specified to control the displayed region, by specifying upper and lower limits of delay times, probe wavelengths and TA signal intensities, respectively. \n",
92 | "* The scale of the TA signals can be changed to a logarithmic scaling using \\code{log\\_scale=True}. \n",
93 | "* The ```scattercut``` argument takes a probe wavelength interval that is ignored (set to zero) in the plots, to suppress the plotting of scattered excitation light. Here the scatter region was found to be between 380 and 405 nm (excitation at 400 nm).\n",
94 | "* The ```time_width_percent``` variable is set to 5%, meaning that the transient spectra are shown at the given delay time plus/minus 5% of that value (e.g. 0.522 ps means 0.5 to 0.55 ps). The respective range is indicated in the legend of the transient spectra. In all plots the unfitted data is plotted as dots, interpolated with lines (Savitzky-Golay)."
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "ta.rel_time=[0.5,1.5,20,100,1000] # certain delay times for TA spectra plot\n",
104 | "ta.rel_wave=[350,440,520,600] # certain probe wavelengths for kinetic traces plot \n",
105 | "ta.timelimits=[-1,1400] # plotted delay time range\n",
106 | "ta.bordercut=[320,770] # plotted probe wavelength range\n",
107 | "ta.intensity_range=[-55e-3,55e-3] # plotted intensity range\n",
108 | "ta.scattercut=[378,407] # ignored probe wavelength region (set to zero)\n",
109 | "ta.time_width_percent=5 # number in percent defining a delay time region \n",
110 | " # plotted in the TA spectra\n",
111 | "\n",
112 | "ta.Plot_RAW(title='Kinetic traces at selected probe wavelengths', plotting=1)\n",
113 | "ta.Plot_RAW(title='TA spectra at selected delay-times', plotting=2)\n",
114 | "ta.Plot_RAW(title='2D-Plot', plotting=0)"
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "metadata": {},
120 | "source": [
121 | "## 4) Fitting of the data \n",
122 | "\n",
123 | "### 4a) Global analysis $-$ parallel Model\n",
124 | "\n",
125 | "Upon photoexcitation of [(tbbpy)$_2$Ru(dppz)]$^{2+}$ (**Ru-dppz**) at 400 nm an ensemble of metal-to-ligand charge-transfer (MLCT) states localized in both ligand spheres, *i.e.*, $^1$MLCT$_{tbbpy}$ and $^1$MLCT$_{dppz}$ is populated. Extensive photophysical studies revealed that the subsequent excited state dynamics is determined by the polarity and hydrogen bond donor ability of the surrounding solvent molecules. It was found that long-lived emissive states are populated in polar aprotic solvents. However, this emission switches *off* when the molecules interact with water. This interesting property is based on a solvent sensitive excited state equilibrium between a non-emissive and an emissive state localized on the phenazine (phz) and phenanthroline (phen) moiety of the dppz ligand. \n",
126 | "\n",
127 | "Extensive photophysical studies in acetonitrile revealed that due to the stabiization of the charge-transfer excited states, the *dark* phz-centered state is formed from the *bright* $^3$MLCT$_{phen}$ state and decays back to the ground state on the sub-ns timescale. This formation of a long-lived long-lived $^3$MLCT$_{phen}$ state is manifested, *i.e.*, in the spectral changes at 340 and 580 nm, which can be quantitatively described by two characteristic time-constants: the first one associated with intersystem crossing, vibrational cooling and interligand hopping and a second one attributed to the non-radiative decay of a subset of $^3$MLCT states with excess electron density on the phenazine sphere of the dppz ligand ($^3$MLCT$_{phz}$), ultimately populating the long-lived $^3$MLCT$_{phen}$ state. Hence, the three kinetic parameters ```k0```, ```k1``` and ```k2``` are added to the the parameter object. The value for ```k2``` is fixed to 180 ns as obtained from nanosecond time-resolved studies.\n",
128 | "\n",
129 | ""
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "# Define fit parameters\n",
139 | "ta.par=lmfit.Parameters()\n",
140 | "# rate constants\n",
141 | "ta.par.add('k0',value=1/2, min=1/10.0, max=1/0.25, vary=True) \n",
142 | "ta.par.add('k1',value=1/150, min=1/200.0, max=1/10.0, vary=True) \n",
143 | "ta.par.add('k2',value=1/100000, vary=False) \n",
144 | "\n",
145 | "# time-zero parameter fixed during fit\n",
146 | "ta.par.add('t0',value=0.0, min=-0.1, max=0.1, vary=False) \n",
147 | "\n",
148 | "# pump-pulse width parameter fixed during fit\n",
149 | "ta.par.add('resolution', value=0.12, min=0.04, max=0.20, vary=False) \n",
150 | "\n",
151 | "# Select a in-build model (here: independent parallel decay)\n",
152 | "ta.mod='exponential' \n",
153 | "\n",
154 | "# set delay-time range for fit\n",
155 | "ta.timelimits=[0.35,2000] \n",
156 | "\n",
157 | "# global lifetime analysis (kinetic modeling)\n",
158 | "ta.Fit_Global()"
159 | ]
160 | },
161 | {
162 | "cell_type": "markdown",
163 | "metadata": {},
164 | "source": [
165 | "### 4a) Repeat the global analysis to estimate the errors "
166 | ]
167 | },
168 | {
169 | "cell_type": "code",
170 | "execution_count": null,
171 | "metadata": {},
172 | "outputs": [],
173 | "source": [
174 | "# fit-error estimation in a confidence interval of 95%\n",
175 | "ta.Fit_Global(confidence_level=0.95) "
176 | ]
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "metadata": {},
181 | "source": [
182 | "### Plotting of error Analysis (advanced handling)\n",
183 | "Plotting of the results of the error analysis is challenging and due to the potential large amount of combinations not possible to perform automatic. However here is an example on a single parameter"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "metadata": {},
190 | "outputs": [],
191 | "source": [
192 | "ta_listen=[ta.Copy(),ta.Copy()] #create a list for comparision\n",
193 | "#the Filename can be manipulated to use the automatic naming \n",
194 | "ta_listen[0].filename=\"upper confidence limit\"\n",
195 | "ta_listen[1].filename=\"lower confidence limit\"\n",
196 | "\n",
197 | "for i in range(2):\n",
198 | " #short name for the calculated results for reduced writing\n",
199 | " par=ta.re['fit_results_rates'].copy() \n",
200 | " if i == 0:\n",
201 | " #overwrite the value with the limits\n",
202 | " par.loc['k0','value']=par.loc['k0','upper_limit']\n",
203 | " else:\n",
204 | " par.loc['k0','value']=par.loc['k0','lower_limit']\n",
205 | " # Write the fit results as input parameter\n",
206 | " ta_listen[i].par=pf.pardf_to_par(par) \n",
207 | " for key in ta_listen[i].par.keys():\n",
208 | " # Lock the parameter so that only the spectra are calculated\n",
209 | " ta_listen[i].par[key].vary=False \n",
210 | " # Run the global fit to calculate the new spectra\n",
211 | " ta_listen[i].Fit_Global() "
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "metadata": {},
217 | "source": [
218 | "### Shaping of the plot\n",
219 | "\n",
220 | "The plot could be re-shaped using the plotting GUI. Here we need to use a trick to achieve this automatically. Firstly, we get a handle to the drawn axis, so that we can directly manipulate the plot. Secondly, we set the ylimit of the plot."
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": null,
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "ta.Compare_at_wave(fitted=True, \n",
230 | " other=ta_listen, \n",
231 | " rel_wave=[450,590], \n",
232 | " width=50, \n",
233 | " linewidth=3)\n",
234 | " \n",
235 | "ax=plt.gca() # get a handle to the drawn axis \n",
236 | "ax.set_ylim(-50e-3,20e-3) # set the ylimit of the plot"
237 | ]
238 | },
239 | {
240 | "cell_type": "markdown",
241 | "metadata": {},
242 | "source": [
243 | "### 4b) Target analysis $-$ propose a model\n",
244 | "\n",
245 | "Several studies in acetonitrile reveal that the *dark* phz-centered excited state is populated from the initially excited $^3$MLCT states. Ultimately, a *bright* $^3$MLCT$_{phen}$ is formed from the tbbpy, phen and phz centered states. Herein, it is shown how to define an own model function based on such *a priori* knowledge.\n",
246 | "\n",
247 | "### 4b.1) Define own model function\n",
248 | "\n",
249 | "Based on the literature findings a model is proposed, where initially proximal MLCT (tbbpy and phen, A) and distal MLCT states (phz, B) are populated. Subsequently those states decay forming the *bright* MLCT$_{phen}$ state (C). This in turn decays back to the ground state (C'). The respective kinetic rate constants can be written as\n",
250 | "\n",
251 | "- $\\displaystyle \\frac{d[A]}{dt} = -k_0 \\cdot [A]$\n",
252 | "- $\\displaystyle \\frac{d[B]}{dt} = -k_0 \\cdot [B]$\n",
253 | "- $\\displaystyle \\frac{d[C]}{dt} = k_0 \\cdot ([A] + [B]) -k_1 \\cdot [C]$,\n",
254 | "\n",
255 | "with brackets indicating the concentrations of A, B and C. These rate equations are implemented in the Python model function; note that in the implementation the pump pulse (the Gaussian IRF ```g```) additionally feeds population into A and B:\n",
256 | "\n",
257 | "```python\n",
258 | "# state: A, d[A]/dt\n",
259 | "dc[0] = -pardf['k0']*dt*c_temp[0] + g[i]*dt\n",
260 | "# state: B, d[B]/dt\n",
261 | "dc[1] = -pardf['k0']*dt*c_temp[1] + g[i]*dt\n",
262 | "# state: C, d[C]/dt\n",
263 | "dc[2] = pardf['k0']*dt*c_temp[0] + pardf['k0']*dt*c_temp[1] - pardf['k1']*dt*c_temp[2]\n",
264 | " \n",
265 | "```\n",
266 | "\n",
267 | ""
268 | ]
269 | },
270 | {
271 | "cell_type": "code",
272 | "execution_count": null,
273 | "metadata": {},
274 | "outputs": [],
275 | "source": [
276 | "FWHM=2.35482 # conversion factor between the FWHM and the sigma of a Gaussian, 2*sqrt(2*ln(2))\n",
277 | "\n",
278 | "def gauss(t,sigma=0.1,mu=0):\n",
279 | " y=np.exp(-0.5*((t-mu)**2)/sigma**2)\n",
280 | " y/=sigma*np.sqrt(2*np.pi)\n",
281 | " return y\n",
282 | "\n",
283 | "def Rudppz(times,pardf):\n",
284 | " '''\n",
285 | " Define a model where initially A and B are populated and decay forming C. \n",
286 | " Subsequently, C decays back to the ground-state (C').\n",
287 | " \n",
288 | " args:\n",
289 | "    pardf: parameter table containing the parameter values in the column\n",
290 | "    'value' (type: pandas.DataFrame)\n",
291 | "    times: vector of delay times (type: list or numpy array)\n",
292 | "    \n",
293 | "    returns:\n",
294 | "    c: DataFrame with the times as index and the relative concentrations\n",
295 | "    of A, B and C as columns (type: pandas.DataFrame)\n",
296 | " '''\n",
297 | " # create an empty concentration matrix\n",
298 | " c=np.zeros((len(times),3),dtype='float')\n",
299 | " # create IRF \n",
300 | " g=gauss(times,sigma=pardf['resolution']/FWHM,mu=pardf['t0'])\n",
301 | " # defining how many extra steps are taken between the main time_points\n",
302 | " sub_steps=10\n",
303 | " # initial change for each concentration (3 refers to the number of states) \n",
304 | " dc=np.zeros((3,1),dtype='float')\n",
305 | " for i in range(1,len(times)):\n",
306 | "        # adaptation of the time-interval to the sub_steps\n",
307 | " dt=(times[i]-times[i-1])/(sub_steps)\n",
308 | " # create a temporary concentration matrix\n",
309 | " c_temp=c[i-1,:]\n",
310 | " for j in range(int(sub_steps)):\n",
311 | " # state: A, d[A]/dt\n",
312 | " dc[0] = -pardf['k0']*dt*c_temp[0] + g[i]*dt\n",
313 | " # state: B, d[B]/dt\n",
314 | " dc[1] = -pardf['k0']*dt*c_temp[1] + g[i]*dt\n",
315 | " # state: C, d[C]/dt\n",
316 | " dc[2] = pardf['k0']*dt*c_temp[0] + pardf['k0']*dt*c_temp[1] - pardf['k1']*dt*c_temp[2]\n",
317 | " for b in range(c.shape[1]):\n",
318 | "                #make sure that no concentration becomes negative\n",
319 | " c_temp[b] =np.nanmax([(c_temp[b]+np.squeeze(dc[b])),0.])\n",
320 | " # store temporary concentrations into the main matrix\n",
321 | " c[i,:] =c_temp\n",
322 | " c=pd.DataFrame(c,index=times)\n",
323 | " c.index.name='time' # name the delay-times\n",
324 | " c.columns=['A','B','C'] # name the species\n",
325 | "\n",
326 | " return c\n"
327 | ]
328 | },
329 | {
330 | "cell_type": "markdown",
331 | "metadata": {},
332 | "source": [
333 | "### 4b.2) Define fitting parameters"
334 | ]
335 | },
336 | {
337 | "cell_type": "code",
338 | "execution_count": null,
339 | "metadata": {},
340 | "outputs": [],
341 | "source": [
342 | "ta.par=lmfit.Parameters()\n",
343 | "# rate constants\n",
344 | "ta.par.add('k0',value=1/2.0, min=1/10.0, max=1/0.1, vary=True)\n",
345 | "ta.par.add('k1',value=1/100000, vary=False)\n",
346 | "# time-zero parameter fixed during fit\n",
347 | "ta.par.add('t0',value=0.0, min=-0.1, max=0.1, vary=False) \n",
348 | "\n",
349 | "# pump-pulse width parameter fixed during fit\n",
350 | "ta.par.add('resolution', value=0.07, min=0.04, max=0.20, vary=False) "
351 | ]
352 | },
353 | {
354 | "cell_type": "markdown",
355 | "metadata": {},
356 | "source": [
357 | "### 4b.3) Fitting of the Data $-$ Kinetic Modeling (**Ru-dppz** Model)"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": null,
363 | "metadata": {},
364 | "outputs": [],
365 | "source": [
366 | "ta.mod=Rudppz # model selection (use the model function defined above)\n",
367 | "\n",
368 | "ta.timelimits=[0.35,2000] # set delay-time range for fit\n",
369 | "ta.log_fit=False # fitting on linear time scale\n",
370 | "\n",
371 | "ta.Fit_Global() # run the global fit with the parameter object stored in ta.par"
372 | ]
373 | },
374 | {
375 | "cell_type": "markdown",
376 | "metadata": {},
377 | "source": [
378 | "## 5) Plot the fit results"
379 | ]
380 | },
381 | {
382 | "cell_type": "code",
383 | "execution_count": null,
384 | "metadata": {},
385 | "outputs": [],
386 | "source": [
387 | "plt.close('all')\n",
388 | "ta.Plot_fit_output(title='2D-Plots', plotting=4)"
389 | ]
390 | },
391 | {
392 | "cell_type": "code",
393 | "execution_count": null,
394 | "metadata": {},
395 | "outputs": [],
396 | "source": [
397 | "ta.Plot_fit_output(title='summed TA signals', plotting=1)\n",
398 | "ta.Plot_fit_output(title='Decay Associated Spectra', plotting=0)\n",
399 | "ta.Plot_fit_output(title='concentration profiles', plotting=5) #or: ta.re['c'].plot()"
400 | ]
401 | },
402 | {
403 | "cell_type": "markdown",
404 | "metadata": {},
405 | "source": [
406 | "## 6) Save results"
407 | ]
408 | },
409 | {
410 | "cell_type": "code",
411 | "execution_count": null,
412 | "metadata": {},
413 | "outputs": [],
414 | "source": [
415 | "savename = filename+'_own'\n",
416 | "ta.Save_project(filename=savename, # set save name\n",
417 | " path='results') # set name of save folder\n",
418 | "\n",
419 | "ta.Save_data(save_RAW=False, # do not save pre-processed data\n",
420 | " save_Fit=True, # save pre-processed and fitted data\n",
421 | " filename=savename, # set save name\n",
422 | " path='results') # set name of save folder"
423 | ]
424 | },
425 | {
426 | "cell_type": "code",
427 | "execution_count": null,
428 | "metadata": {},
429 | "outputs": [],
430 | "source": []
431 | }
432 | ],
433 | "metadata": {
434 | "ipub": {
435 | "titlepage": {
436 | "author": "Jens Uhlig",
437 | "email": "jens.uhlig@chemphys.lu.se",
438 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
439 | "subtitle": "Main Worksheet",
440 | "title": "Transient Absorption Worksheet"
441 | }
442 | },
443 | "kernelspec": {
444 | "display_name": "Python 3 (ipykernel)",
445 | "language": "python",
446 | "name": "python3"
447 | },
448 | "language_info": {
449 | "codemirror_mode": {
450 | "name": "ipython",
451 | "version": 3
452 | },
453 | "file_extension": ".py",
454 | "mimetype": "text/x-python",
455 | "name": "python",
456 | "nbconvert_exporter": "python",
457 | "pygments_lexer": "ipython3",
458 | "version": "3.11.9"
459 | }
460 | },
461 | "nbformat": 4,
462 | "nbformat_minor": 4
463 | }
464 |
--------------------------------------------------------------------------------
/Tutorial_Notebooks/KiMoPack_tutorial_3_CompareFit.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Advanced Features\n",
8 | "## Comparison of fit results"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": 1,
14 | "metadata": {},
15 | "outputs": [
16 | {
17 | "name": "stdout",
18 | "output_type": "stream",
19 | "text": [
20 | "Qt was found consider switching to qt mode with %matplotlib qt (more comfortable)\n",
21 | "Plot_func version 7.4.17\n",
22 | "was imported from path:\n",
23 | " C:\\Users\\jensu\\anaconda3\\Lib\\site-packages\\KiMoPack\n",
24 | "The current working folder is:\n",
25 | " c:\\Users\\jensu\\Dropbox\\coding\\github\\KiMoPack\\Tutorial_Notebooks\n"
26 | ]
27 | }
28 | ],
29 | "source": [
30 | "import os,sys\n",
31 | "import pandas as pd\n",
32 | "import numpy as np\n",
33 | "import matplotlib,lmfit\n",
34 | "import matplotlib.pyplot as plt\n",
35 | "import KiMoPack.plot_func as pf\n",
36 | "%matplotlib tk\n",
37 | "#change this line to \n",
38 | "# %matplotlib qt \n",
39 | "#if you have qt installed"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "metadata": {},
45 | "source": [
46 | "## 1) Load Projects\n",
47 | "\n",
48 | "In this example it is demonstrated how to work with saved *hdf5* projects. Upon saving a project, all parameters of the analysis are dumped to the *hdf5* file. This includes the standard corrected (background, arrival time, scattercut, ...) TA data as well as the fit results and parameters. \n",
49 | "\n",
50 | "Herein, the TA projects of **Ru-dppz** collected in three solvents, namely dichloromethane (DCM), acetonitrile (ACN) and water (H$_2$O) at 400 nm excitation, are loaded and compared. Firstly, the master project (here in ACN) is loaded into the ```ta_ACN``` object. Secondly, all *hdf5* projects from a specified folder (including the data in DCM and H$_2$O) are loaded into the object ```compare_projects```. For loading the comparison projects the function ```GUI_open``` is employed. You can either read all *hdf5* projects from a folder (```project_list='all'```), or you can select single projects from the file explorer (```project_list='gui'```), as sketched below.\n",
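51 | "\n",
52 | "Since the code cell below only uses ```project_list='all'```, here is a minimal sketch of the gui variant (same folder layout assumed; a file dialog opens for the selection):\n",
53 | "\n",
54 | "```python\n",
55 | "# select individual hdf5 projects by hand instead of loading the whole folder\n",
56 | "compare_projects = pf.GUI_open(project_list='gui',\n",
57 | "                               path=os.path.join('Data', 'Compare'))\n",
58 | "```"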
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": 2,
56 | "metadata": {},
57 | "outputs": [],
58 | "source": [
59 | "# initialize ta_ACN object including the TA data in ACN\n",
60 | "ta_ACN = pf.TA(filename='TA_Ru-dppz_400nm_ACN_paral.hdf5',\n",
61 | " path=os.path.join('Data', 'Compare', 'Master'))\n",
62 | "\n",
63 | "# initialize an object including the TA data in DCM and H2O\n",
64 | "compare_projects=pf.GUI_open(project_list='all',\n",
65 | " path=os.path.join('Data', 'Compare'))\n",
66 | "\n",
67 | "# plot TA spectra of the master project\n",
68 | "ta_ACN.Plot_fit_output(title='Master project, ACN, 400 nm', plotting=[3])"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "## 2) Compare transient spectra\n",
76 | "\n",
77 | "Upon loading the TA data of several projects, the user can compare the transient spectra by plotting multiple spectra into the same figure at given delay times (```rel_time```). Herein, the TA spectra of **Ru-dppz** obtained in different solvents, namely dichloromethane (DCM), acetonitrile (ACN) and water (H$_2$O), are compared.\n",
78 | "\n",
79 | "### General settings\n",
80 | "\n",
81 | "- For a better comparison the data of each project can be normalized to the master object (in this example ```ta_ACN```). For the normalization a range is defined by the lower and upper limits of delay times and probe wavelengths (*e.g.* ```norm_window=[0.5,0.7,420,470]```). \n",
82 | "\n",
83 | "- To compare the individual data sets clearly with each other, the use of a colormap with strongly differing colors is recommended. Herein the colormap *Accent* is used. For more available maps see: https://matplotlib.org/stable/tutorials/colors/colormaps.html"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": 3,
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "norm_window = [0.5,0.7,420,470] # norm window\n",
93 | "ta_ACN.cmap=pf.cm.Accent # use a colormap with strongly differing colors for the comparison"
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "### 2a) Compare transient spectra"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 4,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "for delay_time in [1,10,100]: # plot spectra for each selected delay time\n",
110 | " ta_ACN.Compare_at_time(fitted=False, # compare pre-processed data\n",
111 | " other=compare_projects, # list of projects to compare\n",
112 | "                           rel_time=[delay_time], # selected delay times to compare\n",
113 | " norm_window=norm_window) # set norm window"
114 | ]
115 | },
116 | {
117 | "cell_type": "markdown",
118 | "metadata": {},
119 | "source": [
120 | "### 2b) Compare transient and external spectra\n",
121 | "\n",
122 | "To compare the transient spectra at a certain delay time to an external spectrum (*e.g.* spectro-electro-chemistry or steady-state absorption data), the user can pass a dataframe with such spectra to the ```Compare_at_time``` function. Herein, the absorption data of electrochemically unmodified **Ru-dppz** and its singly reduced form are loaded into a Pandas DataFrame with the wavelength column as the index. In the example the scaled difference spectrum of the reduced and unreduced complex is plotted for comparison with the TA spectra of **Ru-dppz** in ACN and H$_2$O obtained at a delay time of 1 ps."
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | "# define name of external spectra\n",
132 | "spectra_name = 'UVvis_SEC_Rudppz_ACN.dat'\n",
133 | "\n",
134 | "# define path of external spectra\n",
135 | "spectra_path = os.path.join(os.getcwd(), 'Data', 'Compare')\n",
136 | "\n",
137 | "# create dataframe of external spectra\n",
138 | "SEC_df = pd.read_csv(os.path.join(spectra_path, spectra_name), index_col=0, sep=\"\\t\", header=0)\n",
139 | "diff_spectrum=(SEC_df['red']-SEC_df['ocp'])*0.05 # create difference spectrum\n",
140 | "diff_spectrum.name='Rudppz_ACN - difference' # give it a name for the plot\n",
141 | "\n",
142 | "ta_ACN.Compare_at_time(fitted=False,\n",
143 | " rel_time=1.0, # selected delay time\n",
144 | " other=compare_projects, # list of projects to compare\n",
145 | " spectra=diff_spectrum, # external spectra to compare\n",
146 | " norm_window=norm_window) # set norm window"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "### 2c) Compare kinetic traces\n",
154 | "\n",
155 | "The kinetic traces of several projects at a given probe wavelength (```rel_wave```) can be plotted into the same figure by the ```Compare_at_wave``` function. This allows comparing the kinetics under various conditions (*e.g.* pump intensity, quencher concentration, solvent). A normalization window can be given to which all plotted curves are normalized; this window does not have to lie in the plotted region. In this example the TA kinetics of the excited-state absorption at 340 and 580 nm and of the ground-state bleach minimum at 440 nm in DCM, ACN and H$_2$O are compared."
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "metadata": {},
162 | "outputs": [],
163 | "source": [
164 | "plt.close('all')\n",
165 | "ta_ACN.cmap=pf.cm.Accent\n",
166 | "for nm in [340,440,580]: # plot kinetics at each selected wavelengths\n",
167 | " ta_ACN.timelimits=[-0.5,1500] # set timelimits of the plot\n",
168 | " ta_ACN.Compare_at_wave(fitted=False, # plot preprocessed data\n",
169 | " other=compare_projects, # list of projects to compare\n",
170 | " rel_wave=nm, # selected wavelengths to compare\n",
171 | " norm_window=norm_window) # norm window"
172 | ]
173 | }
174 | ],
175 | "metadata": {
176 | "ipub": {
177 | "titlepage": {
178 | "author": "Jens Uhlig",
179 | "email": "jens.uhlig@chemphys.lu.se",
180 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
181 | "subtitle": "Main Worksheet",
182 | "title": "Transient Absorption Worksheet"
183 | }
184 | },
185 | "kernelspec": {
186 | "display_name": "Python 3 (ipykernel)",
187 | "language": "python",
188 | "name": "python3"
189 | },
190 | "language_info": {
191 | "codemirror_mode": {
192 | "name": "ipython",
193 | "version": 3
194 | },
195 | "file_extension": ".py",
196 | "mimetype": "text/x-python",
197 | "name": "python",
198 | "nbconvert_exporter": "python",
199 | "pygments_lexer": "ipython3",
200 | "version": "3.11.9"
201 | }
202 | },
203 | "nbformat": 4,
204 | "nbformat_minor": 4
205 | }
206 |
--------------------------------------------------------------------------------
/Tutorial_Notebooks/KiMoPack_tutorial_3_CompareFit_Colab.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Advanced Features\n",
8 | "## Comparison of fit results"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "#Colab specific:\n",
18 | "!pip install kimopack\n",
19 | "!pip install python-pptx\n",
20 | "!git clone https://github.com/erdzeichen/KiMoPack.git"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "metadata": {},
27 | "outputs": [],
28 | "source": [
29 | "import os,sys\n",
30 | "import pandas as pd\n",
31 | "import numpy as np\n",
32 | "import matplotlib,lmfit\n",
33 | "import matplotlib.pyplot as plt\n",
34 | "try:\n",
35 | " import KiMoPack.plot_func as pf\n",
36 | "except:\n",
37 | " print(\"General installation did not work, try to import from the same folder as a workaround\")\n",
38 | " import plot_func as pf"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "metadata": {},
44 | "source": [
45 | "## 1) Load Projects\n",
46 | "\n",
47 | "In this example it is demonstrated how to work with saved *hdf5* projects. Upon saving a project, all parameters of the analysis are dumped to the *hdf5* file. This includes the standard corrected (background, arrival time, scattercut, ...) TA data as well as the fit results and parameters. \n",
48 | "\n",
49 | "Herein, the TA projects of **Ru-dppz** collected in three solvents, namely dichloromethane (DCM), acetonitrile (ACN) and water (H$_2$O) at 400 nm excitation, are loaded and compared. Firstly, the master project (here in ACN) is loaded into the ```ta_ACN``` object. Secondly, all *hdf5* projects from a specified folder (including the data in DCM and H$_2$O) are loaded into the object ```compare_projects```. For loading the comparison projects the function ```GUI_open``` is employed. You can either read all *hdf5* projects from a folder (```project_list='all'```) or you can select single projects from the file explorer (```project_list='gui'```)."
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": null,
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "# initialize ta_ACN object including the TA data in ACN\n",
59 | "\n",
60 | "ta_ACN = pf.TA(filename='TA_Ru-dppz_400nm_ACN_paral.hdf5',\n",
61 | " path=os.path.join('KiMoPack','Tutorial_Notebooks','Data', 'Compare', 'Master'))\n",
62 | "\n",
63 | "# initialize an object including the TA data in DCM and H2O\n",
64 | "compare_projects=pf.GUI_open(project_list='all',\n",
65 | " path=os.path.join('KiMoPack','Tutorial_Notebooks','Data', 'Compare'))\n",
66 | "\n",
67 | "# plot TA spectra of the master project\n",
68 | "ta_ACN.Plot_fit_output(title='Master project, ACN, 400 nm', plotting=[3])"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "## 2) Compare transient spectra\n",
76 | "\n",
77 | "Upon loading the TA data of several projects, the user can compare the transient spectra by plotting multiple spectra into the same figure at given delay times (```rel_time```). Herein, the TA spectra of **Ru-dppz** obtained in different solvents, namely dichloromethane (DCM), acetonitrile (ACN) and water (H$_2$O), are compared.\n",
78 | "\n",
79 | "### General settings\n",
80 | "\n",
81 | "- For a better comparison the data of each project can be normalized to the master object (in this example ```ta_ACN```). For the normalization a range is defined by the lower and upper limits of delay times and probe wavelengths (*e.g.* ```norm_window=[0.5,0.7,420,470]```). \n",
82 | "\n",
83 | "- To compare the individual data sets clearly with each other, the use of a colormap with strongly differing colors is recommended. Herein the colormap *Accent* is used. For more available maps see: https://matplotlib.org/stable/tutorials/colors/colormaps.html"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": null,
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "norm_window = [0.5,0.7,420,470] # norm window\n",
93 | "ta_ACN.cmap=pf.cm.Accent # use a colormap with strongly differing colors for the comparison"
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "### 2a) Compare transient spectra"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "for delay_time in [1,10,100]: # plot spectra for each selected delay time\n",
110 | " ta_ACN.Compare_at_time(fitted=False, # compare pre-processed data\n",
111 | " other=compare_projects, # list of projects to compare\n",
112 | "                           rel_time=[delay_time], # selected delay times to compare\n",
113 | " norm_window=norm_window) # set norm window"
114 | ]
115 | },
116 | {
117 | "cell_type": "markdown",
118 | "metadata": {},
119 | "source": [
120 | "### 2b) Compare transient and external spectra\n",
121 | "\n",
122 | "To compare the transient spectra at a certain delay time to an external spectrum (*e.g.* spectro-electro-chemistry or steady-state absorption data), the user can pass a dataframe with such spectra to the ```Compare_at_time``` function. Herein, the absorption data of electrochemically unmodified **Ru-dppz** and its singly reduced form are loaded into a Pandas DataFrame with the wavelength column as the index. In the example the scaled difference spectrum of the reduced and unreduced complex is plotted for comparison with the TA spectra of **Ru-dppz** in ACN and H$_2$O obtained at a delay time of 1 ps."
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | "# define name of external spectra\n",
132 | "spectra_name = 'UVvis_SEC_Rudppz_ACN.dat'\n",
133 | "\n",
134 | "# define path of external spectra\n",
135 | "spectra_path = os.path.join(os.getcwd(), 'Data', 'Compare')\n",
136 | "\n",
137 | "# create dataframe of external spectra\n",
138 | "SEC_df = pd.read_csv(os.path.join(spectra_path, spectra_name), index_col=0, sep=\"\\t\", header=0)\n",
139 | "diff_spectrum=(SEC_df['red']-SEC_df['ocp'])*0.05 # create difference spectrum\n",
140 | "diff_spectrum.name='Rudppz_ACN - difference' # give it a name for the plot\n",
141 | "\n",
142 | "ta_ACN.Compare_at_time(fitted=False,\n",
143 | " rel_time=1.0, # selected delay time\n",
144 | " other=compare_projects, # list of projects to compare\n",
145 | " spectra=diff_spectrum, # external spectra to compare\n",
146 | " norm_window=norm_window) # set norm window"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "### 2c) Compare kinetic traces\n",
154 | "\n",
155 | "The kinetic traces of several projects at a given probe wavelength (```rel_wave```) can be plotted into the same figure by the ```Compare_at_wave``` function. This allows comparing the kinetics under various conditions (*e.g.* pump intensity, quencher concentration, solvent). A normalization window can be given to which all plotted curves are normalized; this window does not have to lie in the plotted region. In this example the TA kinetics of the excited-state absorption at 340 and 580 nm and of the ground-state bleach minimum at 440 nm in DCM, ACN and H$_2$O are compared."
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "metadata": {},
162 | "outputs": [],
163 | "source": [
164 | "ta_ACN.cmap=pf.cm.Accent\n",
165 | "for nm in [340,440,580]: # plot kinetics at each selected wavelengths\n",
166 | " ta_ACN.timelimits=[-0.5,1500] # set timelimits of the plot\n",
167 | " ta_ACN.Compare_at_wave(fitted=False, # plot preprocessed data\n",
168 | " other=compare_projects, # list of projects to compare\n",
169 | " rel_wave=nm, # selected wavelengths to compare\n",
170 | " norm_window=norm_window) # norm window"
171 | ]
172 | }
173 | ],
174 | "metadata": {
175 | "ipub": {
176 | "titlepage": {
177 | "author": "Jens Uhlig",
178 | "email": "jens.uhlig@chemphys.lu.se",
179 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
180 | "subtitle": "Main Worksheet",
181 | "title": "Transient Absorption Worksheet"
182 | }
183 | },
184 | "kernelspec": {
185 | "display_name": "Python 3 (ipykernel)",
186 | "language": "python",
187 | "name": "python3"
188 | },
189 | "language_info": {
190 | "codemirror_mode": {
191 | "name": "ipython",
192 | "version": 3
193 | },
194 | "file_extension": ".py",
195 | "mimetype": "text/x-python",
196 | "name": "python",
197 | "nbconvert_exporter": "python",
198 | "pygments_lexer": "ipython3",
199 | "version": "3.11.5"
200 | }
201 | },
202 | "nbformat": 4,
203 | "nbformat_minor": 2
204 | }
205 |
--------------------------------------------------------------------------------
/Tutorial_Notebooks/KiMoPack_tutorial_4_ScanHandling.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Standard Features\n",
8 | "## Working with single measurement scans"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "import os,sys\n",
18 | "import pandas as pd\n",
19 | "import numpy as np\n",
20 | "import matplotlib,lmfit\n",
21 | "import matplotlib.pyplot as plt\n",
22 | "try:\n",
23 | " import KiMoPack.plot_func as pf\n",
24 | "except:\n",
25 | " print(\"General installation did not work, try to import from the same folder as a workaround\")\n",
26 | " import plot_func as pf\n",
27 | "#qt is mandatory for the functioning of this module\n",
28 | "%matplotlib tk\n",
29 | "#change this line to \n",
30 | "# %matplotlib qt \n",
31 | "#if you have qt installed"
32 | ]
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "metadata": {},
37 | "source": [
38 | "## 1) Read and average single scans\n",
39 | "\n",
40 | "Load the single scans of a data set and select scans that are to be excluded from the summary (```Summarize_scans```). To do so, up to two windows with lower and upper boundaries for delay times and probe wavelengths (*e.g.* ```[1,10,500,700]```) can be defined. The TA signals are integrated in these regions, and the resulting integrals of each scan are shown in an interactive plot. The scans to exclude from the average are selected by right-clicking the respective data points in the active window. \n",
41 | "Note: the GUI is sometimes hidden on the desktop. The window is recognizable by a little feather in the top left corner."
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {},
48 | "outputs": [],
49 | "source": [
50 | "#simple usage, select the scan files via the GUI (e.g. all files with \"ACN\" in the name and \".SIA\" ending from the \"Scans\" folder)\n",
51 | "ta=pf.Summarize_scans('gui', # use gui to select files\n",
52 | " list_to_dump='single', # select single points to be removed\n",
53 | " window1=[1,10,500,700]) # integration window"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {},
59 | "source": [
60 | "This is the advanced usage of the tool. By using \"fileending\", all files with a different ending are rejected. The \"filename_part\" option works similarly and checks whether a certain string is part of the filename, so one can e.g. filter all files with \"ACN\" in the name. The \"save_name\" is a useful option to keep track of the files; without it the result would be saved as \"combined.SIA\". The \"list_of_scans\" can be used to give e.g. a series of numbers that form the last part of the filenames. This is mainly useful if the option \"return_list_of_names\" is selected; then e.g. multiple different selection series can be combined, as sketched below. See the Manual for more information on its usage.\n",
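61 | "\n",
62 | "A minimal, hypothetical sketch of the \"list_of_scans\" / \"return_list_of_names\" usage described above (assuming the numbered scan files ACN_001.SIA to ACN_009.SIA in the Data/Scans folder; see the Manual for the exact behaviour):\n",
63 | "\n",
64 | "```python\n",
65 | "# select only the scans whose filenames end in 001-005; with return_list_of_names\n",
66 | "# the names of the selected scans are returned, so several selections can be combined\n",
67 | "selected_names = pf.Summarize_scans(path_to_scans=os.path.join('Data', 'Scans'),\n",
68 | "                                    list_of_scans=[1, 2, 3, 4, 5],\n",
69 | "                                    fileending='.SIA',\n",
70 | "                                    filename_part='ACN',\n",
71 | "                                    window1=[1,10,500,700],\n",
72 | "                                    return_list_of_names=True)\n",
73 | "```"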
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "scanfolder = os.path.join(os.getcwd(), 'Data', 'Scans') # define the scan folder; os.path.join works on Windows and Linux\n",
70 | "ta=pf.Summarize_scans(path_to_scans=scanfolder, # define path of the scan files\n",
71 | " list_of_scans=None, # read all scans from the given folder\n",
72 | " list_to_dump='single', # select single points to be removed\n",
73 | " window1=[1,10,500,700], # integration window \n",
74 | " window2=[1,10,410,470], # integration window\n",
75 | " fileending='.SIA', # file extension, ignore rest\n",
76 | " filename_part='ACN', # part of the filenames to read\n",
77 | " save_name='TA_Ru-dppz_400nm_'+'ACN'+'_mean.SIA') # set save name"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {},
84 | "outputs": [],
85 | "source": []
86 | }
87 | ],
88 | "metadata": {
89 | "ipub": {
90 | "titlepage": {
91 | "author": "Jens Uhlig",
92 | "email": "jens.uhlig@chemphys.lu.se",
93 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
94 | "subtitle": "Main Worksheet",
95 | "title": "Transient Absorption Worksheet"
96 | }
97 | },
98 | "kernelspec": {
99 | "display_name": "Python 3 (ipykernel)",
100 | "language": "python",
101 | "name": "python3"
102 | },
103 | "language_info": {
104 | "codemirror_mode": {
105 | "name": "ipython",
106 | "version": 3
107 | },
108 | "file_extension": ".py",
109 | "mimetype": "text/x-python",
110 | "name": "python",
111 | "nbconvert_exporter": "python",
112 | "pygments_lexer": "ipython3",
113 | "version": "3.9.16"
114 | }
115 | },
116 | "nbformat": 4,
117 | "nbformat_minor": 2
118 | }
119 |
--------------------------------------------------------------------------------
/Tutorial_Notebooks/KiMoPack_tutorial_4_ScanHandling_Colab.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Standard Features\n",
8 | "## Working with single measurement scans"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "#Colab specific:\n",
18 | "!pip install kimopack\n",
19 | "!pip install python-pptx\n",
20 | "!git clone https://github.com/erdzeichen/KiMoPack.git"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 2,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "name": "stdout",
30 | "output_type": "stream",
31 | "text": [
32 | "Qt was found consider switching to qt mode with %matplotlib qt (more comfortable)\n",
33 | "Plot_func version 7.5.5\n",
34 | "was imported from path:\n",
35 | " C:\\Users\\jensu\\anaconda3\\Lib\\site-packages\\KiMoPack\n",
36 | "The current working folder is:\n",
37 | " c:\\Users\\jensu\\Dropbox\\coding\\github\\KiMoPack\\Tutorial_Notebooks\n"
38 | ]
39 | }
40 | ],
41 | "source": [
42 | "import os,sys\n",
43 | "import pandas as pd\n",
44 | "import numpy as np\n",
45 | "import matplotlib,lmfit\n",
46 | "import matplotlib.pyplot as plt\n",
47 | "try:\n",
48 | " import KiMoPack.plot_func as pf\n",
49 | "except:\n",
50 | " print(\"General installation did not work, try to import from the same folder as a workaround\")\n",
51 | " import plot_func as pf\n",
52 | "#qt is mandatory for the full functioning of this module; on Colab only some of the functions will work"
53 | ]
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "metadata": {},
58 | "source": [
59 | "## 1) Read and average single scans\n",
60 | "\n",
61 | "Load the single scans of a data set and select scans that are to be excluded from the summary (```Summarize_scans```). To do so, up to two windows with lower and upper boundaries for delay times and probe wavelengths (*e.g.* ```[1,10,500,700]```) can be defined. The TA signals are integrated in these regions, and the resulting integrals of each scan are shown in an interactive plot. The scans to exclude from the average are selected by right-clicking the respective data points in the active window. \n",
62 | "Note: the GUI is sometimes hidden on the desktop. The window is recognizable by a little feather in the top left corner."
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "metadata": {},
68 | "source": [
69 | "## Please note that these interactive functions do not work on mybinder/Colab"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": null,
75 | "metadata": {},
76 | "outputs": [],
77 | "source": [
78 | "#simple usage, select all files with \"ACN\" in the name and \".SIA\" ending from the \"Scans\" folder\n",
79 | "scanfolder = os.path.join('KiMoPack','Tutorial_Notebooks','Data', 'Scans') # path when running on Colab (cloned repository)\n",
80 | "scanfolder = os.path.join('Data', 'Scans') # overwrites the line above; use this path when running locally\n",
81 | "ta=pf.Summarize_scans(\n",
82 | " path_to_scans=scanfolder,\n",
83 | " fileending='.SIA', # file extension, ignore rest\n",
84 | " filename_part='ACN', # this must be in name, ignore rest\n",
85 | " zscore_filter_level=3, # Filter sigma=3\n",
86 | " dump_times = True, \n",
87 | " replace_values = None, \n",
88 | " drop_scans = False,\n",
89 | " window1=[1,10,500,700]) # integration window"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {},
95 | "source": [
96 | "This is the advanced usage of the tool. By using \"fileending\", all files with a different ending are rejected. The \"filename_part\" option works similarly and checks whether a certain string is part of the filename, so one can e.g. filter all files with \"ACN\" in the name. The \"save_name\" is a useful option to keep track of the files; without it the result would be saved as \"combined.SIA\". The \"list_of_scans\" can be used to give e.g. a series of numbers that form the last part of the filenames. This is mainly useful if the option \"return_list_of_names\" is selected; then e.g. multiple different selection series can be combined. See the Manual for more information on its usage."
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | "## Please note that these interactive functions do not work on mybinder"
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": 1,
109 | "metadata": {},
110 | "outputs": [
111 | {
112 | "ename": "NameError",
113 | "evalue": "name 'os' is not defined",
114 | "output_type": "error",
115 | "traceback": [
116 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
117 | "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
118 | "Cell \u001b[1;32mIn[1], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m scanfolder \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mKiMoPack\u001b[39m\u001b[38;5;124m'\u001b[39m,\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTutorial_Notebooks\u001b[39m\u001b[38;5;124m'\u001b[39m,\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mData\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mScans\u001b[39m\u001b[38;5;124m'\u001b[39m)\u001b[38;5;66;03m# Define a folder, This trick works on windows and linux \u001b[39;00m\n\u001b[0;32m 2\u001b[0m scanfolder \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mData\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mScans\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m 3\u001b[0m ta\u001b[38;5;241m=\u001b[39mpf\u001b[38;5;241m.\u001b[39mSummarize_scans(path_to_scans\u001b[38;5;241m=\u001b[39mscanfolder, \u001b[38;5;66;03m# define path of the scan files\u001b[39;00m\n\u001b[0;32m 4\u001b[0m list_of_scans\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, \u001b[38;5;66;03m# read all scans from the given folder\u001b[39;00m\n\u001b[0;32m 5\u001b[0m list_to_dump\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msingle\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;66;03m# select single points to be removed\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 9\u001b[0m filename_part\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mACN\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;66;03m# part of the filenames to read\u001b[39;00m\n\u001b[0;32m 10\u001b[0m save_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTA_Ru-dppz_400nm_\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m+\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mACN\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m+\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m_mean.SIA\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
119 | "\u001b[1;31mNameError\u001b[0m: name 'os' is not defined"
120 | ]
121 | }
122 | ],
123 | "source": [
124 | "scanfolder = os.path.join('KiMoPack','Tutorial_Notebooks','Data', 'Scans') # define the scan folder; os.path.join works on Windows and Linux (Colab path)\n",
125 | "scanfolder = os.path.join('Data', 'Scans') # overwrites the line above; use this path when running locally\n",
126 | "ta=pf.Summarize_scans(path_to_scans=scanfolder, # define path of the scan files\n",
127 | " list_of_scans=None, # read all scans from the given folder\n",
128 | " list_to_dump='single', # select single points to be removed\n",
129 | " window1=[1,10,500,700], # integration window \n",
130 | " window2=[1,10,410,470], # integration window\n",
131 | " fileending='.SIA', # file extension, ignore rest\n",
132 | " filename_part='ACN', # part of the filenames to read\n",
133 | " save_name='TA_Ru-dppz_400nm_'+'ACN'+'_mean.SIA') # set save name"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {},
140 | "outputs": [],
141 | "source": []
142 | }
143 | ],
144 | "metadata": {
145 | "ipub": {
146 | "titlepage": {
147 | "author": "Jens Uhlig",
148 | "email": "jens.uhlig@chemphys.lu.se",
149 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
150 | "subtitle": "Main Worksheet",
151 | "title": "Transient Absorption Worksheet"
152 | }
153 | },
154 | "kernelspec": {
155 | "display_name": "Python 3 (ipykernel)",
156 | "language": "python",
157 | "name": "python3"
158 | },
159 | "language_info": {
160 | "codemirror_mode": {
161 | "name": "ipython",
162 | "version": 3
163 | },
164 | "file_extension": ".py",
165 | "mimetype": "text/x-python",
166 | "name": "python",
167 | "nbconvert_exporter": "python",
168 | "pygments_lexer": "ipython3",
169 | "version": "3.11.5"
170 | }
171 | },
172 | "nbformat": 4,
173 | "nbformat_minor": 2
174 | }
175 |
--------------------------------------------------------------------------------
/Tutorial_Notebooks/img/Cor_Chirp.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/img/Cor_Chirp.gif
--------------------------------------------------------------------------------
/Tutorial_Notebooks/img/Fig1_parallel_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/img/Fig1_parallel_model.png
--------------------------------------------------------------------------------
/Tutorial_Notebooks/img/Fig2_consecutive_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/img/Fig2_consecutive_model.png
--------------------------------------------------------------------------------
/Tutorial_Notebooks/img/Fig3_complex_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/img/Fig3_complex_model.png
--------------------------------------------------------------------------------
/Tutorial_Notebooks/img/Intro_tutorial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/img/Intro_tutorial.png
--------------------------------------------------------------------------------
/Tutorial_Notebooks/img/Model_selection.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Tutorial_Notebooks/img/Model_selection.jpg
--------------------------------------------------------------------------------
/Tutorial_Notebooks/import_library.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import os
4 |
5 | def NRL(filename):
6 | ds=pd.read_csv(filename, sep=',', index_col=0, header=None)
7 | ds.columns=ds.iloc[0,:]
8 | ds.drop(0,inplace=True)
9 | ds.index=ds.index.astype(float)
10 | ds.columns=ds.columns.astype(float)
11 | ds.sort_index(inplace=True,axis=0)
12 | ds.sort_index(inplace=True,axis=1)
13 | ds.astype('float')
14 | ds=ds.T
15 | ds.index.name='Time in ps'
16 | ds.columns.name='Wavelength in nm'
17 | return ds,'differential absorption in mOD','ps'
18 |
19 | def Uppsala(filename):
20 | df=pd.read_csv(filename,index_col=0,sep=',').T
21 | #df=df.fillna(0)
22 | df=df.dropna(axis=1)
23 | df.index=df.index.astype('float')
24 | df.columns=df.columns.astype('float')
25 | df.sort_index(inplace=True)
26 | df.sort_index(inplace=True,axis=1)
27 | df.astype('float')
28 | df.index.name='Time in ps'
29 | df.columns.name='Wavelength in nm'
30 | return df,'differential absorption in mOD','ps'
31 |
32 |
33 | def Ivan_horse(filename):
34 | #print(filename)
35 | import scipy.constants as const
36 | ds=pd.read_csv(filename,sep='\t',index_col=0)
37 | ds.index=ds.index.astype(float)
38 | ds.columns=ds.columns.astype(float)
39 | #ds.index=ds.index.values/1e8
40 | #ds.columns=ds.columns.values/1e5
41 | #ds=ds.apply(lambda x:x*ds.index.values)
42 | #ds=ds.apply(lambda x:x*ds.columns.values,axis=1)
43 | #per_photon=const.h*const.c/(485e-9)
44 | #ds= ds*per_photon
45 | ds.index.name='Fluence in Photons/cm2 s'
46 | ds.columns.name='Repetitions rate in Hz'
47 | ds.sort_index(inplace=True)
48 | ds.sort_index(inplace=True,axis=1)
49 | return ds.T,'PLQY'
50 |
51 | def cor_streak_lund(filename):
52 |     '''Reads the corrected ("DAC") file that is exported from the streak camera software'''
53 | ds=pd.read_csv(filename,sep='\t',index_col=0)
54 | ds.columns.name="nm"
55 | ds.index.name="Time in ps"
56 | data_type="Emission intensity"
57 | baseunit="ps"
58 | ds.index=ds.index.astype(float)
59 | ds.columns=ds.columns.astype(float)
60 | ds.sort_index(inplace=True,axis=1)
61 | ds.sort_index(inplace=True,axis=0)
62 | return ds,data_type,baseunit
63 |
64 | def streak_Lund(filename):
65 |     '''Reads the filetype that is saved by the streak camera software as "dat"'''
66 | code=str(filename).split(os.sep)[-1]#split of the path
67 | code=code.split('.')[0] #split of the dot and fileending
68 | code=code.split('-')
69 | ds=pd.read_csv(filename,sep='\t',header=None)
70 | n_times=len(ds.index.values)
71 | n_waves=len(ds.columns)
72 | times={ 't6':2000,
73 | 't5':1000,
74 | 't4':500,
75 | 't3':200,
76 | 't2':100,
77 | 't1':50}
78 | for i in [4,3,2,5]:#position can change
79 | try:
80 | times=times[code[i]]
81 | times=np.linspace(0,times,n_times)
82 | break
83 | except:
84 | pass
85 | for i in [6,5,7,8]:
86 | if code[i][0]=='w':
87 | center=code[i][1:]
88 | waves=np.linspace(float(center)-60,float(center)+75,n_waves)
89 | break
90 | else:
91 | continue
92 | ds.index=times
93 | ds.index=ds.index.astype(float)
94 | ds.columns=waves
95 | ds.columns=ds.columns.astype(float)
96 | ds.index.name='Time in ps'
97 | ds.columns.name='Wavelength in nm'
98 | return ds,'emission intensity'
99 |
100 | def Amine_func(filename):
101 | df=pd.read_csv(filename,sep='\t',header=None)
102 | wavelength=pd.Series(np.linspace(343.33,656.03,512))
103 | time=pd.Series(np.linspace(0,50.500,512))
104 | df.columns=wavelength
105 | df.index=time
106 | df.index=df.index.astype(float)
107 | df.columns=df.columns.astype(float)
108 | df.index.name='Time in ns'
109 | df.columns.name='Wavelength in nm'
110 | return df,'differential absorption','ns'
111 |
--------------------------------------------------------------------------------
/Workflow_tools/Data/FeCM02-266nm-4mw-QB390-t6-G63-w450-s150-556ms-E100_chirp.dat:
--------------------------------------------------------------------------------
1 | 0,0,0,0,556
--------------------------------------------------------------------------------
/Workflow_tools/Data/Sample_2_chirp.dat:
--------------------------------------------------------------------------------
1 | -1.7756381920270246e-11,6.568827058781319e-08,-8.996494264222904e-05,0.05401939947010713,-11.641690692911363
--------------------------------------------------------------------------------
/Workflow_tools/Data/sample_1.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Workflow_tools/Data/sample_1.hdf5
--------------------------------------------------------------------------------
/Workflow_tools/Data/sample_1_chirp.dat:
--------------------------------------------------------------------------------
1 | -9.24533938810069e-12,3.475419190026255e-08,-4.841521982194385e-05,0.029760489437088835,-6.4626798980861
--------------------------------------------------------------------------------
/Workflow_tools/Data/sample_2.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Workflow_tools/Data/sample_2.hdf5
--------------------------------------------------------------------------------
/Workflow_tools/Function_library_overview.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/Workflow_tools/Function_library_overview.pdf
--------------------------------------------------------------------------------
/Workflow_tools/Streak_camera_analysis.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Set standard imports"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": null,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import matplotlib,lmfit\n",
17 | "import matplotlib.pyplot as plt\n",
18 | "import numpy as np\n",
19 | "import pandas as pd\n",
20 | "\n",
21 | "import KiMoPack.plot_func as pf\n",
22 | "from importlib import reload\n",
23 | "reload(pf)\n",
24 | "import import_library as il\n",
25 | "reload(il)\n"
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 2,
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "ta=pf.TA('gui',conversion_function=il.cor_streak_lund)\n",
35 | "#ta=pf.TA('recent',conversion_function=il.cor_streak_lund)\n",
36 | "#ta=pf.TA('FeCM02-266nm-4mw-QB390-t6-G63-w450-s150-556ms-E100.dat',conversion_function=il.streak_Lund,path='Data') #use "
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "# Coarse adjustment\n",
46 | "ta.intensity_range=[0,ta.ds.mean().max()]\n",
47 | "if 0: # set to 1 for adjusting the zero point with a GUI\n",
48 | " %matplotlib tk\n",
49 | " ta.timelimits=[ta.ds.index.values.min(),ta.ds.index.values.max()]\n",
50 | " ta.Cor_Chirp(shown_window=ta.timelimits,just_shift=True,cmap=pf.cm.jet)\n",
51 | " if 0: # set to 1 for fine adjustment\n",
52 | " shown_window=np.array([-50,50])+ta.fitcoeff[-1]\n",
53 | " ta.Man_Chirp(shown_window=shown_window,just_shift=True,cmap=pf.cm.jet)\n",
54 | "else: #use a manual plot to select t0\n",
55 | " %matplotlib inline\n",
56 | " pf.halfsize=True\n",
57 | " pf.changefonts()\n",
58 | " ta.Plot_RAW([0,1])\n",
59 | " t0=206\n",
60 | " ta.Cor_Chirp(fitcoeff=[0,0,0,0,t0])"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "plt.close('all')\n",
70 | "#ta.Background(uplimit=5,lowlimit=-100)\n",
71 | "#ta.rel_wave=np.arange(480,560,10)#[400,410,420,460,480]\n",
72 | "#ta.bordercut=[470,560]\n",
73 | "#ta.rel_time=[-10,10,30,100,200,300,1000,1500]\n",
74 | "#ta.timelimits=[1,1810]\n",
75 | "#ta.wave_nm_bin=None\n",
76 | "#ta.wavelength_bin=10\n",
77 | "#ta.log_scale=False\n",
78 | "#ta.intensity_range=[-2e3,3e4]\n",
79 | "ta.Plot_RAW(range(3),scale_type='log')"
80 | ]
81 | },
82 | {
83 | "cell_type": "markdown",
84 | "metadata": {},
85 | "source": [
86 | "### Perform fast fitting using independent exponential decay (first order)"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "plt.close('all') # Close old spectra\n",
96 | "ta.mod='exponential' # Choose the model \n",
97 | "par=lmfit.Parameters() # create empty parameter object\n",
98 | "par.add('k0',value=1/10,vary=True,min=1/100) # Add first rate component\n",
99 | "par.add('k1',value=1/500,vary=True) # Add second rate component\n",
100 | "par.add('t0',value=0.28,vary=True) # Allow the arrival time to adjust (for all)\n",
101 | "par.add('resolution',value=67,vary=True) # Allow the instrument response to adjust (for all)\n",
102 | "#par.add('infinite') # Keyword for a non-decaying component\n",
103 | "par.add('background') # Keyword to fit the Background \n",
104 | "ta.par=par # write parameter object \n",
105 | "ta.Fit_Global(fit_chirp=False) # trigger fitting\n",
106 | "\n",
107 | "ta.error_matrix_amplification=1\n",
108 | "ta.Plot_fit_output([0,1,2,4]) # plot the fit output"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {},
115 | "outputs": [],
116 | "source": [
117 | "pf.halfsize=False\n",
118 | "pf.changefonts()\n",
119 | "ta.Save_Powerpoint()\n",
120 | "pf.halfsize=True\n",
121 | "pf.changefonts()"
122 | ]
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "metadata": {},
127 | "source": [
128 | "### Perform Error analysis and calculate confidence interval"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "metadata": {},
135 | "outputs": [],
136 | "source": [
137 | "ta.par=ta.par_fit #write the best results back as starting parameter\n",
138 | "ta.Fit_Global(confidence_level=0.66)"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": null,
144 | "metadata": {},
145 | "outputs": [],
146 | "source": [
147 | "plt.close('all')\n",
148 | "ta.mod='consecutive' # very quick fit using independent exponential followed by one round of sequential\n",
149 | "ta.par=ta.par_fit # copy fitted parameter into a new fit (e.g. different model)\n",
150 | "ta.Fit_Global() # The iterative chirp fit is best optimised using 'exponential'\n",
151 | "ta.Plot_fit_output([0,1,2,4]) # plot the fit output"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": null,
157 | "metadata": {},
158 | "outputs": [],
159 | "source": []
160 | }
161 | ],
162 | "metadata": {
163 | "ipub": {
164 | "titlepage": {
165 | "author": "Jens Uhlig",
166 | "email": "jens.uhlig@chemphys.lu.se",
167 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
168 | "subtitle": "Main Worksheet",
169 | "title": "Transient Absorption Worksheet"
170 | }
171 | },
172 | "kernelspec": {
173 | "display_name": "base",
174 | "language": "python",
175 | "name": "python3"
176 | },
177 | "language_info": {
178 | "codemirror_mode": {
179 | "name": "ipython",
180 | "version": 3
181 | },
182 | "file_extension": ".py",
183 | "mimetype": "text/x-python",
184 | "name": "python",
185 | "nbconvert_exporter": "python",
186 | "pygments_lexer": "ipython3",
187 | "version": "3.11.5"
188 | }
189 | },
190 | "nbformat": 4,
191 | "nbformat_minor": 2
192 | }
193 |
--------------------------------------------------------------------------------
/Workflow_tools/TA_Advanced_Fit.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Set standard imports"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": null,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import matplotlib,lmfit,os,sys,pandas # pandas is used for the external-spectra example below\n",
17 | "import matplotlib.pyplot as plt\n",
18 | "import numpy as np\n",
19 | "%matplotlib tk\n",
20 | "#for more comfort use\n",
21 | "#%matplotlib qt\n",
22 | "import KiMoPack.plot_func as pf\n",
23 | "from importlib import reload\n",
24 | "#reload(pf)"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "### Load Data and perform standard corrections"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": null,
37 | "metadata": {},
38 | "outputs": [],
39 | "source": [
40 | "ta=pf.TA('sample_1.hdf5',path='Data') #reload saved projects using the same syntax, this reloads the parameter too"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "### Perform deeper fitting using external fitting function"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "metadata": {},
54 | "outputs": [],
55 | "source": [
56 | "import function_library as func # load the file with the example functions; it must be in the active folder\n",
57 | "reload(func) # useful if you modify the function library while working"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "metadata": {},
64 | "outputs": [],
65 | "source": [
66 | "plt.close('all')\n",
67 | "ta.mod=func.P12 # if you want to use function P12 from the standard function library\n",
68 | "#ta.mod=func.consec_oscil # if you want to fit a consecutive model with oscillations\n",
69 | "par=lmfit.Parameters()\n",
70 | "par.add('k0',value=1/1.6,vary=True) # this function has a lot of parameters, they can be named freely\n",
71 | "par.add('k1',value=1/0.4,vary=True) # but a rate must start with \"k\" (this is how it is recognized)\n",
72 | "par.add('k2',value=1/6e11,vary=True)\n",
73 | "#par.add('f0',value=5) # add an oscillation with this frequency\n",
74 | "#par.add('tk0',value=5) # (optional) add a decay of the oscillation\n",
75 | "\n",
76 | "par.add('t0',value=0.025,min=-2,max=2,vary=False) # if applicable, keep those\n",
77 | "par.add('resolution',value=0.25,min=0.04,max=0.5,vary=False)\n",
78 | "#par.add('infinite')\n",
79 | "#par.add('background')\n",
80 | "#par.add('explicit_GS') # add explicit background\n",
81 | "ta.par=par\n",
82 | "\n",
83 | "ta.Fit_Global() # Fitting syntax stays the same"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": null,
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "ta.Plot_fit_output()"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": null,
98 | "metadata": {},
99 | "outputs": [],
100 | "source": [
101 | "ta.Save_Powerpoint()"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "metadata": {},
108 | "outputs": [],
109 | "source": [
110 | "ta.par=ta.par_fit #reuse the fitted parameters\n",
111 | "ta.Fit_Global(dump_paras=True)\n",
112 | "ta.Fit_Global(dump_shapes=True)"
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "metadata": {},
118 | "source": []
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": null,
123 | "metadata": {},
124 | "outputs": [],
125 | "source": [
126 | "ds=pandas.read_csv('spectra.csv',index_col=0)\n",
127 | "ds.columns=['GS']\n",
128 | "ta.Fit_Global(ext_spectra=ds)\n",
129 | "\n",
130 | "ta.par.add('ext_spectra_shift',value=0,vary=True)\n",
131 | "ta.par.add('ext_spectra_scale',value=0,vary=True)\n",
132 | "\n",
133 | "ta.par.add('ext_spectra_guide',value=0,vary=True)"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "metadata": {},
139 | "source": [
140 | "### Optimise Chirp"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "metadata": {},
147 | "outputs": [],
148 | "source": [
149 | "ta1=ta.Copy() # before chirp fitting it is recommended to make a copy of the project\n",
150 | "ta1.Fit_Global(fit_chirp=True)"
151 | ]
152 | },
153 | {
154 | "cell_type": "markdown",
155 | "metadata": {},
156 | "source": [
157 | "### Error Analysis"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "metadata": {},
164 | "outputs": [],
165 | "source": [
166 | "ta.Fit_Global(confidence_level=0.95) "
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {},
172 | "source": [
173 | "### Fitting multiple datasets"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "metadata": {},
180 | "outputs": [],
181 | "source": [
182 |     "ta1=pf.TA(filename='sample_2.hdf5',path='Data') # let's load a second project, e.g. measured at a different power\n",
183 | "other_projects=[ta1]\n",
184 | "\n",
185 | "other_projects=[pf.TA(filename,path='Data') for filename in ['sample_2.hdf5','sample_3.hdf5']]\n",
186 | "\n",
187 | "other_projects=pf.GUI_open()"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "metadata": {},
194 | "outputs": [],
195 | "source": [
196 |     "ta.Fit_Global(multi_project=other_projects) #fit the projects together\n",
197 |     "ta.Fit_Global(multi_project=other_projects,unique_parameter=['f0']) #unique_parameter additionally freezes the listed parameter(s) (e.g. ones that were optimized before)\n",
198 | "ta.Fit_Global(multi_project=other_projects,unique_parameter=['f0'],weights=[1,2])\n",
199 | "ta.Fit_Global(multi_project=other_projects,same_DAS = True)"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {},
205 | "source": [
206 | "### Using Global minimum search"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": null,
212 | "metadata": {},
213 | "outputs": [],
214 | "source": [
215 | "plt.close('all')\n",
216 | "ta.mod=func.P12 # if you want to use function P12 from the standard function library\n",
217 | "par=lmfit.Parameters()\n",
218 | "par.add('k0',value=1/1.6,vary=True,min=1/4,max=1) \n",
219 | "par.add('k1',value=1/0.4,vary=True,min=1/4,max=1) \n",
220 | "par.add('k2',value=1/6e11,vary=True,min=1/1e13,max=1)\n",
221 | "par.add('t0',value=0.025,min=-2,max=2,vary=False)\n",
222 | "par.add('resolution',value=0.25,min=0.04,max=0.5,vary=False)\n",
223 | "#par.add('background')\n",
224 | "#par.add('infinite')\n",
225 | "ta.par=par\n",
226 | "ta.Fit_Global(use_ampgo=True) # Fitting syntax stays the same\n",
227 |     "'''Sometimes this optimization runs into extremely small values that overflow the double precision \n",
228 |     "(producing a lot of warnings). As a temporary fix, enable the following line to turn off all warnings.\n",
229 |     "As these solutions will most likely be rejected anyhow, this should not be a big problem. An alternative is to \n",
230 |     "give more restrictive limits.'''\n",
231 | "#np.seterr(all=\"ignore\")"
232 | ]
233 | },
234 | {
235 | "cell_type": "markdown",
236 | "metadata": {},
237 | "source": [
238 | "### export the results as images and save the project"
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": null,
244 | "metadata": {},
245 | "outputs": [],
246 | "source": [
247 | "ta.Save_project() # This saves the whole project including raw data, fits and chirp correction into a single file"
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": null,
253 | "metadata": {},
254 | "outputs": [],
255 | "source": [
256 |     "ta.Save_Plots(savetype='png') # This saves all the figures and an overview powerpoint; savetype can also be a list, e.g. ['png','svg']"
257 | ]
258 | },
259 | {
260 | "cell_type": "markdown",
261 | "metadata": {},
262 | "source": [
263 | "### Sometimes required options applicable also to fit plotting"
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": null,
269 | "metadata": {},
270 | "outputs": [],
271 | "source": [
272 | "ta.Man_Chirp() # used to redo the chirp-correction\n",
273 | "ta.Save_data() # used to dump the Data and fits to disk\n",
274 | "\n",
275 | "ta.cmap=pf.cm.viridis # use different colour scheme (can also be a list of colours)\n",
276 | "pf.changefonts(weight='bold',font='standard',SMALL_SIZE=11,MEDIUM_SIZE=13,LARGE_SIZE=18) "
277 | ]
278 | }
279 | ],
280 | "metadata": {
281 | "ipub": {
282 | "titlepage": {
283 | "author": "Jens Uhlig",
284 | "email": "jens.uhlig@chemphys.lu.se",
285 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
286 | "subtitle": "Main Worksheet",
287 | "title": "Transient Absorption Worksheet"
288 | }
289 | },
290 | "kernelspec": {
291 | "display_name": "Python 3 (ipykernel)",
292 | "language": "python",
293 | "name": "python3"
294 | },
295 | "language_info": {
296 | "codemirror_mode": {
297 | "name": "ipython",
298 | "version": 3
299 | },
300 | "file_extension": ".py",
301 | "mimetype": "text/x-python",
302 | "name": "python",
303 | "nbconvert_exporter": "python",
304 | "pygments_lexer": "ipython3",
305 | "version": "3.11.9"
306 | }
307 | },
308 | "nbformat": 4,
309 | "nbformat_minor": 4
310 | }
311 |
--------------------------------------------------------------------------------
/Workflow_tools/TA_Raw_plotting.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Set standard imports"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": null,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import matplotlib,lmfit\n",
17 | "import matplotlib.pyplot as plt\n",
18 | "import numpy as np\n",
19 | "import pandas as pd\n",
20 | "%matplotlib tk\n",
21 | "#for more comfort use\n",
22 | "#%matplotlib qt\n",
23 | "import KiMoPack.plot_func as pf\n",
24 | "#from KiMoPack import pf"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "### Load Data and perform standard corrections"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": null,
37 | "metadata": {},
38 | "outputs": [],
39 | "source": [
40 | "ta=pf.TA('sample_1.SIA',path='Data') #type filename and path (leave path empty if in the same folder)\n",
41 | "#ta=pf.TA('sample_1.hdf5',path='Data') #reload saved projects using the same syntax\n",
42 | "#ta=pf.TA('gui') #use a GUI to select data\n",
43 | "ta.Filter_data(value=20) #remove bad data\n",
44 |     "ta.Cor_Chirp() #correct for arrival time differences; ta.Cor_Chirp('gui') allows choosing the chirp file \n",
45 |     "ta.Background(uplimit=-0.5) #subtract the background before time zero"
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "metadata": {},
51 | "source": [
52 | "### Set parameter for plotting and plot the usual spectra"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "metadata": {},
59 | "outputs": [],
60 | "source": [
61 | "plt.close('all')\n",
62 |     "#ta.rel_wave=[390,420,440,530,580,700,800,950,930,1050] # use to manually select interesting wavelengths for the kinetics\n",
63 | "ta.rel_wave=np.arange(360,850,50) \n",
64 | "ta.wavelength_bin=10 # width of wavelength bin in kinetics\n",
65 | "ta.wave_nm_bin=5; # rebinning wavelength\n",
66 | "#ta.rel_time=[0.3,0.4,1,3,5,10,20,30,100,300,2000,] # use to manually set interesting times for the spectra\n",
67 | "ta.time_width_percent=0 # rebinning of time_points in percent\n",
68 | "ta.timelimits=[-1,5000] # use to manually limit the range to plot (and fit) \n",
69 | "ta.log_scale=False # use to plot the 2d plots with logarithmic intensity scale\n",
70 | "ta.bordercut=[350,1100] # use to set the outer wavelength limits\n",
71 | "ta.scattercut=[522,605] # this region is blanked out to block scatter, this can be a list\n",
72 | "ta.intensity_range=[-0.1e-3,3.2e-3] # set a value 5e-3 or a range [-1e-3,5e-3] for 2d plotting\n",
73 | "ta.Plot_RAW() # Plot Raw Spectra"
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {},
79 | "source": [
80 | "### Commonly used saving options"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": null,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "ta.Save_project() # This saves the whole project including raw data, fits and chirp correction into a single file"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": null,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 |     "ta.Save_Plots(savetype='png') # This saves all the figures and an overview powerpoint; savetype can also be a list, e.g. ['png','svg']"
99 | ]
100 | },
101 | {
102 | "cell_type": "markdown",
103 | "metadata": {},
104 | "source": [
105 | "### Sometimes required options"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "ta.Man_Chirp() # used to redo the chirp-correction\n",
115 | "ta.Save_data() # used to dump the Data and fits to disk\n",
116 | "\n",
117 | "ta.cmap=pf.cm.viridis # use different colour scheme (can also be a list of colours)\n",
118 | "pf.changefonts(weight='bold',font='standard',SMALL_SIZE=11,MEDIUM_SIZE=13,LARGE_SIZE=18) "
119 | ]
120 | }
121 | ],
122 | "metadata": {
123 | "ipub": {
124 | "titlepage": {
125 | "author": "Jens Uhlig",
126 | "email": "jens.uhlig@chemphys.lu.se",
127 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
128 | "subtitle": "Main Worksheet",
129 | "title": "Transient Absorption Worksheet"
130 | }
131 | },
132 | "kernelspec": {
133 | "display_name": "Python 3 (ipykernel)",
134 | "language": "python",
135 | "name": "python3"
136 | },
137 | "language_info": {
138 | "codemirror_mode": {
139 | "name": "ipython",
140 | "version": 3
141 | },
142 | "file_extension": ".py",
143 | "mimetype": "text/x-python",
144 | "name": "python",
145 | "nbconvert_exporter": "python",
146 | "pygments_lexer": "ipython3",
147 | "version": "3.9.16"
148 | }
149 | },
150 | "nbformat": 4,
151 | "nbformat_minor": 2
152 | }
153 |
--------------------------------------------------------------------------------
/Workflow_tools/TA_Raw_plotting_and_Simple_Fit.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Set standard imports"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": null,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import matplotlib,lmfit\n",
17 | "import matplotlib.pyplot as plt\n",
18 | "import numpy as np\n",
19 | "import pandas as pd\n",
20 | "%matplotlib tk\n",
21 | "#for more comfort use\n",
22 | "#%matplotlib qt\n",
23 | "import KiMoPack.plot_func as pf\n",
24 | "from importlib import reload\n",
25 | "reload(pf)"
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "### Load and inspect Data "
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "ta=pf.TA('sample_1.SIA',path='Data') #type filename and path (leave path empty if in the same folder)\n",
42 | "#ta=pf.TA(\"sample_1.hdf5\",path=\"Data\") #reload saved projects using the same syntax\n",
43 | "#ta=pf.TA('gui') #use a GUI to select data"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "### perform standard corrections"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {},
57 | "outputs": [],
58 | "source": [
59 |     "ta.Filter_data(value=21) #remove bad data; here everything above abs=21 is considered bad; comment out if not needed\n",
60 |     "ta.intensity_range=ta.ds.describe().median(axis=1)['75%']*2\n",
61 |     "ta.Cor_Chirp() #correct for arrival time differences; ta.Cor_Chirp('gui') allows choosing the chirp file "
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "metadata": {},
67 | "source": [
68 | "### Shape data and plot Raw images"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "plt.close('all')\n",
78 | "ta.Background() # Remove the background before t=0\n",
79 |     "#ta.rel_wave=[390,420,440,530,580,700,800,950,930,1050] # use to manually select interesting wavelengths for the kinetics\n",
80 | "ta.wavelength_bin=20 # width of wavelength bin in kinetics\n",
81 | "#ta.wave_nm_bin=5; # rebinning wavelength\n",
82 | "#ta.rel_time=[0.3,0.4,1,3,5,10,20,30,100,300,2000,3000] # use to manually set interesting times for the spectra\n",
83 | "ta.time_width_percent=10 # rebinning of time_points in percent\n",
84 | "ta.timelimits=[-1,5000] # use to manually limit the range to plot (and fit) \n",
85 | "#ta.log_scale=False # use to plot the 2d plots with logarithmic intensity scale\n",
86 | "ta.bordercut=[400,1100] # use to set the outer wavelength limits\n",
87 | "ta.scattercut=[522,605] # this region is blanked out to block scatter, this can be a list of regions\n",
88 | "ta.intensity_range=None#[0,3e-3] # set a value 5e-3 or a range [-1e-3,5e-3] for 2d plotting\n",
89 | "ta.Plot_RAW() # Plot Raw Spectra"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {},
95 | "source": [
96 | "### Perform fast fitting using independent exponential decay (first order)"
97 | ]
98 | },
99 | {
100 | "cell_type": "code",
101 | "execution_count": null,
102 | "metadata": {},
103 | "outputs": [],
104 | "source": [
105 | "ta.wave_nm_bin=2; \n",
106 | "plt.close('all') # Close old spectra\n",
107 |     "ta.mod='exponential' # Choose the model; 'exponential', 'consecutive' and 'fast_consecutive' are built in\n",
108 |     "par=lmfit.Parameters() # create empty parameter object\n",
109 |     "\n",
110 |     "par.add('k0',value=1/0.1,vary=True) # Add first rate component\n",
111 |     "par.add('k1',value=1/2.5,vary=True) # Add second rate component\n",
112 |     "par.add('k2',value=1/40,vary=True) # Add third rate component\n",
113 |     "par.add('t0',value=0,min=-2,max=2,vary=False) # Arrival time (for all traces); set vary=True to let it adjust\n",
114 |     "par.add('resolution',value=0.086,min=0.04,max=0.5,vary=False) # Instrument response (for all traces); set vary=True to let it adjust\n",
115 |     "#par.add('explicit_GS')\n",
116 |     "#par.add('infinite') # Keyword for a non-decaying component\n",
117 |     "#par.add('background') # Keyword to fit the background; disable the background subtraction in the loading cell for best effect\n",
118 | "#ta.timelimits=[0.2,5000] # select time window in which to fit\n",
119 | "ta.ignore_time_region=[-0.1,0.5] # ignore the region over t0 for the fit\n",
120 | "ta.log_fit=False # fit in linear or log scale\n",
121 | "if 0:\n",
122 | " for key in par.keys():\n",
123 | " par[key].vary=False\n",
124 | "ta.par=par # write parameter object into file for fitting\n",
125 | "ta.Fit_Global() # trigger fitting\n",
126 | "\n",
127 |     "ta.error_matrix_amplification=10; # Choose how strongly the error matrix is amplified in the plot \n",
128 | "ta.log_scale=False # 2D plots in linear or log scale\n",
129 | "ta.Plot_fit_output() # plot the fit output"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "ta.Save_Powerpoint()"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": null,
144 | "metadata": {},
145 | "outputs": [],
146 | "source": [
147 | "par=lmfit.Parameters() \n",
148 | "par.add('k0',value=1/0.1,vary=True) \n",
149 | "par.add('k1',value=1/2.5,vary=True)\n",
150 | "par.add('k2',value=1/100,vary=True)\n",
151 | "par.add('infinite') \n",
152 | "par.add('t0',value=0,min=-2,max=2,vary=False) \n",
153 | "par.add('resolution',value=0.086,min=0.04,max=0.5,vary=False)\n",
154 | "ta.par=par # write parameter object into file for fitting\n",
155 | "ta.Fit_Global(fit_chirp=False,confidence_level=0.95) "
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "metadata": {},
162 | "outputs": [],
163 | "source": [
164 | "plt.close('all')\n",
165 | "ta.cmap=pf.cm.gist_ncar\n",
166 | "ta.intensity_range=3e-3\n",
167 | "ta.Plot_fit_output()"
168 | ]
169 | },
170 | {
171 | "cell_type": "markdown",
172 | "metadata": {},
173 | "source": [
174 | "### export the results as images and save the project"
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": null,
180 | "metadata": {},
181 | "outputs": [],
182 | "source": [
183 | "#ta.filename='new'+ta.filename\n",
184 | "ta.Save_project() # This saves the whole project including raw data, fits and chirp correction into a single file"
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": null,
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 |     "ta.Save_Plots(savetype='png') # This saves all the figures and an overview powerpoint; savetype can also be a list, e.g. ['png','svg']"
194 | ]
195 | },
196 | {
197 | "cell_type": "markdown",
198 | "metadata": {},
199 | "source": [
200 | "### Perform Error analysis and calculate confidence interval"
201 | ]
202 | },
203 | {
204 | "cell_type": "code",
205 | "execution_count": null,
206 | "metadata": {},
207 | "outputs": [],
208 | "source": [
209 | "ta.par=ta.par_fit #write the best results back as starting parameter\n",
210 | "ta.Fit_Global(confidence_level=0.65)"
211 | ]
212 | },
213 | {
214 | "cell_type": "markdown",
215 | "metadata": {},
216 | "source": [
217 | "### Perform fast iterative fitting using independent exponential decay and optimise the chirp"
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "execution_count": null,
223 | "metadata": {},
224 | "outputs": [],
225 | "source": [
226 |     "ta1=ta.Copy() # Make a copy of the fitting project to test less stable things\n",
227 | "ta1.timelimits=None # To optimise the chirp the region around time-zero must be included\n",
228 | "ta1.ignore_time_region=None # To optimise the chirp the region around time-zero must be included\n",
229 | "ta1.par=ta.par_fit # copy fitted parameter into a new fit (e.g. different model)\n",
230 | "ta1.Fit_Global(fit_chirp=True) # trigger fitting with Chirp optimization best using 'exponential' for speed\n",
231 | "ta1.Plot_fit_output(title=None) # plot the fit output"
232 | ]
233 | },
234 | {
235 | "cell_type": "code",
236 | "execution_count": null,
237 | "metadata": {},
238 | "outputs": [],
239 | "source": [
240 | "ta=ta1.Copy() # if successful they can always be written back"
241 | ]
242 | },
243 | {
244 | "cell_type": "markdown",
245 | "metadata": {},
246 | "source": [
247 |     "### Perform fast fitting using consecutive exponential decay to obtain species associated spectra assuming a simple model"
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": null,
253 | "metadata": {},
254 | "outputs": [],
255 | "source": [
256 | "plt.close('all')\n",
257 |     "ta.mod='consecutive' # very quick fit using independent exponentials followed by one round of sequential modelling\n",
258 |     "ta.par=ta.par_fit # copy the fitted parameters into a new fit (e.g. with a different model)\n",
259 |     "#ta.par.add('explicit_GS')\n",
260 |     "ta.Fit_Global(fit_chirp=False) # The iterative chirp is best optimised using 'exponential'\n",
261 | "ta.Plot_fit_output(title=None) # plot the fit output"
262 | ]
263 | },
264 | {
265 | "cell_type": "code",
266 | "execution_count": null,
267 | "metadata": {},
268 | "outputs": [],
269 | "source": [
270 | "ta.par=ta.par_fit\n",
271 |     "ta.mod='full_consecutive' # Truly consecutive fitting, usually much slower\n",
272 | "ta.Fit_Global(fit_chirp=False) \n",
273 | "ta.Plot_fit_output(title=None)"
274 | ]
275 | },
276 | {
277 | "cell_type": "markdown",
278 | "metadata": {},
279 | "source": [
280 | "### Sometimes required options applicable also to fit plotting"
281 | ]
282 | },
283 | {
284 | "cell_type": "code",
285 | "execution_count": null,
286 | "metadata": {},
287 | "outputs": [],
288 | "source": [
289 | "#ta.Man_Chirp() # used to redo the chirp-correction\n",
290 | "#ta.Save_data() # used to dump the Data and fits to disk\n",
291 | "\n",
292 | "#ta.cmap=pf.cm.viridis # use different colour scheme (can also be a list of colours)\n",
293 | "#pf.changefonts(weight='bold',font='standard',SMALL_SIZE=18,MEDIUM_SIZE=18,LARGE_SIZE=18) "
294 | ]
295 | },
296 | {
297 | "cell_type": "code",
298 | "execution_count": null,
299 | "metadata": {},
300 | "outputs": [],
301 | "source": [
302 | "ta.Save_Powerpoint()"
303 | ]
304 | }
305 | ],
306 | "metadata": {
307 | "ipub": {
308 | "titlepage": {
309 | "author": "Jens Uhlig",
310 | "email": "jens.uhlig@chemphys.lu.se",
311 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
312 | "subtitle": "Main Worksheet",
313 | "title": "Transient Absorption Worksheet"
314 | }
315 | },
316 | "kernelspec": {
317 | "display_name": "Python 3 (ipykernel)",
318 | "language": "python",
319 | "name": "python3"
320 | },
321 | "language_info": {
322 | "codemirror_mode": {
323 | "name": "ipython",
324 | "version": 3
325 | },
326 | "file_extension": ".py",
327 | "mimetype": "text/x-python",
328 | "name": "python",
329 | "nbconvert_exporter": "python",
330 | "pygments_lexer": "ipython3",
331 | "version": "3.11.9"
332 | }
333 | },
334 | "nbformat": 4,
335 | "nbformat_minor": 4
336 | }
337 |
--------------------------------------------------------------------------------
/Workflow_tools/TA_comparative_plotting_and_data_extraction.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import matplotlib,lmfit\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "import numpy as np\n",
12 | "import pandas as pd\n",
13 | "%matplotlib tk\n",
14 | "#for more comfort use\n",
15 | "#%matplotlib qt\n",
16 | "import KiMoPack.plot_func as pf"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "### load base project and a few other projects to compare against"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "ta=pf.TA('sample_1.hdf5',path='Data') #load the core project (whose parameters will be used for all settings)\n",
33 | "project_list=pf.GUI_open(['sample_1.hdf5','sample_2.hdf5'],path='Data')"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "### Gui way to open base project and a few other projects to compare against"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "ta=pf.TA('gui') #load the core project (whose parameters will be used for all settings)\n",
50 | "project_list=pf.GUI_open() #use a gui to open a few projects that you would you like to compare to"
51 | ]
52 | },
53 | {
54 | "cell_type": "markdown",
55 | "metadata": {},
56 | "source": [
57 | "### comparing plots of multiple spectra at a set time"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "metadata": {},
64 | "outputs": [],
65 | "source": [
66 | "window=[0.9,1.1,450,470] #Normalize in this window (time_start, time_end, wavelength_start, wavelength_stop)\n",
67 |     "ta.Compare_at_time(fitted=False,other=project_list,rel_time=[1,100],norm_window=window)#plot projects at 1ps and 100ps\n",
68 |     "#ta.Compare_at_time(fitted=False,other=project_list,rel_time=[1,10],norm_window=window)#plot projects at 1ps and 10ps"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "### comparing plots of multiple kinetics"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {},
82 | "outputs": [],
83 | "source": [
84 | "window=[0.6,0.85,450,470] #Normalize in this window (time_start, time_end, wavelength_start, wavelength_stop)\n",
85 | "ta.Compare_at_wave(fitted=False,other=project_list,rel_wave=[500,400],norm_window=window) "
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "metadata": {},
92 | "outputs": [],
93 | "source": [
94 | "plt.close('all')\n",
95 | "window=[0.6,0.85,450,470]\n",
96 |     "ta.save_figures_to_folder=True #useful switch that turns on automatic saving to disk\n",
97 |     "for w in [460,500,600]:#wavelengths at which to compare; rel_wave can also take a list, e.g. [400,460]\n",
98 |     "    ta.Compare_at_wave(fitted=False,other=project_list,rel_wave=w,norm_window=window) #normalised in the window defined above"
99 | ]
100 | },
101 | {
102 | "cell_type": "markdown",
103 | "metadata": {},
104 | "source": [
105 | "### compare DAS"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "plt.close('all')\n",
115 | "ta.Compare_DAC(other=project_list)"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": null,
121 | "metadata": {},
122 | "outputs": [],
123 | "source": [
124 | "plt.close('all')\n",
125 | "ta.Compare_DAC(other=project_list,separate_plots=True)"
126 | ]
127 | },
128 | {
129 | "cell_type": "markdown",
130 | "metadata": {},
131 | "source": [
132 | "### Intensity analysis"
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "# for all projects in the list plot intensity in the window\n",
142 | "window=[0.6,0.85,600,700]\n",
143 | "intensity={}\n",
144 | "for project in project_list:\n",
145 |     "    intensity[project.filename]=project.ds.loc[window[0]:window[1],window[2]:window[3]].mean().mean()\n",
146 | "fig,ax=plt.subplots()\n",
147 | "pd.Series(intensity).plot(ax=ax,kind='bar',alpha=0.75, rot=90)\n",
148 | "fig.tight_layout()"
149 | ]
150 | },
151 | {
152 | "cell_type": "markdown",
153 | "metadata": {},
154 | "source": [
155 |     "### for a single project print the intensity of the spectra at a specific wavelength"
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "metadata": {},
162 | "outputs": [],
163 | "source": [
164 | "window=[600,700] #wavelength window for integration\n",
165 | "ta.re['DAC'].loc[window[0]:window[1]].mean()"
166 | ]
167 | },
168 | {
169 | "cell_type": "code",
170 | "execution_count": null,
171 | "metadata": {},
172 | "outputs": [],
173 | "source": [
174 |     "# for all projects in the list print intensity of spectra at a specific wavelength (assuming they all have the same length)\n",
175 |     "window=[600,700] #wavelength window for integration\n",
176 |     "intensity={}\n",
177 |     "for project in project_list:\n",
178 |     "    intensity[project.filename]=(project.re['DAC'].loc[window[0]:window[1]].mean())\n",
179 | "fig,ax=plt.subplots()\n",
180 | "pd.DataFrame(intensity).plot(ax=ax,kind='bar',alpha=0.75, rot=90)\n",
181 | "fig.tight_layout()"
182 | ]
183 | }
184 | ],
185 | "metadata": {
186 | "kernelspec": {
187 | "display_name": "Python 3 (ipykernel)",
188 | "language": "python",
189 | "name": "python3"
190 | },
191 | "language_info": {
192 | "codemirror_mode": {
193 | "name": "ipython",
194 | "version": 3
195 | },
196 | "file_extension": ".py",
197 | "mimetype": "text/x-python",
198 | "name": "python",
199 | "nbconvert_exporter": "python",
200 | "pygments_lexer": "ipython3",
201 | "version": "3.9.16"
202 | }
203 | },
204 | "nbformat": 4,
205 | "nbformat_minor": 2
206 | }
207 |
--------------------------------------------------------------------------------
/Workflow_tools/TA_single_scan_handling.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import matplotlib,lmfit\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "import numpy as np\n",
12 | "import pandas as pd\n",
13 | "%matplotlib tk\n",
14 | "#for more comfort use\n",
15 | "#%matplotlib qt\n",
16 | "import KiMoPack.plot_func as pf"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "# Manual selection of which scans to drop \n",
24 |     "## this is usually useful for removing scans in which the sample is degrading"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 |     "The following command reads (from the beginning to the end):\n",
32 |     "\"ta=\" assign to \"ta\"\n",
33 |     "\"pf.Summarize_scans\" the output of the function Summarize_scans\n",
34 |     "\"list_of_scans='gui'\" use a gui to select the files to open\n",
35 |     "\",list_to_dump='single'\" click on single scans of this list to be removed\n",
36 |     "\",window1=[3,10,950,1000]\" plot the integrated value for each scan from 3-10ps and from 950-1000nm\n",
37 | "\"window2=[3,10,480,520]\" plot the integrated value for each scan from 3-10ps and from 480-520nm"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "metadata": {},
44 | "outputs": [],
45 | "source": [
46 | "ta=pf.Summarize_scans(list_of_scans='gui',list_to_dump='single',window1=[3,10,950,1000],window2=[3,10,480,520])"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "# Automatic selection of what to filter\n",
56 | "## with options to filter single times in the scans"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "ta=pf.Summarize_scans('gui',\n",
66 | " value_filter = 20,\n",
67 | " zscore_filter_level =3,\n",
68 | " zscore_in_window=False,\n",
69 | " window1=[1,3,700,800],\n",
70 | " drop_scans=True,\n",
71 | " dump_times=True)"
72 | ]
73 | },
74 | {
75 | "cell_type": "markdown",
76 | "metadata": {},
77 | "source": [
78 | "## this one is using a base object (prior analysis) to provide scattercut and bordercut as well as chirp correction"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": null,
84 | "metadata": {},
85 | "outputs": [],
86 | "source": [
87 | "ta=pf.TA('gui')\n",
88 | "ta.bordercut=[470,1150]\n",
89 | "ta.intensity_range=1e-3\n",
90 | "ta.wave_nm_bin=2"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 |     "ta_single=pf.Summarize_scans('gui',\n",
100 |     "                    value_filter = 20,\n",
101 |     "                    zscore_filter_level =2.3,\n",
102 |     "                    zscore_in_window=False,\n",
103 |     "                    window1=[1,3,700,800],\n",
104 |     "                    drop_scans=False,\n",
105 |     "                    dump_times=True,\n",
106 |     "                    base_TA_object=ta)"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": []
115 | }
116 | ],
117 | "metadata": {
118 | "ipub": {
119 | "titlepage": {
120 | "author": "Jens Uhlig",
121 | "email": "jens.uhlig@chemphys.lu.se",
122 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
123 | "subtitle": "Main Worksheet",
124 | "title": "Transient Absorption Worksheet"
125 | }
126 | },
127 | "kernelspec": {
128 | "display_name": "Python 3 (ipykernel)",
129 | "language": "python",
130 | "name": "python3"
131 | },
132 | "language_info": {
133 | "codemirror_mode": {
134 | "name": "ipython",
135 | "version": 3
136 | },
137 | "file_extension": ".py",
138 | "mimetype": "text/x-python",
139 | "name": "python",
140 | "nbconvert_exporter": "python",
141 | "pygments_lexer": "ipython3",
142 | "version": "3.9.16"
143 | }
144 | },
145 | "nbformat": 4,
146 | "nbformat_minor": 2
147 | }
148 |
--------------------------------------------------------------------------------
/Workflow_tools/XES_Raw_plotting_and_Simple_Fit.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Set standard imports"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": null,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import matplotlib,lmfit\n",
17 | "import matplotlib.pyplot as plt\n",
18 | "import numpy as np\n",
19 | "import pandas as pd\n",
20 | "%matplotlib tk\n",
21 | "#for more comfort use\n",
22 | "#%matplotlib qt\n",
23 | "import KiMoPack.plot_func as pf\n",
24 | "from importlib import reload\n",
25 | "reload(pf)"
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "### Load and inspect Data "
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "ta=pf.TA('XES_on.SIA',path='Data',sep=',',units='eV',sort_indexes=True,decimal='.',data_type='X-ray emission intensity',baseunit='ps') #type filename and path (leave path empty if in the same folder)\n",
42 | "#ta=pf.TA('XES_diff.SIA',path='Data',sep=',',units='eV',sort_indexes=True,decimal='.',data_type='X-ray emission differential')\n",
43 | "#ta=pf.TA(\"sample_1.hdf5\",path=\"Data\") #reload saved projects using the same syntax\n",
44 | "#ta=pf.TA('gui') #use a GUI to select data"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "### Shape data and plot Raw images"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "plt.close('all')\n",
61 | "ta.rel_wave=np.arange(7053,7061,1) # use to manually select interesting wavelength for the kinetics\n",
62 | "ta.wavelength_bin=1 # width of wavelength bin in kinetics\n",
63 | "ta.rel_time=[-0.5,-0.1,0,0.1,0.3,0.5,1,1.5,3,5,10,15,20] # use to manually set interesting times for the spectra\n",
64 | "ta.time_width_percent=0 # rebinning of time_points in percent\n",
65 | "ta.timelimits=[-1,200] # use to manually limit the range to plot (and fit) \n",
66 | "ta.log_scale=False # use to plot the 2d plots with logarithmic intensity scale\n",
67 | "ta.bordercut=[7045,7065] # use to set the outer wavelength limits\n",
68 | "#ta.scattercut=[522,605] # this region is blanked out to block scatter, this can be a list of regions\n",
69 | "ta.intensity_range=[0,7] # set a value 5e-3 or a range [-1e-3,5e-3] for 2d plotting\n",
70 | "ta.Plot_RAW() # Plot Raw Spectra"
71 | ]
72 | },
73 | {
74 | "cell_type": "markdown",
75 | "metadata": {},
76 | "source": [
77 | "## Apply some gentle smoothing?"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {},
84 | "outputs": [],
85 | "source": [
86 | "if 1:\n",
87 | " ta.ds=pf.Frame_golay(ta.ds,7,3)\n",
88 | " ta.Plot_RAW() "
89 | ]
90 | },
91 | {
92 | "cell_type": "markdown",
93 | "metadata": {},
94 | "source": [
95 | "### Perform fast fitting using independent exponential decay (first order)"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "import function_library as func\n",
105 | "reload(func)"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "plt.close('all') # Close old spectra\n",
115 |     "ta.mod=func.P12 # use model P12 from the external function library ('exponential', 'consecutive' and 'fast_consecutive' are built in)\n",
116 |     "par=lmfit.Parameters() # create empty parameter object\n",
117 |     "\n",
118 |     "par.add('k0',value=1/0.275,vary=True) # Add first rate component\n",
119 |     "par.add('k1',value=1/7.2,vary=True) # Add second rate component\n",
120 |     "par.add('k2',value=1/0.45,vary=True) # Add third rate component\n",
121 |     "par.add('t0',value=-0.0167,min=-2,max=2,vary=False) # Arrival time (for all traces); set vary=True to let it adjust\n",
122 |     "par.add('resolution',value=0.0935,min=0.04,max=0.5,vary=False) # Instrument response (for all traces); set vary=True to let it adjust\n",
123 |     "par.add('explicit_GS')\n",
124 |     "#par.add('infinite') # Keyword for a non-decaying component\n",
125 |     "par.add('background') # Keyword to fit the background; disable the background subtraction in the loading cell for best effect\n",
126 | "ta.log_fit=False # fit in linear or log scale\n",
127 | "if 0:\n",
128 | " for key in par.keys():\n",
129 | " par[key].vary=False\n",
130 | "ta.par=par # write parameter object into file for fitting\n",
131 | "ta.Fit_Global() # trigger fitting\n",
132 |     "ta.error_matrix_amplification=10; # Choose how strongly the error matrix is amplified in the plot\n",
133 | "ta.Plot_fit_output() # plot the fit output"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "metadata": {},
139 | "source": [
140 | "### Calculate the absolute spectra"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "metadata": {},
147 | "outputs": [],
148 | "source": [
149 |     "for col in ta.re['DAC'].columns.drop('background'): # add the fitted background back to each species spectrum\n",
150 | " ta.re['DAC'].loc[:,col]+=ta.re['DAC'].background\n",
151 | "ta.re['DAC']=ta.re['DAC'].drop('background',axis=1)\n",
152 | "ta.Plot_fit_output(0)"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "metadata": {},
159 | "outputs": [],
160 | "source": [
161 | "ta.Save_Powerpoint()"
162 | ]
163 | }
164 | ],
165 | "metadata": {
166 | "ipub": {
167 | "titlepage": {
168 | "author": "Jens Uhlig",
169 | "email": "jens.uhlig@chemphys.lu.se",
170 | "logo": "http://www.jensuhlig.de//hot_warm_cold.png",
171 | "subtitle": "Main Worksheet",
172 | "title": "Transient Absorption Worksheet"
173 | }
174 | },
175 | "kernelspec": {
176 | "display_name": "Python 3 (ipykernel)",
177 | "language": "python",
178 | "name": "python3"
179 | },
180 | "language_info": {
181 | "codemirror_mode": {
182 | "name": "ipython",
183 | "version": 3
184 | },
185 | "file_extension": ".py",
186 | "mimetype": "text/x-python",
187 | "name": "python",
188 | "nbconvert_exporter": "python",
189 | "pygments_lexer": "ipython3",
190 | "version": "3.11.9"
191 | }
192 | },
193 | "nbformat": 4,
194 | "nbformat_minor": 4
195 | }
196 |
--------------------------------------------------------------------------------
/Workflow_tools/import_library.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import os
4 |
5 | def NRL(filename):
6 | ds=pd.read_csv(filename, sep=',', index_col=0, header=None)
7 | ds.columns=ds.iloc[0,:]
8 | ds.drop(0,inplace=True)
9 | ds.index=ds.index.astype(float)
10 | ds.columns=ds.columns.astype(float)
11 | ds.sort_index(inplace=True,axis=0)
12 | ds.sort_index(inplace=True,axis=1)
13 | 	ds=ds.astype('float')
14 | ds=ds.T
15 | ds.index.name='Time in ps'
16 | ds.columns.name='Wavelength in nm'
17 | return ds,'differential absorption in mOD','ps'
18 |
19 | def Uppsala(filename):
20 | df=pd.read_csv(filename,index_col=0,sep=',').T
21 | #df=df.fillna(0)
22 | df=df.dropna(axis=1)
23 | df.index=df.index.astype('float')
24 | df.columns=df.columns.astype('float')
25 | df.sort_index(inplace=True)
26 | df.sort_index(inplace=True,axis=1)
27 | 	df=df.astype('float')
28 | df.index.name='Time in ps'
29 | df.columns.name='Wavelength in nm'
30 | return df,'differential absorption in mOD','ps'
31 |
32 |
33 | def Ivan_horse(filename):
34 | #print(filename)
35 | import scipy.constants as const
36 | ds=pd.read_csv(filename,sep='\t',index_col=0)
37 | ds.index=ds.index.astype(float)
38 | ds.columns=ds.columns.astype(float)
39 | #ds.index=ds.index.values/1e8
40 | #ds.columns=ds.columns.values/1e5
41 | #ds=ds.apply(lambda x:x*ds.index.values)
42 | #ds=ds.apply(lambda x:x*ds.columns.values,axis=1)
43 | #per_photon=const.h*const.c/(485e-9)
44 | #ds= ds*per_photon
45 | ds.index.name='Fluence in Photons/cm2 s'
46 | ds.columns.name='Repetitions rate in Hz'
47 | ds.sort_index(inplace=True)
48 | ds.sort_index(inplace=True,axis=1)
49 | return ds.T,'PLQY'
50 |
51 | def cor_streak_lund(filename):
52 | '''This reads the file of the "DAC" that is exported as corrected file from the streak camera software ending'''
53 | ds=pd.read_csv(filename,sep='\t',index_col=0)
54 | ds.columns.name="nm"
55 | ds.index.name="Time in ps"
56 | data_type="Emission intensity"
57 | baseunit="ps"
58 | ds.index=ds.index.astype(float)
59 | ds.columns=ds.columns.astype(float)
60 | ds.sort_index(inplace=True,axis=1)
61 | ds.sort_index(inplace=True,axis=0)
62 | return ds,data_type,baseunit
63 |
64 | def streak_Lund(filename):
65 | '''This is reading the filetype that is saved by the streak camera software as "dat" type'''
66 | code=str(filename).split(os.sep)[-1]#split of the path
67 | code=code.split('.')[0] #split of the dot and fileending
68 | code=code.split('-')
69 | ds=pd.read_csv(filename,sep='\t',header=None)
70 | n_times=len(ds.index.values)
71 | n_waves=len(ds.columns)
72 | times={ 't6':2000,
73 | 't5':1000,
74 | 't4':500,
75 | 't3':200,
76 | 't2':100,
77 | 't1':50}
78 | for i in [4,3,2,5]:#position can change
79 | try:
80 | times=times[code[i]]
81 | times=np.linspace(0,times,n_times)
82 | break
83 | except:
84 | pass
85 | for i in [6,5,7,8]:
86 | if code[i][0]=='w':
87 | center=code[i][1:]
88 | waves=np.linspace(float(center)-60,float(center)+75,n_waves)
89 | break
90 | else:
91 | continue
92 | ds.index=times
93 | ds.index=ds.index.astype(float)
94 | ds.columns=waves
95 | ds.columns=ds.columns.astype(float)
96 | ds.index.name='Time in ps'
97 | ds.columns.name='Wavelength in nm'
98 | return ds,'emission intensity'
99 |
100 | def Amine_func(filename):
101 | df=pd.read_csv(filename,sep='\t',header=None)
102 | wavelength=pd.Series(np.linspace(343.33,656.03,512))
103 | time=pd.Series(np.linspace(0,50.500,512))
104 | df.columns=wavelength
105 | df.index=time
106 | df.index=df.index.astype(float)
107 | df.columns=df.columns.astype(float)
108 | df.index.name='Time in ns'
109 | df.columns.name='Wavelength in nm'
110 | return df,'differential absorption','ns'
111 |
112 | def Lund_colors():
113 | cols={
114 | 'black':[0,0,0],
115 | 'wine_red':[152,30,50],
116 | 'brown':[153,102,51],
117 | 'dark_blue':[0,0,128],
118 | 'gold':[233,131,0],
119 | 'darker grey':[146,139,129],
120 | 'green':[85,118,48],
121 | 'light_grey':[203,199,191],
122 | 'light blue grey':[189,203,197],
123 | 'light green grey':[199,210,138],
124 | 'pink':[233,196,199],
125 | 'blue':[185,211,220],
126 | 'light_green':[173,202,184],
127 | 'yellow':[214,210,196],
128 | 'light_brown':[191,184,175],
129 | 'pastel pink':[219,173,177],
130 | 'pastel blue':[164,196,207],
131 | 'pastel green':[153,190,167],
132 | 'pastel yellow':[203,197,169],
133 | 'pastel brown':[180,168,154],
134 | 'white':[255,255,255],
135 | }
136 | cols=pd.DataFrame(cols).T/255.
137 | cols.columns=['R','G','B']
138 | return cols
139 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/source/Changelog.rst:
--------------------------------------------------------------------------------
1 | *********
2 | Changelog
3 | *********
4 | In this changelog I will only mention the new capabilities. All other version jumps are bug fixes
5 |
6 | .. _release-7.12.0:
7 | 7.12.0
8 | ========
9 | Added the option to return the figures from the common plot functions: setting the option "return_figures_handles" in Plot_RAW and Plot_fit_output triggers the return of a dictionary with the plots.
10 | Added X-ray workflow
11 | Added the general option pf.halfsize (default False). If set to True, all figures will be half the size (better for inline plotting)
12 |
13 | .. _release-7.11.0:
14 | 7.11.0
15 | ========
16 | Added the option to use Cor_Chirp for time shifting only
17 | Added streak camera workflow
18 |
19 | .. _release-7.10.0:
20 | 7.10.0
21 | ========
22 | Added a time delay for printing the fit output to avoid flooding the notebook output; this has a big impact on speed for more complex model fits.
23 |
24 |
25 | .. _release-7.7.0:
26 | 7.7.0
27 | ========
28 | Added X-ray support
29 | added novel global variable that permits half size plots (nicer in inline mode)
30 |
31 |
32 | .. _release-7.5.0:
33 | 7.5.0
34 | ========
35 | Changed the dimension of the dc in all the models. This sped up the fitting by nearly a factor of 10. There have been so many smaller changes over the last versions that I go to 7.5
36 |
37 | .. _release-7.4.0:
38 | 7.4.9
39 | ========
40 | Cleaned up previous updates.
41 |
42 |
43 |
44 | .. _release-7.3.0:
45 | 7.3.6
46 | ========
47 | Introduced that Print_results can dump the result parameter into a file.
48 |
49 |
50 | 7.3.0
51 | ========
52 |
53 | Introduced sub_sample and pulse_sample into the fitting (not yet documented); they allow to temporarily add time points to the model (that will then be removed again).
54 | pulse_sample is needed when the pulse is not in the modelled data (e.g. when timelimits or ignore_time_region is set).
55 | sub_sample divides the time axis and is useful if the measured data is too sparse in time.
56 | If the parameter "sub_steps" is present it will be used to define the number of sub-steps in the iterative sampling.
57 |
58 |
59 | 7.2.17
60 | ========
61 |
62 | Too sparsely measured datapoints can now be sub-sampled with ta.Fit_Global(sub_sample=10)
63 | The intensity is now correct if the modelled points do not include the pump pulse
64 |
65 |
66 | 7.2.5
67 | =======
68 |
69 | On Windows the fit can now be interrupted if "q" is pressed
70 |
71 | 7.2.1
72 | =======
73 |
74 | Add write_paras as Fit_Global option. This will print the params continuously to the output. For now this serves as a way to interrupt the fitting and not lose the results
75 |
76 | 7.1.20
77 | ========
78 |
79 | Added Check_Chirp function that allows to check (look at) a chirp vector vs the data
80 |
81 | 7.1.2
82 | ========
83 |
84 | Add a description call to the TA object meaning that "ta()" will give you a mini instruction
85 |
86 | 7.1.1
87 | ========
88 |
89 | * implemented the option to define the external spectrum as explicit or as guidance spectrum
90 | * Explicit Ground state added
91 |
92 | 7.0.3
93 | ========
94 |
95 | * Allows multi-dataset fits with the same DAS without real fitting (for parameter checks)
96 | * Move from qt to tk in all notebooks
97 |
98 | 7.0.0
99 | ========
100 |
101 | Change to tk version as default (no qt needed)
--------------------------------------------------------------------------------
/docs/source/Comparing.rst:
--------------------------------------------------------------------------------
1 | Comparative plotting
2 | ====================
3 |
4 | The comparative plotting is an important tool to compare different
5 | measurements (e.g. different conditions), different fits or steady state
6 | spectra. In general you can compare different kinetics
7 | (at one or multiple fixed wavelength) with :ref:`Compare_at_wave` ,
8 | compare different spectra at one or multiple given timepoints with
9 | :ref:`Compare_at_time` and compare the extracted spectra (decay associated
10 | or species associated) with :ref:`Compare_DAC`. The essential idea is that the
11 | parameters in the project that contains the comparative plotting are used
13 | for all the plots. So e.g. ta.bordercut or ta.intensity_range is used for all
14 | the plots, independent of the intensity in the other projects.
14 |
15 | New is that the compare functions take "other" as a parameter, which can
16 | be either a single or multiple projects (TA - objects). These projects
17 | need to be loaded into the program. Loading a project can be done by
18 | having them open from prior import or analysis (e.g. when comparing different fits)
19 | and then using
20 |
21 | :meth:`self.Copy()`
22 |
23 | More usually, other (saved) projects will be
24 | opened with the function
25 |
26 | :meth:`pf.GUI_open()`
27 |
28 | . See :ref:`Opening multiple files`
29 | for more information on that.
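
As a minimal sketch (the file names are placeholders taken from the tutorial data that ships
with the workflow notebooks), loading a base project and a list of projects to compare
against could look like::

    import KiMoPack.plot_func as pf
    ta = pf.TA('sample_1.hdf5', path='Data')        # base project; its settings are used for the plots
    project_list = pf.GUI_open(['sample_1.hdf5', 'sample_2.hdf5'], path='Data')
    # or simply project_list = pf.GUI_open() to pick the projects in a file dialog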
30 |
31 | As this comparison very often involves a lot of files, the images are
32 | automatically saved using the filenames and the wavelength/time points.
33 | The images however remain open and, if the standard approach using QT was
34 | used, can be manipulated using the GUI tools. For example, the conversion
35 | into a linear time-axis is currently not implemented, but it can easily be
36 | achieved by changing the axis in the QT GUI.
37 |
38 | A very important function provided by this set of tools is the comparison against
39 | other spectra. For example, reference spectra (e.g. UV-vis) can be added to
40 | the plots.
41 |
42 | Normalization and Scaling
43 | ----------------------------------------
44 |
45 | An important option is the normalization in a certain window that
46 | applies for both Compare_at_time and Compare_at_wave. Very often data
47 | needs to be normalized before it can be compared to anything e.g. to
48 | the size of the ground state bleach or an excited state absorption.
49 | Here I offer the normalization in a certain window in time and space.
50 | In this window a value in the "ta" and then each of the "other"
51 | projects is calculated. The intensity of each of the other projects
52 | (but not of the "ta" project) is then multiplied by the quotient of
53 | this value in this specific window. This means e.g. that even if the
54 | kinetics is plotted for 600nm the normalization can be performed at
55 | 1-2ps at 450-500nm. This is very useful to plot e.g. the efficiency of
56 | product formation in the study of catalytic processes. For this
57 | normalization a "window" needs to be defined in the order::
58 |
59 | [Start time, End time, Start wavelength, End Wavelength]
60 |
61 | Care should be taken as to where this normalization is performed. The region
62 | just around t=0 is dangerous due to the artifacts that do not represent
63 | real values. If external values are supposed to be used for scaling, the
64 | individual intensities can be manipulated. For each of the loaded
65 | projects ta.ds is the matrix that contains the data. With::
66 |
67 | "ta.ds*=1.1"
68 |
69 | the individual intensity could for example be raised by 10%. But be aware
70 | that with this action you are changing the data in the object. The original
71 | data ta.ds_ori remains unchanged. If you save and reload the data, the intensity
72 | will revert to the originally measured value.
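
As a short example (the values are taken from the comparative plotting workflow notebook and
are not special), the window below normalizes all curves using the signal between 0.9-1.1 ps
and 450-470 nm before the spectra at 1 ps and 100 ps are compared::

    window = [0.9, 1.1, 450, 470]   # [start time, end time, start wavelength, end wavelength]
    ta.Compare_at_time(fitted=False, other=project_list, rel_time=[1, 100], norm_window=window)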
73 |
74 | Compare_at_time
75 | -------------------
76 |
77 | This function plots multiple spectra into the same figure at one or multiple given timepoints (rel_time) and
78 | allows for :ref:`Normalization and Scaling`
79 |
80 | Very useful to compare the spectra for different solvents or quenchers, or e.g. different fits.
81 | The ta.time_width_percent parameter defines if this is a single time
82 | (if time_width_percent = 0) or an integrated window.
83 |
84 | A normalization window can be given to which all the plotted curves are normalized.
85 | This window does not have to be in the plotted region. See :ref:`Normalization and Scaling`
86 |
87 | Very often one would like to compare the measured spectra at a certain
88 | time to an external spectrum (e.g. spectro-electro-chemistry or steady
89 | state absorption). This can be done by loading a specific spectrum into
90 | a DataFrame and handing this DataFrame to the comparison function. The
91 | function can also be used to plot e.g. the measured spectra vs. an
92 | external spectrum without giving any "other" projects.
93 |
94 | For more information, details on the parameter and examples see:
95 |
96 | :meth:`self.Compare_at_time()`
97 |
98 | Compare_at_wave
99 | --------------------
100 |
101 | This function plots multiple kinetics into the same figure at one or
102 | multiple given wavelengths (rel_wave) and allows for :ref:`Normalization and Scaling`.
103 | Very useful to compare the kinetics for different quencher concentrations or
104 | pump powers, or e.g. different fits. The parameter width or the general ta.wavelength_bin
105 | defines the width of the spectral window that is integrated and shown.
106 |
107 | A normalization window can be given to which all the plotted curves are normalized.
108 | This window does not have to be in the plotted region. See :ref:`Normalization and Scaling`
109 |
110 | Often multiple wavelengths are to be plotted, and if at the same time
111 | many projects go into the same plot, things tend to get messy. As the
112 | files are saved separately this approach proved to be useful.
113 |
114 | For more information, details on the parameter and examples see:
115 |
116 | :meth:`self.Compare_at_wave()`
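
A sketch of this pattern, following the comparative plotting workflow notebook (the
wavelengths and the normalization window are only examples)::

    ta.save_figures_to_folder = True      # switch on automatic saving of each figure
    window = [0.6, 0.85, 450, 470]
    for w in [460, 500, 600]:             # one figure per wavelength keeps the plots readable
        ta.Compare_at_wave(fitted=False, other=project_list, rel_wave=w, norm_window=window)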
117 |
118 | Compare_DAC
119 | ----------------
120 |
121 | This is a convenience function to plot multiple extracted spectra (DAS
122 | or species associated) into the same figure or into a separate figure
123 | each. "other" should be TA objects from plot_func (loaded or copied). By
124 | default it plots all into the same window. If all projects have the same
125 | number of components one can activate "separate_plots" and have each
126 | separated (in the order created in the projects).
127 |
128 | The "Spectra" parameter allows as before the inclusion of an external
129 | spectrum. Others is optional and I use this function often to compare
130 | species associated spectra with one or multiple steady state spectra.
131 |
132 | For more information, details on the parameter and examples see:
133 |
134 | :meth:`self.Compare_DAC()`
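
A minimal sketch, assuming the projects in "project_list" have already been fitted::

    ta.Compare_DAC(other=project_list)                       # all spectra in one figure
    ta.Compare_DAC(other=project_list, separate_plots=True)  # one figure per component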
135 |
--------------------------------------------------------------------------------
/docs/source/Installation.rst:
--------------------------------------------------------------------------------
1 | KiMoPack
2 | ==========
3 |
4 | KiMoPack is a project for the handling of spectral data measured at
5 | multiple time-points. The current design is optimised for the use with
6 | optical transient absorption data, but it has been successfully adapted
7 | for the use with transient x-ray emission and spectro-electro chemistry
8 | data.
9 |
10 | It focuses on the main tasks an experimentalist has:
11 | Loading and shaping of experiments, plotting of experiments, comparing of experiments,
12 | analysing experiments with fast and/or advanced fitting routines and saving/exporting/presenting
13 | the results.
14 |
15 | For typical use a series of Jupyter notebooks is provided that guides
16 | through a number of different use scenarios and suggests the
17 | parameters that are typically set.
18 |
19 | Installation
20 | --------------
21 |
22 | The basis of the program is a module called "plot_func.py" that contains all the necessary functions and classes.
23 | We recommend using a package manager to install the program.
24 |
25 | Install using "pip":
26 |
27 | .. code-block:: text
28 |
29 | $ pip install KiMoPack
30 |
31 | Install and update using "conda" from the channel erdzeichen:
32 |
33 | .. code-block:: text
34 |
35 | $ conda install -c erdzeichen kimopack
36 |
37 | Hint: the pip version is usually more recent than the conda version
38 | The files can also be downloaded from the github directory https://github.com/erdzeichen/KiMoPack or zenodo (see below)
39 |
40 | These commands install only KiMoPack and the strictly required dependencies. However, several modules work better if additional packages are installed, and in general one should try to install all packages at the same time. Additional packages that I generally recommend are h5py and tables (for saving files), python-pptx (for saving PowerPoint slides) and keyboard (Windows only, for interrupting fits; leave it out on Linux!). Also useful is nbopen, which allows you to double click on the notebook files. nbopen requires an activation at the end.
41 |
42 |
43 | .. code-block:: text
44 |
45 | $ pip install KiMoPack h5py tables nbopen python-pptx
46 | (windows) python -m nbopen.install_win
47 | (Linux) python3 -m nbopen.install_xdg
48 | (MacOS) Clone the repository and run ./osx-install.sh
49 |
50 | Upgrade if already installed:
51 |
52 | .. code-block:: text
53 |
54 | $ pip install KiMoPack -U
55 |
56 |
57 | In general it is a good idea to create a local environment when installing packages if you are using Python for many tasks. In a local environment only the packages that are needed are installed, which usually avoids conflicts. It is very easy to do that.
58 |
59 | Under Windows: open the Anaconda command prompt or PowerShell (type "anaconda" in the Windows start menu).
60 | Under Linux: open a console.
61 |
62 | .. code-block:: text
63 |
64 | 	$ conda create --name kimopack
65 | 	$ conda activate kimopack
66 | $ pip install KiMoPack h5py tables keyboard nbopen python-pptx
67 |
68 | Or, if you also want to make sure to have a recent version of Python:
69 |
70 | .. code-block:: text
71 |
72 | $ conda create --name kimopack python=3.11 ipython jupyterlab jupyter
73 | $ conda activate kimopack
74 | $ pip install KiMoPack h5py tables keyboard nbopen python-pptx
75 |
76 |
77 | Error: insufficient rights: If one of the installs complains (error) that the user does not have sufficient rights, the installation can be repeated with "--user" attached:
78 |
79 | .. code-block:: text
80 |
81 | 	$ conda create --name kimopack
82 | 	$ conda activate kimopack
83 | $ pip install KiMoPack h5py tables keyboard nbopen python-pptx --user
84 |
85 | Error: pytables:
86 | In some versions I have run into a problem with pytables when loading saved data.
87 | Using the conda-forge version solved this problem for me:
88 |
89 | .. code-block:: text
90 |
91 | conda install -c conda-forge pytables
92 |
93 | Best usage
94 | -----------
95 | While KiMoPack is a python library, we facilitate its use with Jupyter notebooks. For the typical analysis tasks we have developed a series of notebooks that guide through the tasks.
96 | These notebooks can be downloaded from https://github.com/erdzeichen/KiMoPack/tree/main/Workflow_tools or by command line.
97 |
98 | You can try any of these "lazy" one-liners:
99 |
100 | .. code-block:: text
101 |
102 | ipython -c "import KiMoPack; KiMoPack.download_notebooks()"
103 | python -c "import KiMoPack; KiMoPack.download_notebooks()"
104 | python3 -c "import KiMoPack; KiMoPack.download_notebooks()"
105 |
106 | If none of these work, start any console (under Windows e.g. type "cmd" and hit enter). In the console start Python by typing "python" and hitting enter, then import KiMoPack and run a function that downloads the files for you by typing "import KiMoPack; KiMoPack.download_all()". This downloads the notebooks and tutorials from github for you. If you instead use "import KiMoPack; KiMoPack.download_notebooks()", only the workflow tools are downloaded.
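
In an interactive Python session this looks like:

.. code-block:: python

    import KiMoPack
    KiMoPack.download_all()        # workflow tools, tutorials and example data
    # or, for the workflow notebooks only:
    KiMoPack.download_notebooks()
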
107 | Please copy one of these notebooks into your data analysis folder and rename it to create an analysis log of your session. For more information please see the publication https://doi.org/10.1021/acs.jpca.2c00907, the tutorial videos, or the tutorial notebooks under https://github.com/erdzeichen/KiMoPack/tree/main/Tutorial_Notebooks_for_local_use.
108 |
109 | Citation
110 | ------------
111 | We have published a paper introducing the toolbox under https://doi.org/10.1021/acs.jpca.2c00907
112 |
113 | Links
114 | -----
115 | * Overview talk: I gave a recent overview talk at the LaserLab Europe meeting: https://youtu.be/z9QqVLFWYrs
116 | * Tutorial videos: https://www.youtube.com/channel/UCmhiK0P9wXXjs_PJaitx8BQ
117 | * Documentation: https://kimopack.readthedocs.io/
118 | * PyPI Releases: https://pypi.org/project/KiMoPack/
119 | * Source Code: https://github.com/erdzeichen/KiMoPack
120 | * Issue Tracker: https://github.com/erdzeichen/KiMoPack/issues
121 | * Website: https://www.chemphys.lu.se/research/projects/kimopack/
122 | * Publication: https://pubs.acs.org/doi/10.1021/acs.jpca.2c00907
123 | * Zenodo: https://doi.org/10.5281/zenodo.5720587
124 |
125 |
--------------------------------------------------------------------------------
/docs/source/Introduction.rst:
--------------------------------------------------------------------------------
1 | Introduction
2 | =============
3 |
4 | KiMoPack is a project for the handling of spectral data measured at
5 | multiple time-points. The current design is optimised for use with
6 | optical transient absorption data, but it has been successfully adapted
7 | for use with transient X-ray emission and spectro-electrochemistry
8 | data.
9 |
10 | It focuses on the main tasks an experimenter has:
11 | Loading and shaping of experiments, plotting of experiments, comparing of experiments,
12 | analysing experiments with fast and/or advanced fitting routines and saving/exporting/presenting
13 | the results.
14 |
15 | The software can be used on several different levels. The simplest level packs everything
16 | into an object "TA" that contains all the parameters that are typically set.
17 | These objects also contain the typical functions that are used in an analysis.
18 | See :ref:`Main Tasks overview` for an overview of these functions.
19 | All active functions have a capital letter in the beginning.
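
A minimal sketch of this object-based use (the import alias "pf", the file name and the parameter value are illustrative assumptions):

.. code-block:: python

    import KiMoPack.plot_func as pf

    ta = pf.TA('sample_1.SIA')       # create the "TA" object from a measured data file
    ta.rel_wave = [400, 450, 500]    # typical parameters are stored on the object
    ta.Plot_RAW()                    # active functions start with a capital letter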
20 |
21 | At the lower levels a series of convenience functions for the efficient plotting of
22 | one or two dimensional data is provided. These are typically located in the main module.
23 |
24 | For typical use a series of Jupyter notebooks is provided that guide
25 | through a number of different use scenarios and suggest the
26 | parameters that are typically set.
27 |
28 | In addition a series of tutorial notebooks is provided that guide the user through the different functions. These tutorials can either be downloaded or executed on a "mybinder" server via this badge:
29 |
30 | .. image:: https://mybinder.org/badge_logo.svg
31 |    :target: https://mybinder.org/v2/gh/erdzeichen/KiMoPack/HEAD
32 |
33 | In addition a small series of videos was produced to introduce the features and usage of KiMoPack.
34 |
35 |
36 | Links
37 | -----
38 | * Overview talk: I gave a recent overview talk at the LaserLab Europe meeting: https://youtu.be/z9QqVLFWYrs
39 | * Tutorial videos: https://www.youtube.com/channel/UCmhiK0P9wXXjs_PJaitx8BQ
40 | * Documentation: https://kimopack.readthedocs.io/
41 | * PyPI Releases: https://pypi.org/project/KiMoPack/
42 | * Source Code: https://github.com/erdzeichen/KiMoPack
43 | * Issue Tracker: https://github.com/erdzeichen/KiMoPack/issues
44 | * Website: https://www.chemphys.lu.se/research/projects/kimopack/
45 | * Publication: https://pubs.acs.org/doi/10.1021/acs.jpca.2c00907
46 | * Zenodo: https://doi.org/10.5281/zenodo.5720587
47 |
48 |
--------------------------------------------------------------------------------
/docs/source/Main_tasks.rst:
--------------------------------------------------------------------------------
1 | Main Tasks overview
2 | ====================
3 |
4 | This menu is a shortcut to the main functions and tasks that are performed during an analysis.
5 | In general one opens one or multiple data files and, after defining a number of shaping parameters
6 | (that e.g. set the axis limits or correct the arrival time of different wavelengths), plots various graphs.
7 | Different measurements or fits can be compared and the results saved in various forms.
8 |
9 | * :ref:`Opening of data`
10 |
11 | * Open single file: :meth:`pf.TA()`
12 | * Open many files: :meth:`pf.GUI_open()`
13 | * Combine many scans: :meth:`pf.Summarize_scans()`
14 |
15 | * :ref:`Shaping of Data`
16 |
17 | * Background correction: :meth:`self.Background()`
18 | * Filter bad data: :meth:`self.Filter_data()`
19 | * Correct arrival time (Chirp) :meth:`self.Cor_Chirp()`
20 | * :ref:`Data shaping settings that affect the fits`
21 | * :ref:`Plot shaping options without influence on the fitting`
22 |
23 | * :ref:`Plotting functions`
24 |
25 | * Plotting non Fitted data: :meth:`self.Plot_RAW()`
26 | * Plotting Fitted data: :meth:`self.Plot_fit_output()`
27 | * Adjust fonts in plots: :meth:`pf.changefonts()`
28 |
29 | * :ref:`Fitting, Parameter optimization and Error estimation`
30 |
31 | * Fitting data: :meth:`self.Fit_Global()`
32 |
33 | * :ref:`Comparative plotting`
34 |
35 | * Compare spectra: :meth:`self.Compare_at_time()`
36 | * Compare kinetics: :meth:`self.Compare_at_wave()`
37 | * Compare calculated spectra (SAS or DAS): :meth:`self.Compare_DAC()`
38 |
39 | * :ref:`Data Export and Project Saving`
40 |
41 | * Copy project :meth:`self.Copy()`
42 | * Save Project as hdf5 :meth:`self.Save_project()`
43 | 	* Save Plots :meth:`self.Save_Plots()`
44 | 	* Save Plots as Powerpoint :meth:`self.Save_Powerpoint()`
45 | * Save/export data as ascii/text :meth:`self.Save_data()`
46 |
47 | .. figure:: _static/structure.png
48 |
49 |
--------------------------------------------------------------------------------
/docs/source/Opening.rst:
--------------------------------------------------------------------------------
1 | Opening of data
2 | ==========================================
3 |
4 | A key challenge in using non-graphical programming software is to locate and open files.
5 | This tool provides a mixed interface to solve this challenge and in general allows three different ways to import data.
6 |
7 | Each of the following functions has a "gui" keyword that can trigger a standard file
8 | opening dialogue. Alternatively the filenames can be written together with an (optional)
9 | path argument.
10 | If the analysis uses the workflow notebooks we provide, then we suggest that a fresh notebook
11 | is used for each particular analysis and that the notebook is copied close to the data.
12 |
13 | Three different pathways of importing data are offered:
14 |
15 | 1. All import functions provide a wide variety of options to adapt to different data formats. If a particular option is missing and desired, please contact the developers via email or raise an issue on github. We will consider doing so, but will most likely instead provide you with a function for option 2. The format of the example files has the spectral information as the first row, the time vector as the first entry of each of the following rows, and is separated by tab. Files of this type can be read without any further adaption (using the standard parameters). Typical options include the transposing of columns, the conversion of time and energy vectors, or the providing of external files for energies and times.
16 |
17 | 2. All import functions have the option of providing an external import function (from version 7.8.0 onwards). This function receives the filename and should return a DataFrame to KiMoPack. We provide a function library that contains the formats of befriended groups. If you would like help to develop an import function, please contact the developers.
18 |
19 | 3. The two main import functions have a "ds" parameter to which a pandas DataFrame can be given. Thus the user might simply import and shape the file and then hand it over to KiMoPack.
20 |
21 | Opening single file and creating TA object
22 | ------------------------------------------
23 |
24 | Open single file: :meth:`pf.TA()`
25 |
26 | Typical use of the tool is based upon an object containing all
27 | parameters, functions and the data. This object is created by importing a
28 | data file using this function.
29 |
30 | The filename can be either provided as a string, with the (optional) path to other folders given.
31 | Using the keyword "gui" instead of a filename opens the graphical file selection interface.
32 | The function can either open a text style format (using any file ending) or the internally used "hdf5" file format.
33 | The latter is exclusively used as internal storage format that stores the complete project including the RAW data.
34 |
35 | After import of either filetype the missing parameters in the "TA" object are set with the
36 | :meth:`self.__make_standard_parameter()` function.
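
A minimal sketch of the typical calls (the file names are illustrative, and the "path" keyword follows the description above):

.. code-block:: python

    import KiMoPack.plot_func as pf

    ta = pf.TA('Sample_1.SIA', path='Data')   # open a text file from a "Data" sub-folder
    ta = pf.TA('gui')                         # or select the file in a dialogue
    ta = pf.TA('sample_1.hdf5')               # re-open a previously saved project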
37 |
38 | Opening multiple files
39 | ----------------------------
40 |
41 | Open many files: :meth:`pf.GUI_open()`
42 |
43 | Sometimes multiple files are to be opened. Typical use examples are the options to compare different
44 | measurements or analysis runs. This function provides a convenient way to create a list of opened projects.
45 | One can
46 |
47 | * open a gui and select multiple saved projects, which are returned as a list
48 | * give a list of project names to open them
49 | * open all files in a given folder
50 |
51 | The general behavior is selected by the first parameter (project_list).
52 | For more details see the examples in :meth:`pf.GUI_open()`.
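
A minimal sketch of the list-based mode (the file names are illustrative):

.. code-block:: python

    import KiMoPack.plot_func as pf

    projects = pf.GUI_open(['sample_1.hdf5', 'sample_2.hdf5'])   # returns a list of opened projects
    for ta in projects:
        ta.Plot_RAW()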
53 |
54 | Opening and handling single scans
55 | ----------------------------------
56 |
57 | Combine many scans: :meth:`pf.Summarize_scans()`
58 |
59 | Typically an experiment consists of a larger number of scans that are combined into a single experimental file.
60 | The function "Summarize_scans" reads, selects and eventually combines a
61 | series of single scans with a bunch of useful options. The essential idea is
62 | that for each scan one or two numbers are generated through integration of the intensity
63 | in a temporal and spectral window. This single number is plotted as a function of the scan number.
64 | Then either a list of numbers or a GUI is used to select the scans that are
65 | removed from the average. A number of opening and selection options are given.
66 |
67 | Note the new automatic filter options of Summarize_scans.
68 |
69 | This function could also be used to combine a number of different experiments.
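
A minimal sketch (the keyword name "list_of_scans" is an assumption; see the examples in :meth:`pf.Summarize_scans()` for the exact signature):

.. code-block:: python

    import KiMoPack.plot_func as pf

    # combine nine single scans into one averaged project (keyword name is an assumption)
    scans = ['ACN_00%i.SIA' % i for i in range(1, 10)]
    ta = pf.Summarize_scans(list_of_scans=scans)
    ta.Save_project()   # store the combined result as hdf5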
70 |
--------------------------------------------------------------------------------
/docs/source/Plotting.rst:
--------------------------------------------------------------------------------
1 | Plotting functions
2 | ==================
3 |
4 | * Plotting non Fitted data: :meth:`self.Plot_RAW()`
5 | * Plotting Fitted data: :meth:`self.Plot_fit_output()`
6 | * Interactive Plotting RAW and Fitted: :meth:`self.Plot_Interactive()`
7 | * Adjust fonts in plots: :meth:`pf.changefonts()`
8 |
9 | One core function of this tool is to create plots for evaluation and
10 | publication. Internally there are a number of separate functions that
11 | create each plot type (see below). The methods Plot_RAW and Plot_fit_output
12 | use the parameters stored in the object and simplify their use. Two additional
13 | functions, "Save_Plots" and "Save_Powerpoint", call both plot
14 | functions and dump their output into separate figure files or into two
15 | slides of a PowerPoint file.
16 |
17 | Common to both plotting functions is that either a single plot can be called by giving
18 | the plotting parameter, or a series of plots (default) by giving a list of numbers,
19 | e.g. "range(3)".
20 |
21 | Most of the plot defining parameters (e.g. for which wavelengths the kinetics
22 | are plotted or at which times the spectra are extracted) are defined by the
23 | :ref:`Plot shaping options without influence on the fitting`.
24 |
25 | The plotting functions have several options to return the plots as a dictionary or to save the plots.
26 | The global parameter pf.halfsize can be set to True to reduce the size of the plots.
27 | This happens automatically for inline plotting.
28 |
29 | Plot_RAW
30 | --------
31 |
32 | :meth:`self.Plot_RAW()` plots all raw figures. The different figures can be called
33 | separately or with a list of plots (standard) e.g. plotting=range(4)
34 | call plots 0-3, plotting=1 a single plot. The plots have the following
35 | numbers: 0 - Matrix, 1 - Kinetics, 2 - Spectra, 3 - SVD. The plotting
36 | can take all parameters from the "ta" object. See:
37 | :meth:`self.Plot_RAW()`
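
A minimal sketch, assuming a loaded project "ta":

.. code-block:: python

    ta.Plot_RAW()                   # plots 0-3: matrix, kinetics, spectra and SVD
    ta.Plot_RAW(plotting=1)         # only the kinetics
    ta.Plot_RAW(plotting=range(2))  # matrix and kinetics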
38 |
39 | Plot_fit_output
40 | ---------------
41 |
42 | :meth:`self.Plot_fit_output()` plots the fit results. For this it uses the data
43 | contained in the shaped and cut datasets that were used for the fit,
44 | including all rebinning or temporal restrictions. The figures can be
45 | called separately or with a list of plots (standard)
46 | The plotting function takes all parameters from the object.
47 |
48 | :meth:`self.Plot_fit_output()`
49 |
50 | **Contents of the plots**
51 |
52 | #. DAC contains the assigned spectra for each component of the fit. For
53 | a modelling with independent exponential decays this corresponds to
54 | the "Decay Associated Spectra" (DAS). For all other models this
55 | contains the "Species Associated Spectra" (SAS). According to the
56 | model the separate spectra are labeled by time (process) or name, if
57 | a name is associated in the fitting model.
58 | #. summed intensity. All wavelength of the spectral axis are summed for
59 | data and fit.
60 | #. plot kinetics for selected wavelength
61 | #. plot spectra at selected times
62 | #. plots matrix (measured, modelled and error Matrix). The parameter are
63 | the same as used for the corresponding RAW plot with the addition of
64 | "error_matrix_amplification" which is a scaling factor multiplied
65 | onto the error matrix.
66 | #. concentrations. In the progress of the modelling/fitting a matrix is
67 | generated that contains the relative concentrations of the species
68 | as function of time.
69 |
70 | This function is a convenience function and is supposed to be used in
71 | conjunction with the object and the embedded parameters (see above). The
72 | use of qt as backend allows the easy customization of the plots via the
73 | GUI. If the plots are saved as "svg" they can easily be adjusted in
74 | inkscape or similar afterwards.
75 | For more details see: :meth:`self.Plot_fit_output()`
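
A minimal sketch, assuming a project "ta" that has already been fitted with :meth:`self.Fit_Global()` (the "plotting" keyword is used as for Plot_RAW):

.. code-block:: python

    ta.error_matrix_amplification = 10   # scale up the residual (measured - fitted) matrix
    ta.Plot_fit_output()                 # all result plots, using the parameters stored on ta
    ta.Plot_fit_output(plotting=0)       # or a single plot selected by number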
76 |
77 | Plot shaping options without influence on the fitting
78 | -----------------------------------------------------
79 |
80 | In addition to the general shaping parameters from section :ref:`Data shaping settings that affect the fits`,
81 | a number of parameters only affect one or multiple of the plots but not the fitting of the data. A short example follows the list below.
82 |
83 | * The plotting of the kinetics is governed by the selection of the wavelengths in the list **rel_wave**
84 |   and the width of each bin **wavelength_bin**
85 | * The plotting of the spectra is governed by the selection of the timepoints in the list **rel_time**
86 |   and potentially a percentual binning around each time-point with **time_width_percent**. If this is set to 0
87 |   then the measured timepoint is used.
88 | * The intensity (color) in the 2d plots as well as the height of the y-axis is determined by the **intensity_range**
89 |   parameter, which can be set symmetric or asymmetric for best representation. With **log_scale**
90 |   this intensity can be scaled logarithmically, and **error_matrix_amplification** only amplifies the intensity of the
91 |   difference matrix (measured-fitted) in the 2d plots
92 | * The color scheme can be set very flexibly using the Matplotlib palettes, or a manually provided color scheme
93 |   (e.g. university colors)
94 | * The titles of all plots are chosen either from the filename or can be given flexibly in each plotting function
95 |   through the title parameter. All the plots can be automatically saved if **save_figures_to_folder** is set to True,
96 |   which is useful for fast surveys; otherwise the method :meth:`self.Save_Plots()`
97 |   stores all plots (see :ref:`Data Export and Project Saving`). The axis labels are accessible via the **baseunit**
98 |   and the fonts are accessible via the function :meth:`pf.changefonts()`
99 | * The parameter **equal_energy_bin** can be set to a value which results in the spectral plots being shown in equal energy
100 |   bins. This is useful for tracking vibrations and such. As of version 6.7.1 this only happens for the RAW plotting.
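
A minimal sketch of typical plot shaping settings (the values are illustrative):

.. code-block:: python

    ta.rel_wave = [390, 450, 520, 610]   # wavelengths at which kinetics are plotted
    ta.wavelength_bin = 10               # width of each wavelength bin
    ta.rel_time = [0.2, 1, 10, 100]      # times at which spectra are extracted
    ta.time_width_percent = 0            # use the measured time points directly
    ta.intensity_range = 3e-3            # symmetric intensity/colour scaling
    ta.Plot_RAW()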
101 |
102 | Interactive plotting
103 | ---------------------
104 |
105 | :meth:`self.Plot_Interactive()` is an interactive plot function that allows the interactive slicing of both time and wavelength. The main parameters of the object apply.
106 |
107 |
108 |
109 | Extended RAW plotting
110 | ---------------------
111 |
112 | :meth:`self.Plot_raw()` is an extended function. All the parameters are
113 | accessible (and then need to be set manually). This function also plots a single
114 | or multiple plots by setting the "plotting" parameter.
115 |
116 | There are even more detailed manipulations possible by using the
117 | separate plot functions:
118 |
119 | * for plotting kinetics at fixed wavelength: :func:`pf.plot1d()`
120 | * for plotting spectra at selected times :func:`pf.plot_time()`
121 | * for plotting the data matrix :func:`pf.plot2d()`
122 | * for plotting the 3 fit data matrix :func:`pf.plot2d_fit()`
123 | * for the SVD plots. :func:`pf.SVD()`
124 |
125 | Each of the functions allows handing in an axis and thus plotting multiple things into the same figure.
126 |
--------------------------------------------------------------------------------
/docs/source/Saving.rst:
--------------------------------------------------------------------------------
1 | Data Export and Project Saving
2 | ==============================
3 |
4 | Save_Plots
5 | -------------------
6 |
7 | Save Plots :meth:`self.Save_Plots()`
8 |
9 | Convenience function that calls both "Plot_RAW" and if possible
10 | "Plot_fit_output" and saves the plots. Effectively this is intented to
11 | be used at the end of a quick exploratory work to capture a status. The
12 | parameter are indentical to the two plotting functions and are handed
13 | through.
14 |
15 | Save_Powerpoint
16 | --------------------
17 |
18 | Save Plots as Powerpoint :meth:`self.Save_Powerpoint()`
19 |
20 | Convenience function that calls both "Plot_RAW" and if possible
21 | "Plot_fit_output" and saves the plots as "png".
22 |
23 | Then it creates a power point file with one slide for the RAW plots and
24 | one slide for the Fits.
25 | Effectively this is intended to be used at the end of a quick
26 | exploratory work to capture a status and create a quick presentation
27 | slide. The parameters are identical to the plotting functions and are
28 | handed through. The additional switches save_RAW and save_Fit are
29 | a convenience to allow for faster processing.
30 |
31 | If the "savetype" contains 'png', 'svg', or 'pdf' then a summary file is created
32 | that looks close to the powerpoint file.
33 |
34 | Saving of the project
35 | --------------------------------------
36 |
37 | Save Project as hdf5 :meth:`self.Save_project()`
38 |
39 | This software allows the saving of the TA project as a HDF5 file that
40 | can be reloaded. The HDF5 file contains all the set parameter as well as
41 | the fit results (if applicable) and the raw data. To reduce the space
42 | consumption we save ta.ds_ori and the parameters that are used to create
43 | ta.ds. If manual changes were made to ta.ds, these have to be stored
44 | externally. As only the obvious errors are filtered in ta.Filter_data,
45 | this can safely replace the original data file. We are also saving
46 | the arrival-time (chirp) correction in the file and restore the chirp
47 | corrected data ta.ds during the import. The import function understands
48 | the file type and re-creates the object.
49 |
50 | The one limitation to this method is the external fit function. If an
51 | external **ta.mod** is used, the save function stores the name and the
52 | documentation string of this function as a string. So after reloading of
53 | the analysis object the external function will have to be set with
54 | ta.mod=imported_function. The parameters of the fit are however stored.
55 | Only the filename and the path of the file can be changed during saving
56 | of the project. If left empty the path and filename of the original
57 | ASCII file is used.
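
A minimal sketch of saving and re-loading a project (the file name is illustrative):

.. code-block:: python

    ta.Save_project()                       # write an hdf5 file next to the original data
    ta_reloaded = pf.TA('my_project.hdf5')  # the import recognises the file type and re-creates the object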
58 |
59 | Save ASCII data
60 | ---------------------------
61 |
62 | Save/export data as ascii/text :meth:`self.Save_data()`
63 |
64 | This is a convenient function to export the data for use with other
65 | plotting programs: the chirp corrected data and all the slices defined by
66 | ta.rel_wave and ta.rel_time, for both the fits and the RAW data. The options are:
67 |
68 | * **save_RAW** and **save_Fit**: while there is an automatic that recognizes if e.g. fit data is present, these switches allow the manual selection of which datasets are stored.
69 | * **save_slices**: selects if the slices defined by ta.rel_wave and ta.rel_time are saved.
70 | * **save_binned**: chooses if the chirp corrected and rebinned dataset (ta.ds with ta.wavelength_nm_bin) is saved. If ta.wavelength_nm_bin is None, this saves the chirp corrected RAW data.
71 | * **filename**: sets the base filename that is used for all the files.
72 | * **path**: can be a full path or a simple string defining a folder relative to the folder in ta.path. If the folder does not exist it will be created; if it exists, a file with exactly the same name will be overwritten without confirmation.
73 | * **sep**: defines the separator used to separate different values. Standard is a "TAB". A good choice would also be a space or a comma, unless you are located in one of the countries that use commas for decimal points. Decimals will be separated with "dots".
83 |
84 | This function by default also dumps a text file with the fit results.
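
A minimal sketch (the values are illustrative; the keyword names are taken from the description above):

.. code-block:: python

    ta.Save_data(save_RAW=True, save_Fit=False,
                 filename='Ru_dppz_ACN_export', sep=',')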
85 |
--------------------------------------------------------------------------------
/docs/source/Shaping.rst:
--------------------------------------------------------------------------------
1 | Shaping of Data
2 | ===============
3 |
4 | In the following sections we discuss the parameters and values that are used to
5 | filter and shape the measured data. In general all loaded data is
6 | stored un-altered in "ta.ds_ori". A second matrix with the same data
7 | is created, named ta.ds.
8 |
9 | Only the function "Filter_data" works on both datasets, ds_ori and ds.
10 | The chirp correction creates a new ds from ds_ori.
11 | The background correction is applied to ta.ds (unless a
12 | specific matrix is given). All other parameters are only applied during a
13 | function call and do not alter ta.ds. That means that in each of the
14 | plotting/fitting functions a local copy of ta.ds is created (using
15 | the function sub_ds) to which all the shaping is applied.
16 |
17 | **The intended work flow is:**
18 |
19 | #. loading of data :ref:`Opening of data`
20 | #. Filtering of data :ref:`Bad data Filter`
21 | #. (optional) chirp correction :ref:`Arrival time correction`
22 | #. (optional) Background correction :ref:`Background subtraction`
23 | #. setting of parameters for the fit :ref:`Data shaping settings that affect the fits`
24 | #. setting of parameters for the plots :ref:`Plot shaping options without influence on the fitting`
25 | #. all the plotting/fitting. :ref:`Plotting functions` and :ref:`Fitting, Parameter optimization and Error estimation`
26 | #. saving/exporting :ref:`Data Export and Project Saving`
27 |
28 | Points 5 and 6 (parameters) can easily be changed many times and a new plot/fit
29 | generated. Important to note is that the parameters are stored with the object.
30 | This means that a parameter that is explicitly set will stay until it is
31 | overwritten or the object is freshly loaded. So e.g. commenting out a certain
32 | parameter does not return the value to its default "None". The parameter needs
33 | to be set explicitly to::
34 |
35 | ta.intensity_range = 3e-3
36 | #ta.intensity_range = 3e-3 #no effect
37 | ta.intensity_range = None
38 |
39 | Often it is faster to reload the object by choosing "run all above" in the
40 | Notebook.
41 |
42 | Bad data Filter
43 | ---------------
44 |
45 | Filter bad data: :meth:`self.Filter_data()`
46 |
47 | In some cases there are bad data points or other strange things. NA
48 | values will normally be replaced by 0 during import and all data is
49 | converted into floats during import. In many recording software
50 | (including Pascher instruments) a specific **value** is used to indicate
51 | that something went wrong. This function filters everything bigger than
52 | this value as an error. Real "NaN" values are filtered during the import of data.
53 | There is the option to either drop the times that contain bad values or to replace
54 | bad values with a specific value. One can give an upper value, a lower value,
55 | or a single value that is then used as the upper and (as negative) the lower limit.
56 |
57 | If the filtering does not work, a manual way of filtering is to operate directly on
58 | ta.ds (the chirp corrected data) or ta.ds_ori; ta.ds_ori[ta.ds_ori>20]=0 is the classical way to filter.
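
A minimal sketch (the keyword name "value" follows the description above and is an assumption):

.. code-block:: python

    ta.Filter_data(value=20)        # treat everything larger than 20 (and smaller than -20) as an error
    # manual alternative, directly on the data matrices:
    ta.ds_ori[ta.ds_ori > 20] = 0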
59 |
60 | Arrival time correction
61 | -----------------------
62 |
63 | Correct arrival time (Chirp) :meth:`self.Cor_Chirp()`
64 |
65 | *Cor_Chirp* is a powerful function to correct for the different arrival times of
66 | different wavelengths (sometimes called chirp).
67 |
68 | In general, if a file is opened for the first time this function opens
69 | a plot and allows the user to select a number of points, which are then
70 | approximated with a 4th order polynomial and finally to select a point
71 | that is declared as time zero. The observed window as well as the intensities
72 | and the colour map can be chosen to enable a good correction. Here a fast
73 | iterating colour scheme such as "prism" is often a good choice. In all of the
74 | selections a left click selects, a right click removes the last point and
75 | a middle click (sometimes emulated by clicking left and right together)
76 | finishes the selection. If no middle click exists, the process
77 | automatically ends after max_points (preset to 40).
78 |
79 | Note that scattercut, bordercut and intensity_range can be used to adjust intensity.
80 |
81 | After the first run the polynomial is stored in self.fitcoeff, a new matrix is
82 | calculated from self.ds_ori and stored as self.ds, and a file is stored in the
83 | same location as the original data. The second time the function *Cor_Chirp* is
84 | run, the function will find the file and apply the chirp correction automatically.
85 |
86 | If one wants to re-run the chirp correction, the function *Man_Chirp* does
87 | not look for this file, but creates a new file after finishing.
88 |
89 | Alternatively, the polynomial or a filename can be given to load a chirp correction
90 | (e.g. from a different run with the same sample).
91 | The function *Cor_Chirp* selects in the order:
92 |
93 | #. "fitcoeff"
94 | #. "other files"
95 | #. "stored_file"
96 | #. call Man_Chirp (clicking by hand)
97 |
98 | Correct arrival time (Chirp) :meth:`self.Cor_Chirp()`
99 | Manual overwrite arrival time correction :meth:`self.Man_Chirp()`
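
A minimal sketch:

.. code-block:: python

    ta.Cor_Chirp()   # first run: select points interactively; later runs re-use the stored correction file
    ta.Man_Chirp()   # force a new manual correction, ignoring any stored file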
100 |
101 | Background subtraction
102 | ----------------------
103 |
104 | Background correction: :meth:`self.Background()`
105 |
106 | This tool is one of two ways to remove a flat background from the data (typically seen before t=0).
107 | This tool averages, for each measured wavelength separately, the values from 'lowlimit' to 'uplimit' and
108 | subtracts them from the data. The low and upper limit can be set
109 | anywhere to subtract any background (so one could e.g. subtract a product
110 | instead). It is important to note that many problems during measurements might
111 | be visible in the data before time zero, so I recommend to first
112 | plot without background correction and only after this inspection
113 | apply the background correction.
114 | The fit function has its own way to calculate and apply a background
115 | that could be used instead (but this makes the fit less stable).
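
A minimal sketch (the keyword names 'lowlimit' and 'uplimit' are taken from the description above; the values are illustrative):

.. code-block:: python

    # average each wavelength from the start of the time axis up to -0.5 and subtract it
    ta.Background(lowlimit=None, uplimit=-0.5)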
116 |
117 | Data shaping settings that affect the fits
118 | ------------------------------------------
119 |
120 | In general the data is handled in each of the plotting/fitting functions
121 | separately. In each function a copy of the matrix with the limitations
122 | below is created.
123 | A number of parameters cut and potentially rebin the raw measured data and as such affect the fit.
124 | The typical workflow is therefore to adjust these parameters before the fitting stage, using the
125 | RAW plots as feedback.
126 |
127 | * Cut the outside limits of the spectrum: *Bordercut*
128 | * Blank one or multiple regions in the spectrum (e.g. suppress scatter) *Scattercut*
129 | * Cut the outside of the time axis: *timelimits*
130 | * Blank one or multiple temporal regions (e.g. around t=0) *ignore_time_region*
131 | * rebin the temporal axis (useful for e.g. steady state long term UV-vis data) *time_bin*
132 | * rebin the spectral axis (useful for prism based spectrometer) *wave_nm_bin*
133 |
134 | For further details and examples see: :meth:`self.__make_standard_parameter()`
135 | or e.g. the general plotting function :meth:`pf.plot_raw()`.
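
A minimal sketch of setting these parameters before a fit (the values are illustrative; the lowercase attribute names are assumptions analogous to ta.intensity_range above):

.. code-block:: python

    ta.bordercut = [350, 750]            # outer limits of the spectrum
    ta.scattercut = [390, 410]           # blank the pump scatter region
    ta.timelimits = [-1, 2000]           # outer limits of the time axis
    ta.ignore_time_region = [-0.1, 0.1]  # blank around t = 0
    ta.wave_nm_bin = 2                   # rebin the spectral axis
    ta.Plot_RAW()                        # inspect the shaped data before fitting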
136 |
137 | The parameter that only change the plots are discussed in :ref:`Plot shaping options without influence on the fitting`
138 |
--------------------------------------------------------------------------------
/docs/source/_static/KiMoPack_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/docs/source/_static/KiMoPack_logo.png
--------------------------------------------------------------------------------
/docs/source/_static/custom.css:
--------------------------------------------------------------------------------
1 | .wy-nav-content {
2 | max-width: 100% !important;
3 | }
--------------------------------------------------------------------------------
/docs/source/_static/structure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/erdzeichen/KiMoPack/d43d001221f1ae94a1cefed870140b1cdd5c1542/docs/source/_static/structure.png
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 | sys.path.insert(0, os.path.abspath(os.sep.join(['..','..','src','KiMoPack'])))
16 | sys.path.insert(0, os.path.abspath(os.sep.join(['src','KiMoPack'])))
17 | sys.path.insert(0, os.path.abspath(os.sep.join([os.getcwd(),'src','KiMoPack'])))
18 |
19 | # -- Project information -----------------------------------------------------
20 |
21 | project = 'KiMoPack'
22 | copyright = '2022, Jens Uhlig'
23 | author = 'Jens Uhlig'
24 |
25 | # The full version, including alpha/beta/rc tags
26 | release = '7.4.9'
27 |
28 | # -- General configuration ---------------------------------------------------
29 |
30 | # Add any Sphinx extension module names here, as strings. They can be
31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 | # ones.
33 | extensions = ['sphinx.ext.napoleon','sphinx.ext.autosectionlabel','sphinx.ext.autodoc','sphinx.ext.viewcode','sphinx.ext.autosummary']
34 | napoleon_numpy_docstring = True
35 | napoleon_include_init_with_doc = True
36 | napoleon_use_ivar = False
37 | napoleon_use_param = True
38 | napoleon_use_rtype = True
39 | autosummary_generate = True
40 | numpydoc_show_class_members = True
41 |
42 | # Add any paths that contain templates here, relative to this directory.
43 | templates_path = ['_templates']
44 |
45 | # List of patterns, relative to source directory, that match files and
46 | # directories to ignore when looking for source files.
47 | # This pattern also affects html_static_path and html_extra_path.
48 | exclude_patterns = []
49 |
50 |
51 | # -- Options for HTML output -------------------------------------------------
52 |
53 | # The theme to use for HTML and HTML Help pages. See the documentation for
54 | # a list of builtin themes.
55 | #
56 | html_theme = 'sphinx_rtd_theme'
57 |
58 | # Add any paths that contain custom static files (such as style sheets) here,
59 | # relative to this directory. They are copied after the builtin static files,
60 | # so a file named "default.css" will overwrite the builtin "default.css".
61 | html_static_path = ['_static']
62 | html_css_files = ['custom.css',]
63 | html_logo = '_static/KiMoPack_logo.png'
64 | html_theme_options = {
65 | 'display_version': True,
66 | 'prev_next_buttons_location': 'bottom',
67 | 'style_external_links': False,
68 | 'vcs_pageview_mode': '',
69 | 'style_nav_header_background': 'white',
70 | # Toc options
71 | 'collapse_navigation': True,
72 | 'sticky_navigation': True,
73 | 'navigation_depth': 4,
74 | 'includehidden': True,
75 | 'titles_only': False,
76 | 'logo_only': False,
77 | }
--------------------------------------------------------------------------------
/docs/source/genindex.rst:
--------------------------------------------------------------------------------
1 | Function Index
2 | ==================
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. KiMoPack documentation master file, created by
2 | sphinx-quickstart on Wed Aug 11 23:07:58 2021.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to KiMoPack's documentation!
7 | ======================================
8 |
9 |
10 | .. toctree::
11 | :maxdepth: 3
12 | :caption: Contents:
13 |
14 | Introduction
15 | Installation
16 | Main_tasks
17 | Opening
18 | Shaping
19 | Plotting
20 | Fitting
21 | Comparing
22 | Saving
23 | Changelog
24 | genindex
25 | plot_func
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | code_folder
2 | ===========
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | plot_func
8 |
--------------------------------------------------------------------------------
/docs/source/plot_func.rst:
--------------------------------------------------------------------------------
1 | KiMoPack - Functions
2 | =====================
3 |
4 | .. autoclass:: plot_func.TA
5 | :members: Background,Compare_DAC,Compare_at_time,Compare_at_wave,Copy,Cor_Chirp,Filter_data,Fit_Global,Man_Chirp,Plot_RAW,Plot_fit_output,Save_Plots,Save_Powerpoint,Save_data,Save_project
6 | :private-members: __make_standard_parameter,__read_ascii_data,__init__
7 |
8 | .. automodule:: plot_func
9 | :members: GUI_open,SVD,Summarize_scans,build_c,changefonts,fill_int,plot1d,plot2d,plot2d_fit,plot_fit_output,plot_raw,plot_time,s2_vs_smin2,Species_Spectra,err_func,err_func_multi,norm,Frame_golay
10 |
11 |
--------------------------------------------------------------------------------
/docs/source/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx==4.2.0
2 | sphinx_rtd_theme==1.0.0
3 | readthedocs-sphinx-search==0.3.2
4 | pandas[pytables,tabulate]
5 | h5py>=3.1
6 | matplotlib>=3.3
7 | numpy>=1.19
8 | lmfit>=1.0
9 | python-pptx>=0.6.18
10 | scipy>=1.5
11 | pathlib
12 | qtconsole>=5
13 | QtPy
--------------------------------------------------------------------------------
/environment.txt:
--------------------------------------------------------------------------------
1 | python>=3.8
2 | tables
3 | tabulate
4 | pandas>=1.4
5 | h5py>=3.1
6 | matplotlib>=3.3
7 | numpy>=1.19
8 | lmfit>=1.0
9 | python-pptx>=0.6.18
10 | scipy>=1.5
11 | pathlib
12 | urllib3
13 | shutil
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "setuptools>=42",
4 | "wheel",
5 | "setuptools_scm>=6.2"]
6 |
7 | build-backend = "setuptools.build_meta"
8 |
9 | [tool.setuptools_scm]
10 | write_to = "src/KiMoPack/_version.py"
11 |
12 | [project]
13 | name = "KiMoPack"
14 | authors = [{name = "Jens Uhlig", email = "jens.uhlig@chemphys.lu.se"}]
15 | description = "A comprehensive package for the analysis of kinetic data."
16 | readme = "README.rst"
17 | requires-python = ">=3.8"
18 | dynamic= ["version"]
19 |
20 | classifiers =[
21 | "Programming Language :: Python :: 3",
22 | "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
23 | "Operating System :: OS Independent",
24 | "Development Status :: 5 - Production/Stable",
25 | "Natural Language :: English",
26 | "Topic :: Scientific/Engineering :: Chemistry",
27 | "Topic :: Scientific/Engineering :: Information Analysis",
28 | "Topic :: Scientific/Engineering :: Physics",
29 | "Framework :: Jupyter"]
30 | dependencies =[
31 | "pandas>=1.4",
32 | "tables",
33 | "tabulate",
34 | "h5py>=3.1",
35 | "matplotlib>=3.3",
36 | "numpy>=1.19",
37 | "lmfit>=1.0",
38 | "scipy>=1.5",
39 | "pathlib",
40 | "urllib3"]
41 |
42 | [project.urls]
43 | "Bug Tracker"="https://github.com/erdzeichen/KiMoPack/issues"
44 | "Repository"="https://github.com/erdzeichen/KiMoPack"
45 | "Homepage"="https://www.chemphys.lu.se/research/projects/kimopack/"
46 | "Documentation"="https://kimopack.readthedocs.io/"
47 |
48 | [tool.setuptools]
49 | package-dir = {""="src"}
50 | packages = ["KiMoPack"]
51 |
--------------------------------------------------------------------------------
/src/KiMoPack/__init__.py:
--------------------------------------------------------------------------------
1 | import urllib3
2 | import sys
3 | import os
4 | import pathlib
5 | import shutil
6 | from pathlib import Path
7 |
8 | def check_folder(path = None, current_path = None, filename = None):
9 | '''Helper function using robust path determination.\n
10 | 	In any case, if a valid file name is given it is attached to the total path\n
11 | 	The path can be string or windows/linux path or pure path or byte type paths.\n
12 | 	paths that do not exist (including parents) are created\n
13 | 	1. if path is given absolute, it is returned\n
14 | 	2. if path is a string (relative) the current_path + path is returned.\n
15 | 	3. if current_path is not absolute or None, the current working directory is assumed as path.\n
16 | 	4. If all is None, the current working directory is returned
17 |
18 | Parameters
19 | -----------
20 |
21 | path : str, purePath, absolute or relative, optional
22 | the final part of the path used
23 |
24 | current_path : None, str, purePath, absolute, optional
25 | path that sits before the "path variable, is filled with current working directory if left None
26 |
27 | filename: None, str, optional
28 | attached after path and returned if not None
29 |
30 | '''
31 |
32 | if isinstance(path,bytes):
33 | path = '%s'%path
34 | if path is not None:
35 | path = pathlib.Path(path)
36 |
37 | if isinstance(current_path, bytes):
38 | current_path = '%s'%current_path
39 | if current_path is not None:
40 | current_path=pathlib.Path(current_path)
41 |
42 | if isinstance(filename, bytes):
43 | filename='%s'%filename
44 | if filename is not None:
45 | filename = pathlib.Path(filename)
46 | if path is None:
47 | if current_path is None:
48 | directory = Path.cwd()
49 | elif current_path.is_absolute():
50 | directory=current_path
51 | else:
52 | print('attention, current_path was given but not absolute, replaced by cwd')
53 | directory = Path.cwd()
54 | elif path.is_absolute():
55 | directory = path
56 | else:
57 | if current_path is None:
58 | directory = Path.cwd().joinpath(path)
59 | elif current_path.is_absolute():
60 | directory = current_path.joinpath(path)
61 | else:
62 | print('attention, current_path was given but not absolute, replaced by cwd')
63 | directory = Path.cwd().joinpath(path)
64 | directory.mkdir( parents=True, exist_ok=True)
65 | if filename is None:
66 | return directory
67 | else:
68 | return directory.joinpath(filename)
69 | def download_notebooks(libraries_only=False):
70 | '''function loads the workflow notebooks into the active folder
71 | if libraries_only is set to True, only the function library and the import library are loaded'''
72 | http = urllib3.PoolManager()
73 | list_of_tools=['Function_library_overview.pdf',
74 | 'function_library.py',
75 | 'import_library.py',
76 | 'TA_Advanced_Fit.ipynb',
77 | 'TA_comparative_plotting_and_data_extraction.ipynb',
78 | 'TA_Raw_plotting.ipynb',
79 | 'TA_Raw_plotting_and_Simple_Fit.ipynb',
80 | 'TA_single_scan_handling.ipynb',
81 | 'Streak_camera_analysis.ipynb',
82 | 'XES_Raw_plotting_and_Simple_Fit.ipynb']
83 | print('Now downloading the workflow tools')
84 | for i,f in enumerate(list_of_tools):
85 | url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Workflow_tools/%s"%f
86 | print('Downloading Workflow Tools/%s'%f)
87 | with open(check_folder(path = 'Workflow_tools', current_path = os.getcwd(), filename = f), 'wb') as out:
88 | r = http.request('GET', url, preload_content=False)
89 | shutil.copyfileobj(r, out)
90 | if libraries_only and i==2:
91 | break
92 | def download_all(single_tutorial=None):
93 | ''' function loads workflow notebooks and example files and tutorials'''
94 | http = urllib3.PoolManager()
95 | if single_tutorial is None:
96 | download_notebooks()
97 | print('Now downloading the workflow tools and tutorials')
98 | else:
99 | download_notebooks(libraries_only=True)
100 | print('Libraries downloaded')
101 | list_of_example_data=['sample_1_chirp.dat',
102 | 'Sample_2_chirp.dat',
103 | 'sample_1.hdf5',
104 | 'sample_2.hdf5',
105 | 'Sample_1.SIA',
106 | 'Sample_2.SIA',
107 | 'XES_diff.SIA',
108 | 'XES_on.SIA',
109 | 'FeCM02-266nm-4mw-QB390-t6-G63-w450-s150-556ms-E100.dat',
110 | 'FeCM02-266nm-4mw-QB390-t6-G63-w450-s150-556ms-E100_chirp.dat']
111 | print('Now downloading the example files')
112 | if (single_tutorial is None) or (single_tutorial == 'workflow'): #we do not use this to download data for Colab
113 | for f in list_of_example_data:
114 | url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Workflow_tools/Data/%s"%f
115 | print('Downloading Workflow Tools/Data/%s'%f)
116 | with open(check_folder(path = 'Workflow_tools'+os.sep+'Data', current_path = os.getcwd(), filename = f), 'wb') as out:
117 | r = http.request('GET', url, preload_content=False)
118 | shutil.copyfileobj(r, out)
119 |
120 | list_of_tutorials=['function_library.py',
121 | 'Function_library_overview.pdf',
122 | 'import_library.py',
123 | 'KiMoPack_tutorial_0_Introduction.ipynb',
124 | 'KiMoPack_tutorial_0_Introduction_Compact.ipynb',
125 | 'KiMoPack_tutorial_1_Fitting.ipynb',
126 | 'KiMoPack_tutorial_2_Fitting.ipynb',
127 | 'KiMoPack_tutorial_3_CompareFit.ipynb',
128 | 'KiMoPack_tutorial_4_ScanHandling.ipynb',
129 | 'KiMoPack_tutorial_5_MultiModal.ipynb']
130 | if single_tutorial is None: #we do not use this to download data for Colab
131 | print('Now downloading tutorials')
132 | for f in list_of_tutorials:
133 | url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Tutorial_Notebooks/%s"%f
134 | print('Downloading tutorial %s'%f)
135 | with open(check_folder(path = 'Tutorial_Notebooks', current_path = os.getcwd(), filename = f), 'wb') as out:
136 | r = http.request('GET', url, preload_content=False)
137 | shutil.copyfileobj(r, out)
138 | tutorial_data={'Compare':['TA_Ru-dppz_400nm_DCM_paral.hdf5','TA_Ru-dppz_400nm_H2O_paral.hdf5','UVvis_SEC_Rudppz_ACN.dat'],
139 | 'Master':['TA_Ru-dppz_400nm_ACN_paral.hdf5'],
140 | 'Fitting-1':['TA_Ru-dppz_400nm_ACN.SIA','TA_Ru-dppz_400nm_ACN_chirp.dat','TA_Ru-dppz_400nm_DCM.SIA','TA_Ru-dppz_400nm_DCM_chirp.dat','TA_Ru-dppz_400nm_H2O.SIA','TA_Ru-dppz_400nm_H2O_chirp.dat'],
141 | 'Fitting-2':['TA_Ru-dppz_400nm_ACN.SIA','TA_Ru-dppz_400nm_ACN_chirp.dat'],
142 | 'Introduction':['catalysis1.SIA','catalysis2.SIA','con_1.SIA','con_1_solved.hdf5','con_2.SIA','con_2_chirp.dat','con_3.SIA','con_4.SIA','con_5.SIA','con_6.SIA','con_6_chirp.dat','full_consecutive_fit.hdf5','full_consecutive_fit_with_GS.hdf5','sample_1_chirp.dat'],
143 | 'Scan':['ACN_001.SIA','ACN_002.SIA','ACN_003.SIA','ACN_004.SIA','ACN_005.SIA','ACN_006.SIA','ACN_007.SIA','ACN_008.SIA','ACN_009.SIA','TA_Ru-dppz_400nm_ACN_mean.SIA','TA_Ru-dppz_400nm_ACN_mean_chirp.dat'],
144 | 'MultiModal':['combined_optical_spectrum.SIA','XES_on.SIA']}
145 | url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Tutorial_Notebooks/Data"
146 | for key in tutorial_data.keys():
147 | 		if single_tutorial is not None: #this is a shortcut to download data for Colab use
148 | if not key==single_tutorial:
149 | continue
150 | for f in tutorial_data[key]:
151 | if 'Master' in key:
152 | url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Tutorial_Notebooks/Data/Compare/Master/%s"%f
153 | with open(check_folder(path = os.sep.join(['Tutorial_Notebooks','Data','Compare','Master']), current_path = os.getcwd(), filename = f), 'wb') as out:
154 | r = http.request('GET', url, preload_content=False)
155 | shutil.copyfileobj(r, out)
156 | else:
157 | url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Tutorial_Notebooks/Data/%s/%s"%(key,f)
158 | with open(check_folder(path = os.sep.join(['Tutorial_Notebooks','Data',key]), current_path = os.getcwd(), filename = f), 'wb') as out:
159 | r = http.request('GET', url, preload_content=False)
160 | shutil.copyfileobj(r, out)
161 | tutorial_images=['Cor_Chirp.gif','Fig1_parallel_model.png','Fig2_consecutive_model.png','Fig3_complex_model.png','Intro_tutorial.png','Model_selection.jpg']
162 | if single_tutorial is None:
163 | for f in tutorial_images:
164 | url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Tutorial_Notebooks/img/%s"%f
165 | with open(check_folder(path = os.sep.join(['Tutorial_Notebooks','img']), current_path = os.getcwd(), filename = f), 'wb') as out:
166 | r = http.request('GET', url, preload_content=False)
167 | shutil.copyfileobj(r, out)
168 |
--------------------------------------------------------------------------------