├── .coveragerc ├── .github └── workflows │ ├── python-publish.yml │ └── python-test.yml ├── .gitignore ├── .readthedocs.yml ├── LICENSE ├── README.md ├── doc ├── Makefile ├── make.bat ├── requirements.txt └── source │ ├── conf.py │ ├── examples │ ├── consistency_test.ipynb │ ├── examples.rst │ ├── phase_noise.ipynb │ ├── rabi_rwa.ipynb │ ├── t1_pure_depolarizing.ipynb │ ├── t2_markovian_noise.ipynb │ ├── t2_non_markovian_noise.ipynb │ └── t2_star_dephasing.ipynb │ ├── index.rst │ ├── qopt.rst │ ├── qopt_features │ ├── energy_spectra_analyser.ipynb │ ├── entanglement_fidelity.ipynb │ ├── filter_functions_basic.ipynb │ ├── monte_carlo_experiments.ipynb │ ├── numerics.ipynb │ ├── open_quantum_systems.ipynb │ ├── operator_class.ipynb │ ├── optimization.ipynb │ ├── parallelization.ipynb │ ├── pulse_parameterization.ipynb │ ├── qopt_features.rst │ ├── schroedinger_solver.ipynb │ └── transfer_function.ipynb │ ├── static │ └── QoptStructure.png │ └── stored │ ├── precession_times_1.npy │ ├── precession_times_2.npy │ ├── probabilities_echo_2.npy │ └── probabilities_fid_1.npy ├── environment.yml ├── feature_requests.md ├── patch_notes.md ├── pyproject.toml ├── pytest.ini ├── qopt ├── __init__.py ├── amplitude_functions.py ├── analyser.py ├── cost_functions.py ├── data_container.py ├── energy_spectrum.py ├── examples │ └── rabi_driving │ │ ├── lab_frame_setup.py │ │ ├── optimization_phase_control.ipynb │ │ ├── rabi_xy_setup.py │ │ └── setup.py ├── matrix.py ├── noise.py ├── optimization_data.py ├── optimize.py ├── parallel.py ├── performance_statistics.py ├── plotting.py ├── simulator.py ├── solver_algorithms.py ├── transfer_function.py └── util.py ├── qopt_tests ├── integration_tests │ └── test_t1_comparison.py ├── test_plan.md ├── testutil.py └── unittests │ ├── test_fidelity_computing.py │ ├── test_matrix.py │ ├── test_noise.py │ ├── test_solvers.py │ └── test_transfer_functions.py ├── requirements.txt ├── setup.cfg ├── setup.py └── temp ├── GaAsExchangeCoupledCerf.py ├── decoherence_checks.ipynb ├── ff_testutil.py ├── reproduce_previous_infids.py ├── tf_test.ipynb └── unrefactored_integration_tests ├── fidelity_computing_integration_test.py ├── pure_dephasing_fidelity.py ├── rabi_driving_lab_frame.py ├── rabi_driving_rotating_frame.py ├── test_gradients.py └── test_optimizers.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | 4 | [report] 5 | omit = 6 | */tests/* 7 | */__init__.py -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | jobs: 8 | deploy: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v3 12 | with: 13 | ref: release 14 | - name: Set up Python 15 | uses: actions/setup-python@v1 16 | with: 17 | python-version: '3.7' 18 | - name: Install dependencies 19 | run: | 20 | python -m pip install --upgrade pip 21 | pip install setuptools wheel twine 22 | - name: Build and publish 23 | env: 24 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 25 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 26 | run: | 27 | python setup.py sdist bdist_wheel 28 | twine upload dist/* 29 | -------------------------------------------------------------------------------- /.github/workflows/python-test.yml: -------------------------------------------------------------------------------- 1 | 
name: Run Unittests 2 | 3 | on: 4 | push: 5 | branches: [ master, release ] 6 | pull_request: 7 | branches: [ master, release ] 8 | 9 | jobs: 10 | test: 11 | name: Run Unittests 12 | runs-on: windows-latest 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | python-version: ['3.7', '3.8', '3.9', '3.10'] 17 | 18 | steps: 19 | - name: Checkout repository 20 | uses: actions/checkout@v3 21 | # The action checks out the branch, which triggered the event by default. 22 | 23 | - name: Set up Python 24 | uses: actions/setup-python@v3 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | 28 | - name: Install Dependencies 29 | run: | 30 | python -m pip install --upgrade pip 31 | python -m pip install .[qopt_tests] 32 | python -m pip install pytest-cov 33 | 34 | - name: Run the tests 35 | run: | 36 | python -m pytest qopt_tests --cov=qopt --cov-report=xml 37 | # it is important to use python -m because it adds the current directory to the path! 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .spyproject/ 3 | qopt/__pycache__/ 4 | build/ 5 | qsim.egg-info/ 6 | dist/ 7 | qopt.egg-info/ 8 | quantum_computer.egg-info/ 9 | qopt/examples/ff_benchmarking/__pycache__/ 10 | __pycache__/setup.cpython-37.pyc 11 | qopt/examples/rabi_driving/__pycache__/lab_frame_setup.cpython-37.pyc 12 | **/__pycache__/ 13 | .coverage 14 | coverage.xml 15 | temp/File Name 16 | temp/tensorflow/ 17 | doc/latexbuild/ 18 | ~/tensorflow_datasets/ 19 | logs/20220823-190009/train/events.out.tfevents.1661274009.DESKTOP-29V7IHI.12928.0.v2 20 | doc/short_latex_build/ 21 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Build documentation in the docs/ directory with Sphinx 9 | sphinx: 10 | configuration: doc/source/conf.py 11 | 12 | conda: 13 | environment: environment.yml 14 | 15 | # Optionally set the version of Python and requirements required to build your docs 16 | python: 17 | version: 3.7 18 | system_packages: true 19 | install: 20 | - method: pip 21 | path: . 22 | extra_requirements: 23 | - doc 24 | #- requirements: doc/requirements.txt 25 | #- requirements: requirements.txt -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # qopt: A Simulation and Quantum Optimal Control Package 2 | [![Build Status](https://github.com/qutech/qopt/actions/workflows/python-test.yml/badge.svg)](https://github.com/qutech/qopt/actions/workflows/python-test.yml) 3 | [![Documentation Status](https://img.shields.io/readthedocs/qopt)](https://qopt.readthedocs.io/en/latest/) 4 | [![PyPI version](https://img.shields.io/pypi/v/qopt)](https://pypi.org/project/qopt/) 5 | [![License](https://img.shields.io/github/license/qutech/qopt)](https://github.com/qutech/qopt/blob/master/LICENSE) 6 | 7 | ## Documentation 8 | The documentation can be found on 9 | [readthedocs](https://qopt.readthedocs.io/en/latest/index.html). 10 | It features an API documentation and an introduction in the 11 | form of jupyter notebooks demonstrating how to utilize the package. 
A 12 | complementary theoretical introduction is given in the 13 | qopt paper on 14 | [Phys. Rev. Applied](https://doi.org/10.1103/PhysRevApplied.17.034036) and an 15 | older version can be found on the 16 | [arXiv](https://arxiv.org/abs/2110.05873). 17 | 18 | ## Applications 19 | 20 | We set up another open-source repository named 21 | [qopt-applications](https://github.com/qutech/qopt-applications) to save and 22 | exchange quantum simulation and optimal control applications implemented using 23 | qopt. 24 | 25 | ## Introduction 26 | Realistic modelling of qubit systems including noise and constraints imposed 27 | by control hardware is required for performance prediction and control 28 | optimization of quantum processors. 29 | qopt is a software framework for simulating qubit dynamics and 30 | robust quantum optimal control considering common experimental situations. 31 | It supports modelling of open and closed qubit systems with a focus on the 32 | simulation of realistic noise characteristics and experimental constraints. 33 | Specifically, the influence of noise can be calculated using Monte Carlo 34 | methods, effective master equations or with the filter function formalism, 35 | enabling the investigation and mitigation of auto-correlated noise. In 36 | addition, limitations of control electronics including finite bandwidth 37 | effects can be considered. The calculation of gradients based on analytic 38 | results is implemented to facilitate the efficient optimization of control 39 | pulses. The software is published under an open source license, well-tested 40 | and features detailed documentation. 41 | 42 | ## Installation 43 | 44 | qopt is available on GitHub and on the Python Package Index (PyPI). 45 | To install qopt directly from PyPI, you can use pip: 46 | 47 | pip install qopt 48 | 49 | Alternatively, download the source code, navigate to the folder containing 50 | qopt and install it with 51 | 52 | pip install . 53 | 54 | or append the flag -e to install qopt in editable mode with symlinks: 55 | 56 | pip install -e . 57 | 58 | The -e stands for editable, as the symlinks allow you to make local changes to 59 | the source code. 60 | 61 | ### Optional packages 62 | 63 | If you wish to use the plotting features of the Quantum Toolbox in Python 64 | (QuTiP), then you need to install additional dependencies: 65 | 66 | conda install cython pytest pytest-cov jupyter 67 | 68 | Then add the conda-forge channel: 69 | 70 | conda config --append channels conda-forge 71 | 72 | and install QuTiP: 73 | 74 | conda install qutip 75 | 76 | Another optional package is simanneal, which provides simulated annealing for 77 | discrete optimization: 78 | 79 | conda install simanneal 80 | 81 | ## Feature Requests 82 | 83 | If you require an additional feature for your work, then please open an issue 84 | on GitHub or reach out to me via e-mail at j.teske@fz-juelich.de. 85 | There is a list of possible extensions to the package in markdown format in feature_requests.md. 86 | 87 | ## Patch Notes 88 | 89 | You can find the patch notes in a markdown file in the root folder of the 90 | package. You can also find them on 91 | [GitHub](https://github.com/qutech/qopt/blob/master/patch_notes.md). 92 | 93 | ## Citing 94 | 95 | If you are using qopt for your work, then please cite the 96 | [qopt paper](https://doi.org/10.1103/PhysRevApplied.17.034036), as the funding 97 | of the development depends on the public impact.
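## Quick Example

The following snippet is a minimal sketch of the operator arithmetic used throughout qopt. It only relies on `DenseOperator` calls that are shown in the documentation notebooks (`operator_class.ipynb` and `numerics.ipynb`) and is not a complete simulation or optimization workflow.

    import numpy as np
    from qopt.matrix import DenseOperator

    # Pauli matrices are provided as constructors of the operator class.
    sigma_x = DenseOperator.pauli_x()
    sigma_z = DenseOperator.pauli_z()

    # Matrix arithmetic is overloaded (scalar multiplication, addition, ...).
    print(2 * sigma_z)
    print(sigma_z + sigma_x)

    # Matrix exponential exp(.25j * pi * sigma_x) for an X_(pi/2) rotation,
    # together with its Frechet derivative in the direction of sigma_x.
    propagator, derivative = sigma_x.dexp(
        tau=.25j * np.pi, method='Frechet', compute_expm=True,
        direction=sigma_x)
    print(propagator)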
98 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXAPIDOC = sphinx-apidoc 8 | SPHINXPROJ = qopt 9 | SOURCEDIR = source 10 | BUILDDIR = build 11 | 12 | # User-friendly check for sphinx-build 13 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 14 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 15 | endif 16 | 17 | # Internal variables. 18 | PAPEROPT_a4 = -D latex_paper_size=a4 19 | PAPEROPT_letter = -D latex_paper_size=letter 20 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 21 | # the i18n builder cannot share the environment and doctrees with the others 22 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 23 | 24 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 25 | 26 | help: 27 | @echo "Please use \`make ' where is one of" 28 | @echo " html to make standalone HTML files" 29 | @echo " dirhtml to make HTML files named index.html in directories" 30 | @echo " singlehtml to make a single large HTML file" 31 | @echo " pickle to make pickle files" 32 | @echo " json to make JSON files" 33 | @echo " htmlhelp to make HTML files and a HTML help project" 34 | @echo " qthelp to make HTML files and a qthelp project" 35 | @echo " applehelp to make an Apple Help Book" 36 | @echo " devhelp to make HTML files and a Devhelp project" 37 | @echo " epub to make an epub" 38 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 39 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 40 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 41 | @echo " text to make text files" 42 | @echo " man to make manual pages" 43 | @echo " texinfo to make Texinfo files" 44 | @echo " info to make Texinfo files and run them through makeinfo" 45 | @echo " gettext to make PO message catalogs" 46 | @echo " changes to make an overview of all changed/added/deprecated items" 47 | @echo " xml to make Docutils-native XML files" 48 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 49 | @echo " linkcheck to check all external links for integrity" 50 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 51 | @echo " coverage to run coverage check of the documentation (if enabled)" 52 | 53 | clean: 54 | rm -rf $(BUILDDIR)/* 55 | 56 | extractapi: 57 | $(SPHINXAPIDOC) --no-toc -o source ../filter_functions 58 | 59 | html: extractapi 60 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 63 | 64 | dirhtml: extractapi 65 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 66 | @echo 67 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
68 | 69 | singlehtml: extractapi 70 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 71 | @echo 72 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 73 | 74 | pickle: extractapi 75 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 76 | @echo 77 | @echo "Build finished; now you can process the pickle files." 78 | 79 | json: extractapi 80 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 81 | @echo 82 | @echo "Build finished; now you can process the JSON files." 83 | 84 | htmlhelp: extractapi 85 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 86 | @echo 87 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 88 | ".hhp project file in $(BUILDDIR)/htmlhelp." 89 | 90 | qthelp: extractapi 91 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 92 | @echo 93 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 94 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 95 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/filter_functions.qhcp" 96 | @echo "To view the help file:" 97 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/filter_functions.qhc" 98 | 99 | applehelp: extractapi 100 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 101 | @echo 102 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 103 | @echo "N.B. You won't be able to view it unless you put it in" \ 104 | "~/Library/Documentation/Help or install it in your application" \ 105 | "bundle." 106 | 107 | devhelp: extractapi 108 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 109 | @echo 110 | @echo "Build finished." 111 | @echo "To view the help file:" 112 | @echo "# mkdir -p $$HOME/.local/share/devhelp/filter_functions" 113 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/filter_functions" 114 | @echo "# devhelp" 115 | 116 | epub: extractapi 117 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 118 | @echo 119 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 120 | 121 | latex: extractapi 122 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 123 | @echo 124 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 125 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 126 | "(use \`make latexpdf' here to do that automatically)." 127 | 128 | latexpdf: extractapi 129 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 130 | @echo "Running LaTeX files through pdflatex..." 131 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 132 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 133 | 134 | latexpdfja: extractapi 135 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 136 | @echo "Running LaTeX files through platex and dvipdfmx..." 137 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 138 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 139 | 140 | text: extractapi 141 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 142 | @echo 143 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 144 | 145 | man: extractapi 146 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 147 | @echo 148 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 149 | 150 | texinfo: extractapi 151 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 152 | @echo 153 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 
154 | @echo "Run \`make' in that directory to run these through makeinfo" \ 155 | "(use \`make info' here to do that automatically)." 156 | 157 | info: extractapi 158 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 159 | @echo "Running Texinfo files through makeinfo..." 160 | make -C $(BUILDDIR)/texinfo info 161 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 162 | 163 | gettext: extractapi 164 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 165 | @echo 166 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 167 | 168 | changes: extractapi 169 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 170 | @echo 171 | @echo "The overview file is in $(BUILDDIR)/changes." 172 | 173 | linkcheck: extractapi 174 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 175 | @echo 176 | @echo "Link check complete; look for any errors in the above output " \ 177 | "or in $(BUILDDIR)/linkcheck/output.txt." 178 | 179 | doctest: extractapi 180 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 181 | @echo "Testing of doctests in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/doctest/output.txt." 183 | 184 | coverage: extractapi 185 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 186 | @echo "Testing of coverage in the sources finished, look at the " \ 187 | "results in $(BUILDDIR)/coverage/python.txt." 188 | 189 | xml: extractapi 190 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 191 | @echo 192 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 193 | 194 | pseudoxml: extractapi 195 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 196 | @echo 197 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 198 | -------------------------------------------------------------------------------- /doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source 10 | set I18NSPHINXOPTS=%SPHINXOPTS% source 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. 
doctest to run all doctests embedded in the documentation if enabled 40 | echo. coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 2> nul 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | sphinx-apidoc --no-toc -o source ../qopt 75 | 76 | if "%1" == "html" ( 77 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 78 | if errorlevel 1 exit /b 1 79 | echo. 80 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 81 | goto end 82 | ) 83 | 84 | if "%1" == "dirhtml" ( 85 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 86 | if errorlevel 1 exit /b 1 87 | echo. 88 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 89 | goto end 90 | ) 91 | 92 | if "%1" == "singlehtml" ( 93 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 94 | if errorlevel 1 exit /b 1 95 | echo. 96 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 97 | goto end 98 | ) 99 | 100 | if "%1" == "pickle" ( 101 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 102 | if errorlevel 1 exit /b 1 103 | echo. 104 | echo.Build finished; now you can process the pickle files. 105 | goto end 106 | ) 107 | 108 | if "%1" == "json" ( 109 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 110 | if errorlevel 1 exit /b 1 111 | echo. 112 | echo.Build finished; now you can process the JSON files. 113 | goto end 114 | ) 115 | 116 | if "%1" == "htmlhelp" ( 117 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 118 | if errorlevel 1 exit /b 1 119 | echo. 120 | echo.Build finished; now you can run HTML Help Workshop with the ^ 121 | .hhp project file in %BUILDDIR%/htmlhelp. 122 | goto end 123 | ) 124 | 125 | if "%1" == "qthelp" ( 126 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 127 | if errorlevel 1 exit /b 1 128 | echo. 129 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 130 | .qhcp project file in %BUILDDIR%/qthelp, like this: 131 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\qopt.qhcp 132 | echo.To view the help file: 133 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\qopt.ghc 134 | goto end 135 | ) 136 | 137 | if "%1" == "devhelp" ( 138 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 139 | if errorlevel 1 exit /b 1 140 | echo. 141 | echo.Build finished. 142 | goto end 143 | ) 144 | 145 | if "%1" == "epub" ( 146 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 147 | if errorlevel 1 exit /b 1 148 | echo. 149 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 150 | goto end 151 | ) 152 | 153 | if "%1" == "latex" ( 154 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 155 | if errorlevel 1 exit /b 1 156 | echo. 
157 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 158 | goto end 159 | ) 160 | 161 | if "%1" == "latexpdf" ( 162 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 163 | cd %BUILDDIR%/latex 164 | make all-pdf 165 | cd %~dp0 166 | echo. 167 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 168 | goto end 169 | ) 170 | 171 | if "%1" == "latexpdfja" ( 172 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 173 | cd %BUILDDIR%/latex 174 | make all-pdf-ja 175 | cd %~dp0 176 | echo. 177 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 178 | goto end 179 | ) 180 | 181 | if "%1" == "text" ( 182 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 183 | if errorlevel 1 exit /b 1 184 | echo. 185 | echo.Build finished. The text files are in %BUILDDIR%/text. 186 | goto end 187 | ) 188 | 189 | if "%1" == "man" ( 190 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 191 | if errorlevel 1 exit /b 1 192 | echo. 193 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 194 | goto end 195 | ) 196 | 197 | if "%1" == "texinfo" ( 198 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 199 | if errorlevel 1 exit /b 1 200 | echo. 201 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 202 | goto end 203 | ) 204 | 205 | if "%1" == "gettext" ( 206 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 207 | if errorlevel 1 exit /b 1 208 | echo. 209 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 210 | goto end 211 | ) 212 | 213 | if "%1" == "changes" ( 214 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 215 | if errorlevel 1 exit /b 1 216 | echo. 217 | echo.The overview file is in %BUILDDIR%/changes. 218 | goto end 219 | ) 220 | 221 | if "%1" == "linkcheck" ( 222 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 223 | if errorlevel 1 exit /b 1 224 | echo. 225 | echo.Link check complete; look for any errors in the above output ^ 226 | or in %BUILDDIR%/linkcheck/output.txt. 227 | goto end 228 | ) 229 | 230 | if "%1" == "doctest" ( 231 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 232 | if errorlevel 1 exit /b 1 233 | echo. 234 | echo.Testing of doctests in the sources finished, look at the ^ 235 | results in %BUILDDIR%/doctest/output.txt. 236 | goto end 237 | ) 238 | 239 | if "%1" == "coverage" ( 240 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 241 | if errorlevel 1 exit /b 1 242 | echo. 243 | echo.Testing of coverage in the sources finished, look at the ^ 244 | results in %BUILDDIR%/coverage/python.txt. 245 | goto end 246 | ) 247 | 248 | if "%1" == "xml" ( 249 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 250 | if errorlevel 1 exit /b 1 251 | echo. 252 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 253 | goto end 254 | ) 255 | 256 | if "%1" == "pseudoxml" ( 257 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 258 | if errorlevel 1 exit /b 1 259 | echo. 260 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 
261 | goto end 262 | ) 263 | 264 | :end 265 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx>=1.4 2 | nbsphinx 3 | ipykernel 4 | numpydoc 5 | sphinx_rtd_theme 6 | jupyter_client 7 | ipython 8 | -------------------------------------------------------------------------------- /doc/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | import os 14 | import sys 15 | import qopt 16 | 17 | sys.path.insert(0, os.path.abspath('../../')) 18 | 19 | print(os.path.abspath('../../')) 20 | 21 | # -- Project information ----------------------------------------------------- 22 | 23 | project = 'qopt' 24 | copyright = '2020, Forschungszentrum Juelich, Julian Teske' 25 | author = 'Julian Teske' 26 | 27 | # The full version, including alpha/beta/rc tags 28 | release = qopt.__version__ 29 | 30 | # -- General configuration --------------------------------------------------- 31 | 32 | # Add any Sphinx extension module names here, as strings. They can be 33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 34 | # ones. 35 | extensions = [ 36 | 'nbsphinx', 37 | 'sphinx.ext.mathjax', 38 | 'sphinx.ext.todo', 39 | 'sphinx.ext.autosummary', 40 | 'sphinx.ext.extlinks', 41 | 'sphinx.ext.viewcode', 42 | 'sphinx.ext.ifconfig', 43 | 'sphinx.ext.napoleon', 44 | 'numpydoc', 45 | ] 46 | 47 | # Apidoc config 48 | apidoc_module_dir = '../../qopt' 49 | apidoc_excluded_paths = ['../qopt_tests'] 50 | apidoc_separate_modules = True 51 | 52 | # Numpydoc settings 53 | numpydoc_show_inherited_class_members = False 54 | numpydoc_show_class_members = False 55 | 56 | napoleon_numpy_docstring = True 57 | napoleon_use_admonition_for_notes = True 58 | 59 | # autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance'] 60 | # 'private-members', 'special-members', 'inherited-members' 61 | autoclass_content = 'both' 62 | # napoleon_include_init_with_doc = True 63 | 64 | # Add any paths that contain templates here, relative to this directory. 65 | templates_path = ['_templates'] 66 | 67 | # The suffix(es) of source filenames. 68 | # You can specify multiple suffix as a list of string: 69 | # 70 | # source_suffix = ['.rst', '.md'] 71 | source_suffix = '.rst' 72 | 73 | # The master toctree document. 74 | master_doc = 'index' 75 | 76 | # The language for content autogenerated by Sphinx. Refer to documentation 77 | # for a list of supported languages. 78 | # 79 | # This is also used if you do content translation via gettext catalogs. 80 | # Usually you set "language" from the command line for these cases. 81 | language = None 82 | 83 | # List of patterns, relative to source directory, that match files and 84 | # directories to ignore when looking for source files. 
85 | exclude_patterns = ['_build', '**.ipynb_checkpoints'] 86 | 87 | # Execute notebooks when building the documentation only if no outputs 88 | # are present. Set the execution time-out per cell to 120 s 89 | nbsphinx_execute = 'auto' 90 | nbsphinx_timeout = 120 91 | 92 | nbsphinx_execute_arguments = [ 93 | "--InlineBackend.figure_formats={'svg', 'pdf'}", 94 | "--InlineBackend.rc={'figure.dpi': 72}", 95 | ] 96 | 97 | # The reST default role (used for this markup: `text`) to use for all 98 | # documents. 99 | # default_role = None 100 | 101 | # If true, '()' will be appended to :func: etc. cross-reference text. 102 | add_function_parentheses = True 103 | 104 | # If true, the current module name will be prepended to all description 105 | # unit titles (such as .. function::). 106 | add_module_names = False 107 | 108 | # If true, sectionauthor and moduleauthor directives will be shown in the 109 | # output. They are ignored by default. 110 | # show_authors = False 111 | 112 | # The name of the Pygments (syntax highlighting) style to use. 113 | pygments_style = 'sphinx' 114 | 115 | # A list of ignored prefixes for module index sorting. 116 | # modindex_common_prefix = [] 117 | 118 | # If true, keep warnings as "system message" paragraphs in the built documents. 119 | # keep_warnings = False 120 | 121 | # If true, `todo` and `todoList` produce output, else they produce nothing. 122 | todo_include_todos = False 123 | # -- Options for HTML output ------------------------------------------------- 124 | 125 | # The theme to use for HTML and HTML Help pages. See the documentation for 126 | # a list of builtin themes. 127 | # 128 | html_theme = 'sphinx_rtd_theme' 129 | 130 | # Add any paths that contain custom static files (such as style sheets) here, 131 | # relative to this directory. They are copied after the builtin static files, 132 | # so a file named "default.css" will overwrite the builtin "default.css". 133 | # html_static_path = ['_static'] 134 | 135 | # -- Options for HTMLHelp output --------------------------------------------- 136 | 137 | # Output file base name for HTML help builder. 138 | htmlhelp_basename = 'qopt' 139 | -------------------------------------------------------------------------------- /doc/source/examples/examples.rst: -------------------------------------------------------------------------------- 1 | .. _examples: 2 | 3 | Example Application 4 | ------------------- 5 | This directory contains static examples that can also be run interactively 6 | from *Doc/source/examples* using `jupyter notebook`. 7 | 8 | .. toctree:: 9 | rabi_rwa 10 | t1_pure_depolarizing 11 | t2_star_dephasing 12 | t2_markovian_noise 13 | t2_non_markovian_noise 14 | consistency_test 15 | phase_noise 16 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. qopt documentation master file, created by 2 | sphinx-quickstart on Tue Apr 28 12:03:54 2020. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | qopt 7 | ==== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | :numbered: 12 | :caption: Contents: 13 | 14 | 15 | Welcome to ``qopt``. In this documentation you will find everything you need to 16 | know about qopt to simulate qubits and apply optimal control techniques. You 17 | can find the source code and install instructions on 18 | https://github.com/qutech/qopt. 
19 | 20 | A complementary publication about the software can be found at Phys. Rev. 21 | Applied https://doi.org/10.1103/PhysRevApplied.17.034036 and an older version 22 | on the arXiv 23 | https://arxiv.org/abs/2110.05873. This paper gives a sound introduction to the 24 | topic of qubit simulation and quantum optimal control. It also provides a 25 | systematic theoretical introduction to the simulation methods to give a profound 26 | understanding of each method's capabilities. 27 | 28 | Abstract 29 | ======== 30 | 31 | Realistic modelling of qubit systems including noise and constraints imposed 32 | by control hardware is required for performance prediction and control 33 | optimization of quantum processors. 34 | We introduce qopt, a software framework for simulating qubit dynamics and 35 | robust quantum optimal control considering common experimental situations. 36 | To this end, we model open and closed qubit systems with a focus on the 37 | simulation of realistic noise characteristics and experimental constraints. 38 | Specifically, the influence of noise can be calculated using Monte Carlo 39 | methods, effective master equations or with the filter function formalism, 40 | enabling the investigation and mitigation of auto-correlated noise. In 41 | addition, limitations of control electronics including finite bandwidth 42 | effects can be considered. The calculation of gradients based on analytic 43 | results is implemented to facilitate the efficient optimization of control 44 | pulses. The software is published under an open source license, well-tested 45 | and features a detailed documentation. 46 | 47 | Summary 48 | ======= 49 | 50 | This Python package is designed to facilitate the simulation of 51 | state-of-the-art quantum bits (qubits) including realistic experimental 52 | operation conditions, and for the optimization of noise-robust control pulses. 53 | For this purpose, an extensive set of noise simulation tools is 54 | included and complemented by methods to describe the limitations posed by the 55 | electronics steering the quantum operations. Compared to other available 56 | simulation packages, qopt stands out through its ability to efficiently simulate the 57 | effects of fast non-Markovian noise, while providing a general interface to 58 | define control pulses for any qubit type, making qopt platform-independent. 59 | The simulation interfaces to optimization algorithms to apply 60 | quantum optimal control theory, the field of study which optimizes the 61 | accuracy of quantum operations by intelligent steering methods. 62 | 63 | The functionalities can be coarsely divided into simulation and optimization of 64 | quantum operations. Various cost functions can be evaluated on the simulated 65 | evolution of the qubits such as an error rate, a gate or state fidelity or a 66 | leakage rate. Since gradient-based optimization algorithms perform 67 | extremely well in minimization problems, we implemented the derivatives of the 68 | cost functions with respect to the optimization parameters based on analytical calculations. 69 | 70 | Simulation 71 | ---------- 72 | 73 | The qopt package simulates closed or open quantum systems on a pulse level by 74 | solving the corresponding differential equations, namely the Schroedinger 75 | equation or a master equation in Lindblad form. 76 | To this end, pulses are discretized in time and the differential equations are 77 | solved using matrix exponentials.
The total propagator is then available for 78 | every time step to analyse the dynamics of the qubit system. 79 | 80 | 81 | Noise 82 | ----- 83 | 84 | The realistic simulation of noise is one of qopt's key features. The various 85 | methods are therefore described in more detail, and a brief overview is 86 | given stating the advantages and requirements of each method. 87 | 88 | **Monte Carlo Simulations** 89 | 90 | The most straightforward way to simulate noise is to draw samples from the noise 91 | distribution and repeat the simulation for each of those noise realizations. 92 | Any cost function is then averaged over the repetitions. 93 | The sampling can be based on pseudo-random number generators. 94 | Monte Carlo simulations are universally applicable but computationally 95 | expensive for high-frequency noise. 96 | 97 | 98 | **Lindblad Master Equation** 99 | 100 | In order to include dissipation effects in the simulation, the qubit and its 101 | environment can be modeled as an open quantum system, described by a master 102 | equation in Lindblad form. The solution of the master equation is in 103 | the general case not unitary, unlike the propagators calculated from 104 | Schroedinger's equation, such that it can also describe the loss of energy or 105 | information into the environment. This approach is numerically efficient but 106 | only applicable to systems subjected to Markovian noise. 107 | 108 | **Filter Functions** 109 | 110 | The filter function formalism is a mathematical approach which allows the 111 | estimation of fidelities in the presence of universal classical noise. It is 112 | numerically very efficient for low numbers of qubits and widely applicable. 113 | This package interfaces to the open source 114 | filter function package (https://github.com/qutech/filter_functions) 115 | written by Tobias Hangleiter. 116 | 117 | **Leakage** 118 | 119 | Leakage occurs when the qubit leaves the computational space spanned by the 120 | computational states. To take this kind of error into 121 | account, the Hilbert space must be extended as a vector space sum by the leakage 122 | levels. The simulation is then performed on the larger Hilbert space and 123 | needs to be truncated to the computational states for evaluation. The leakage 124 | rate, i.e. the transition rate into the leakage states, can be used to quantify the 125 | error rate caused by leakage. 126 | 127 | Pulse Parametrization 128 | --------------------- 129 | 130 | The pulse parametrization translates a mathematically described pulse function 131 | into discrete-time control amplitudes that appear in the Hamiltonian describing 132 | the qubit model. This comprises sampling a potentially continuously defined 133 | control pulse, evaluating the physical function that describes the relation 134 | between pulse values and the control amplitudes, and including the hardware 135 | limitations of the control electronics that generate the control pulse. 136 | 137 | 138 | **Amplitude Functions** 139 | 140 | The amplitude functions encode a differential relationship between the 141 | optimization parameters, which describe the pulse, and the control amplitudes, 142 | which appear in the Hamiltonian and describe the dynamics of the quantum 143 | system. An example would be a sinusoidal pulse that drives resonant excitations 144 | of a qubit. The optimization parameters would be the pulse length, the pulse 145 | frequency and the amplitude.
The amplitude function samples the continuous 146 | pulse and maps the voltage values to energy values in the Hamiltonian, 147 | which are the control amplitudes in this example. 148 | 149 | 150 | **Transfer Functions** 151 | 152 | To model realistic control electronics, the package includes transfer functions 153 | mapping the ideal pulse to the actually provided voltages. This can include, 154 | for example, exponential saturation to account for finite voltage rise times in 155 | pulse generators, Gaussian smoothing of pulses to mimic bandwidth limitations 156 | on arbitrary waveform generators, linear transformations or even 157 | the measured response of an arbitrary waveform generator to a set of input 158 | voltages. The transfer functions then map the ideal pulse to the actually 159 | generated pulse. 160 | 161 | Optimization 162 | ------------ 163 | 164 | To leverage a given noisy quantum computer to its full potential, optimal 165 | control techniques can be applied to mitigate the detrimental effects of noise. 166 | The package allows the use of different optimization algorithms due to the strong 167 | modularity of the implementation. 168 | 169 | **Analytical Derivatives** 170 | 171 | Gradient-based optimization algorithms such as GRAPE have proven to be 172 | versatile and reliable for pulse optimization. For the 173 | efficient calculation of gradients, the package implements analytical 174 | derivatives for the solution of the Schroedinger equation, the master 175 | equation in Lindblad form and all calculations used to estimate fidelities. 176 | 177 | 178 | Documentation 179 | ============= 180 | 181 | The documentation is structured into the three parts 'Features', 182 | 'Example Applications' and the 'API Documentation'. 183 | 184 | **qopt Features** 185 | 186 | The first part introduces the qopt functionalities step by step. Refer to this 187 | chapter for an introduction to the simulation package. 188 | 189 | **Example Applications** 190 | 191 | The 'Example 192 | Applications' combine an educational introduction to physical phenomena with 193 | simulation techniques and theoretical background information. They demonstrate 194 | how the package is used and address FAQs along the way. They can also serve as 195 | blueprints for applications. 196 | 197 | **API Documentation** 198 | 199 | You can find the full API Documentation in the last section. Each class is 200 | implemented in a single module and each module is a subsection in the 201 | auto-generated API documentation. These subsections start with a general 202 | description of the purpose of the respective class. If you want to gain a quick 203 | overview of the class structure, I advise you to read through these 204 | descriptions. During the implementation of a simulation using qopt, you can 205 | frequently jump to the classes and functions you are using to look up the 206 | signatures. 207 | 208 | 209 | .. toctree:: 210 | :maxdepth: 2 211 | :numbered: 212 | 213 | qopt_features/qopt_features 214 | examples/examples 215 | qopt API Documentation 216 | 217 | Citing 218 | ====== 219 | 220 | If you are using qopt for your work, then please cite the 221 | `qopt paper <https://doi.org/10.1103/PhysRevApplied.17.034036>`_, as the funding 222 | of the development depends on the public impact.
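Schematic Example
=================

The pulse parametrization chain described above (optimization parameters, amplitude function, transfer function) can be illustrated by a short, self-contained sketch. It uses plain NumPy/SciPy only; all names and values are illustrative stand-ins and do not represent the qopt API, for which the amplitude_functions and transfer_function modules in the API documentation are the reference.

.. code-block:: python

    import numpy as np
    from scipy.ndimage import gaussian_filter1d

    # Optimization parameters of a sinusoidal pulse (illustrative values).
    amplitude, frequency, phase = 0.5, 2 * np.pi * 0.1, 0.0

    # Role of an amplitude function: sample the continuous pulse on a
    # discrete time grid and map the pulse values to the control amplitudes
    # that enter the Hamiltonian.
    times = np.linspace(0, 10, 100)
    ideal_pulse = amplitude * np.sin(frequency * times + phase)

    # Role of a transfer function: mimic the bandwidth limitation of the
    # control electronics, here by Gaussian smoothing of the ideal pulse.
    transferred_pulse = gaussian_filter1d(ideal_pulse, sigma=2.0)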
223 | 224 | 225 | 226 | Indices and tables 227 | ================== 228 | 229 | * :ref:`genindex` 230 | * :ref:`modindex` 231 | * :ref:`search` 232 | -------------------------------------------------------------------------------- /doc/source/qopt.rst: -------------------------------------------------------------------------------- 1 | qopt API Documentation 2 | ========================= 3 | 4 | amplitude\_function module 5 | --------------------------------- 6 | 7 | .. automodule:: qopt.amplitude_functions 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | 12 | analyser module 13 | ------------------------------ 14 | 15 | .. automodule:: qopt.analyser 16 | :members: 17 | :undoc-members: 18 | :show-inheritance: 19 | 20 | cost\_functions module 21 | -------------------------------- 22 | 23 | .. automodule:: qopt.cost_functions 24 | :members: 25 | :undoc-members: 26 | :show-inheritance: 27 | 28 | data\_container module 29 | --------------------------------- 30 | 31 | .. automodule:: qopt.data_container 32 | :members: 33 | :undoc-members: 34 | :show-inheritance: 35 | 36 | energy\_spectrum module 37 | ---------------------------------------- 38 | 39 | .. automodule:: qopt.energy_spectrum 40 | :members: 41 | :undoc-members: 42 | :show-inheritance: 43 | 44 | matrix module 45 | ------------------------------ 46 | 47 | .. automodule:: qopt.matrix 48 | :members: 49 | :undoc-members: 50 | :show-inheritance: 51 | 52 | noise module 53 | ------------------------------ 54 | 55 | .. automodule:: qopt.noise 56 | :members: 57 | :undoc-members: 58 | :show-inheritance: 59 | 60 | optimization\_data module 61 | ------------------------------ 62 | 63 | .. automodule:: qopt.optimization_data 64 | :members: 65 | :undoc-members: 66 | :show-inheritance: 67 | 68 | optimize module 69 | ------------------------------ 70 | 71 | .. automodule:: qopt.optimize 72 | :members: 73 | :undoc-members: 74 | :show-inheritance: 75 | 76 | qopt.parallel module 77 | ----------------------------------- 78 | 79 | .. automodule:: qopt.parallel 80 | :members: 81 | :undoc-members: 82 | :show-inheritance: 83 | 84 | qopt.performance\_statistics module 85 | ----------------------------------- 86 | 87 | .. automodule:: qopt.performance_statistics 88 | :members: 89 | :undoc-members: 90 | :show-inheritance: 91 | 92 | qopt.plotting module 93 | ----------------------------------- 94 | 95 | .. automodule:: qopt.plotting 96 | :members: 97 | :undoc-members: 98 | 99 | simulator module 100 | ------------------------------ 101 | 102 | .. automodule:: qopt.simulator 103 | :members: 104 | :undoc-members: 105 | :show-inheritance: 106 | 107 | solver\_algorithms module 108 | ------------------------------ 109 | 110 | .. automodule:: qopt.solver_algorithms 111 | :members: 112 | :undoc-members: 113 | :show-inheritance: 114 | 115 | transfer\_function module 116 | ------------------------------ 117 | 118 | .. automodule:: qopt.transfer_function 119 | :members: 120 | :undoc-members: 121 | :show-inheritance: 122 | 123 | util module 124 | ----------------------------- 125 | 126 | .. automodule:: qopt.util 127 | :members: 128 | :undoc-members: 129 | :show-inheritance: 130 | 131 | 132 | Module contents 133 | --------------- 134 | 135 | .. 
automodule:: qopt 136 | :members: 137 | :undoc-members: 138 | :show-inheritance: 139 | -------------------------------------------------------------------------------- /doc/source/qopt_features/numerics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "source": [ 6 | "# Numerics\n", 7 | "\n", 8 | "The numerical intensive calculations are encapsulated by the `OperatorMatrix`\n", 9 | "class, which can encode quantum states or operators in a matrix representation,\n", 10 | "meaning that each object must have two dimensions. The class was already\n", 11 | "introduced earlier, but now we would like to illuminate the computational\n", 12 | "efficiency for advanced users.\n", 13 | "\n", 14 | "A frequently used and computationally complex operation is the calculation of\n", 15 | "the matrix potential, which is used to solve partial differential equations\n", 16 | "or first order like Schroedingers equation or a master equation in lindblad\n", 17 | "form.\n", 18 | "For example an $X_{\\pi/2}$ rotation on the bloch sphere\n", 19 | "is given by a unitary:\n", 20 | "\n", 21 | "\\begin{equation}\n", 22 | "U = e^{i \\pi \\sigma_x /4}\n", 23 | "\\end{equation}\n", 24 | "\n", 25 | "which can be calculated using several numeric methods. The matrix exponential and its frechet derivative are usually calculated together, for example by spectral decomposition:" 26 | ], 27 | "metadata": { 28 | "collapsed": false, 29 | "pycharm": { 30 | "name": "#%% md\n" 31 | } 32 | } 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 1, 37 | "outputs": [ 38 | { 39 | "name": "stdout", 40 | "output_type": "stream", 41 | "text": [ 42 | "(DenseOperator with data: \n", 43 | "array([[7.07106781e-01-2.22044605e-16j, 5.55111512e-17+7.07106781e-01j],\n", 44 | " [5.55111512e-17+7.07106781e-01j, 7.07106781e-01+0.00000000e+00j]]), DenseOperator with data: \n", 45 | "array([[-5.55360367e-01-1.14514159e-16j, 2.88907583e-16+5.55360367e-01j],\n", 46 | " [ 2.75869449e-16+5.55360367e-01j, -5.55360367e-01-1.88672737e-16j]]))\n" 47 | ] 48 | } 49 | ], 50 | "source": [ 51 | "import numpy as np\n", 52 | "from qopt.matrix import DenseOperator\n", 53 | "\n", 54 | "sigma_x = DenseOperator.pauli_x()\n", 55 | "\n", 56 | "print(sigma_x.dexp(tau=.25j * np.pi,\n", 57 | " method='spectral',\n", 58 | " compute_expm=True,\n", 59 | " direction=sigma_x))\n" 60 | ], 61 | "metadata": { 62 | "collapsed": false, 63 | "pycharm": { 64 | "name": "#%%\n" 65 | } 66 | } 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "source": [ 71 | "Aternatively, the scipy method `expm_frechet` can be used:" 72 | ], 73 | "metadata": { 74 | "collapsed": false, 75 | "pycharm": { 76 | "name": "#%% md\n" 77 | } 78 | } 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 2, 83 | "outputs": [ 84 | { 85 | "name": "stdout", 86 | "output_type": "stream", 87 | "text": [ 88 | "(DenseOperator with data: \n", 89 | "array([[0.70710678+0.j , 0. +0.70710678j],\n", 90 | " [0. +0.70710678j, 0.70710678+0.j ]]), DenseOperator with data: \n", 91 | "array([[-0.55536037+0.j , 0. +0.55536037j],\n", 92 | " [ 0. 
+0.55536037j, -0.55536037+0.j ]]))\n" 93 | ] 94 | } 95 | ], 96 | "source": [ 97 | "print(sigma_x.dexp(tau=.25j * np.pi,\n", 98 | " method='Frechet',\n", 99 | " compute_expm=True,\n", 100 | " direction=sigma_x))" 101 | ], 102 | "metadata": { 103 | "collapsed": false, 104 | "pycharm": { 105 | "name": "#%%\n" 106 | } 107 | } 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "source": [ 112 | "The derivative can also be calculated by finite differences using the method approx:" 113 | ], 114 | "metadata": { 115 | "collapsed": false, 116 | "pycharm": { 117 | "name": "#%% md\n" 118 | } 119 | } 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": 3, 124 | "outputs": [ 125 | { 126 | "name": "stdout", 127 | "output_type": "stream", 128 | "text": [ 129 | "(DenseOperator with data: \n", 130 | "array([[7.07106781e-01-2.22044605e-16j, 5.55111512e-17+7.07106781e-01j],\n", 131 | " [5.55111512e-17+7.07106781e-01j, 7.07106781e-01+0.00000000e+00j]]), DenseOperator with data: \n", 132 | "array([[-5.55359092e-01+2.22044605e-06j, -5.55111512e-07+5.55362423e-01j],\n", 133 | " [-5.55111512e-07+5.55362423e-01j, -5.55361312e-01+0.00000000e+00j]]))\n" 134 | ] 135 | } 136 | ], 137 | "source": [ 138 | "print(sigma_x.dexp(tau=.25j * np.pi,\n", 139 | " method='approx',\n", 140 | " compute_expm=True,\n", 141 | " direction=sigma_x))" 142 | ], 143 | "metadata": { 144 | "collapsed": false, 145 | "pycharm": { 146 | "name": "#%%\n" 147 | } 148 | } 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "source": [ 153 | "And we can also explicitly calculate a number of terms in the expansion serias of\n", 154 | "the exponential function:" 155 | ], 156 | "metadata": { 157 | "collapsed": false, 158 | "pycharm": { 159 | "name": "#%% md\n" 160 | } 161 | } 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": 4, 166 | "outputs": [ 167 | { 168 | "name": "stdout", 169 | "output_type": "stream", 170 | "text": [ 171 | "(DenseOperator with data: \n", 172 | "array([[7.07106781e-01-2.22044605e-16j, 5.55111512e-17+7.07106781e-01j],\n", 173 | " [5.55111512e-17+7.07106781e-01j, 7.07106781e-01+0.00000000e+00j]]), DenseOperator with data: \n", 174 | "array([[0.+0.j , 0.+0.78539816j],\n", 175 | " [0.+0.78539816j, 0.+0.j ]]))\n" 176 | ] 177 | } 178 | ], 179 | "source": [ 180 | "print(sigma_x.dexp(tau=.25j * np.pi,\n", 181 | " method='first_order',\n", 182 | " compute_expm=True,\n", 183 | " direction=sigma_x))" 184 | ], 185 | "metadata": { 186 | "collapsed": false, 187 | "pycharm": { 188 | "name": "#%%\n" 189 | } 190 | } 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": 5, 195 | "outputs": [ 196 | { 197 | "name": "stdout", 198 | "output_type": "stream", 199 | "text": [ 200 | "(DenseOperator with data: \n", 201 | "array([[7.07106781e-01-2.22044605e-16j, 5.55111512e-17+7.07106781e-01j],\n", 202 | " [5.55111512e-17+7.07106781e-01j, 7.07106781e-01+0.00000000e+00j]]), DenseOperator with data: \n", 203 | "array([[-0.61685028+0.j , 0. +0.78539816j],\n", 204 | " [ 0. 
+0.78539816j, -0.61685028+0.j ]]))\n" 205 | ] 206 | } 207 | ], 208 | "source": [ 209 | "print(sigma_x.dexp(tau=.25j * np.pi,\n", 210 | " method='second_order',\n", 211 | " compute_expm=True,\n", 212 | " direction=sigma_x))" 213 | ], 214 | "metadata": { 215 | "collapsed": false, 216 | "pycharm": { 217 | "name": "#%%\n" 218 | } 219 | } 220 | }, 221 | { 222 | "cell_type": "code", 223 | "source": [ 224 | "print(sigma_x.dexp(tau=.25j * np.pi,\n", 225 | " method='third_order',\n", 226 | " compute_expm=True,\n", 227 | " direction=sigma_x))" 228 | ], 229 | "metadata": { 230 | "collapsed": false, 231 | "pycharm": { 232 | "name": "#%%\n" 233 | } 234 | }, 235 | "execution_count": 6, 236 | "outputs": [ 237 | { 238 | "name": "stdout", 239 | "output_type": "stream", 240 | "text": [ 241 | "(DenseOperator with data: \n", 242 | "array([[7.07106781e-01-2.22044605e-16j, 5.55111512e-17+7.07106781e-01j],\n", 243 | " [5.55111512e-17+7.07106781e-01j, 7.07106781e-01+0.00000000e+00j]]), DenseOperator with data: \n", 244 | "array([[-0.61685028+0.j , 0. +0.54316163j],\n", 245 | " [ 0. +0.54316163j, -0.61685028+0.j ]]))\n" 246 | ] 247 | } 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": 6, 253 | "outputs": [], 254 | "source": [], 255 | "metadata": { 256 | "collapsed": false, 257 | "pycharm": { 258 | "name": "#%%\n" 259 | } 260 | } 261 | } 262 | ], 263 | "metadata": { 264 | "kernelspec": { 265 | "display_name": "Python 3", 266 | "language": "python", 267 | "name": "python3" 268 | }, 269 | "language_info": { 270 | "codemirror_mode": { 271 | "name": "ipython", 272 | "version": 2 273 | }, 274 | "file_extension": ".py", 275 | "mimetype": "text/x-python", 276 | "name": "python", 277 | "nbconvert_exporter": "python", 278 | "pygments_lexer": "ipython2", 279 | "version": "2.7.6" 280 | } 281 | }, 282 | "nbformat": 4, 283 | "nbformat_minor": 0 284 | } -------------------------------------------------------------------------------- /doc/source/qopt_features/operator_class.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true, 7 | "pycharm": { 8 | "name": "#%% md\n" 9 | } 10 | }, 11 | "source": [ 12 | "# Operator Class\n", 13 | "\n", 14 | "Quantum operators are represented as matrices in qopt. The class `DenseOperator`\n", 15 | "encodes a quantum operator in a dense representation. Simple examples are the\n", 16 | "pauli matrices" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 1, 22 | "outputs": [ 23 | { 24 | "name": "stdout", 25 | "output_type": "stream", 26 | "text": [ 27 | "DenseOperator with data: \n", 28 | "array([[0.+0.j, 1.+0.j],\n", 29 | " [1.+0.j, 0.+0.j]])\n", 30 | "DenseOperator with data: \n", 31 | "array([[ 0.+0.j, -0.-1.j],\n", 32 | " [ 0.+1.j, 0.+0.j]])\n" 33 | ] 34 | } 35 | ], 36 | "source": [ 37 | "from qopt import *\n", 38 | "\n", 39 | "print(DenseOperator.pauli_x())\n", 40 | "print(DenseOperator.pauli_y())\n" 41 | ], 42 | "metadata": { 43 | "collapsed": false, 44 | "pycharm": { 45 | "name": "#%%\n" 46 | } 47 | } 48 | }, 49 | { 50 | "cell_type": "markdown", 51 | "source": [ 52 | "Internally, the `DenseOperator` is based on a 2-dimensional numpy array, which\n", 53 | "can be accessed by the `data` attribute." 
54 | ], 55 | "metadata": { 56 | "collapsed": false, 57 | "pycharm": { 58 | "name": "#%% md\n", 59 | "is_executing": true 60 | } 61 | } 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 2, 66 | "outputs": [ 67 | { 68 | "name": "stdout", 69 | "output_type": "stream", 70 | "text": [ 71 | "[[0.+0.j 1.+0.j]\n", 72 | " [1.+0.j 0.+0.j]]\n" 73 | ] 74 | } 75 | ], 76 | "source": [ 77 | "print(DenseOperator.pauli_x().data)" 78 | ], 79 | "metadata": { 80 | "collapsed": false, 81 | "pycharm": { 82 | "name": "#%%\n" 83 | } 84 | } 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "source": [ 89 | "The same class can also be used to store state vectors, density matrices,\n", 90 | "propagators and so on. The `DenseOperator` can be initialized from a\n", 91 | "2-dimensional numpy array, a scipy sparse matrix or a `Qobj` from the\n", 92 | "Quantum Toolbox in Python (QuTiP)." 93 | ], 94 | "metadata": { 95 | "collapsed": false, 96 | "pycharm": { 97 | "name": "#%% md\n" 98 | } 99 | } 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": 3, 104 | "outputs": [ 105 | { 106 | "name": "stdout", 107 | "output_type": "stream", 108 | "text": [ 109 | "DenseOperator with data: \n", 110 | "array([[ 1.+0.j, 0.+0.j],\n", 111 | " [ 0.+0.j, -1.+0.j]])\n", 112 | "DenseOperator with data: \n", 113 | "array([[ 1.+0.j, 0.+0.j],\n", 114 | " [ 0.+0.j, -1.+0.j]])\n", 115 | "Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\n", 116 | "Qobj data =\n", 117 | "[[ 1. 0.]\n", 118 | " [ 0. -1.]]\n", 119 | "DenseOperator with data: \n", 120 | "array([[ 1.+0.j, 0.+0.j],\n", 121 | " [ 0.+0.j, -1.+0.j]])\n" 122 | ] 123 | } 124 | ], 125 | "source": [ 126 | "import numpy as np\n", 127 | "import scipy.sparse as sp\n", 128 | "from qutip import sigmaz\n", 129 | "\n", 130 | "pauli_z = DenseOperator(np.diag((1, -1)))\n", 131 | "print(pauli_z)\n", 132 | "pauli_z_from_sparse = DenseOperator(sp.csr_matrix(np.diag((1, -1))))\n", 133 | "print(pauli_z_from_sparse)\n", 134 | "print(sigmaz())\n", 135 | "print(DenseOperator(sigmaz()))" 136 | ], 137 | "metadata": { 138 | "collapsed": false, 139 | "pycharm": { 140 | "name": "#%%\n" 141 | } 142 | } 143 | }, 144 | { 145 | "cell_type": "markdown", 146 | "source": [ 147 | "The matrix arithmetic is overloaded to support intuitive matrix operation like\n", 148 | "addition, multiplication, scalar multiplication and so on." 
149 | ], 150 | "metadata": { 151 | "collapsed": false, 152 | "pycharm": { 153 | "name": "#%% md\n" 154 | } 155 | } 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 4, 160 | "outputs": [ 161 | { 162 | "name": "stdout", 163 | "output_type": "stream", 164 | "text": [ 165 | "2 * pauli_z =\n", 166 | "DenseOperator with data: \n", 167 | "array([[ 2.+0.j, 0.+0.j],\n", 168 | " [ 0.+0.j, -2.+0.j]])\n", 169 | "pauli_z + pauli_x =\n", 170 | "DenseOperator with data: \n", 171 | "array([[ 1.+0.j, 1.+0.j],\n", 172 | " [ 1.+0.j, -1.+0.j]])\n", 173 | "pauli_x * pauli_z =\n", 174 | "DenseOperator with data: \n", 175 | "array([[ 0.+0.j, -1.+0.j],\n", 176 | " [ 1.+0.j, 0.+0.j]])\n" 177 | ] 178 | } 179 | ], 180 | "source": [ 181 | "p_x = DenseOperator.pauli_x()\n", 182 | "p_z = DenseOperator.pauli_z()\n", 183 | "\n", 184 | "print(\"2 * pauli_z =\")\n", 185 | "print(2 * p_z)\n", 186 | "print(\"pauli_z + pauli_x =\")\n", 187 | "print(p_z + p_x)\n", 188 | "print(\"pauli_x * pauli_z =\")\n", 189 | "print(p_x * p_z)" 190 | ], 191 | "metadata": { 192 | "collapsed": false, 193 | "pycharm": { 194 | "name": "#%%\n" 195 | } 196 | } 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "source": [ 201 | "The operator class implements plenty of useful functions for quantum mechanics.\n", 202 | "For example the kronecker matrix product and partial traces are included to\n", 203 | "work with product spaces.\n", 204 | "\n", 205 | "Assume you describe two qubits and need an operator\n", 206 | "$p = \\sigma_x \\otimes \\sigma_0$ that operates as\n", 207 | "bit flip on the first qubit and as identity on the second qubit. Then this\n", 208 | "operator is constructed as:" 209 | ], 210 | "metadata": { 211 | "collapsed": false, 212 | "pycharm": { 213 | "name": "#%% md\n" 214 | } 215 | } 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": 5, 220 | "outputs": [ 221 | { 222 | "name": "stdout", 223 | "output_type": "stream", 224 | "text": [ 225 | "DenseOperator with data: \n", 226 | "array([[0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],\n", 227 | " [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],\n", 228 | " [1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],\n", 229 | " [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])\n" 230 | ] 231 | } 232 | ], 233 | "source": [ 234 | "p = DenseOperator.pauli_x().kron(DenseOperator.pauli_0())\n", 235 | "print(p)" 236 | ], 237 | "metadata": { 238 | "collapsed": false, 239 | "pycharm": { 240 | "name": "#%%\n" 241 | } 242 | } 243 | }, 244 | { 245 | "cell_type": "markdown", 246 | "source": [ 247 | "And taking the partial trace over the second space provides the bit flip gate:" 248 | ], 249 | "metadata": { 250 | "collapsed": false, 251 | "pycharm": { 252 | "name": "#%% md\n" 253 | } 254 | } 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": 6, 259 | "outputs": [ 260 | { 261 | "name": "stdout", 262 | "output_type": "stream", 263 | "text": [ 264 | "DenseOperator with data: \n", 265 | "array([[0.+0.j, 1.+0.j],\n", 266 | " [1.+0.j, 0.+0.j]])\n" 267 | ] 268 | } 269 | ], 270 | "source": [ 271 | "print(.5 * p.ptrace(dims=[2, 2], remove=[1]))" 272 | ], 273 | "metadata": { 274 | "collapsed": false, 275 | "pycharm": { 276 | "name": "#%%\n" 277 | } 278 | } 279 | }, 280 | { 281 | "cell_type": "markdown", 282 | "source": [ 283 | "When calculating the partial trace, the argument 'dims' specifies the\n", 284 | "dimensions of the subsystems and the argument 'remove' contains a list of the\n", 285 | "subsystems that are to be traced over.\n", 286 | "\n", 287 | "Another useful function is the spectral decomposition:" 288 | ], 
289 | "metadata": { 290 | "collapsed": false, 291 | "pycharm": { 292 | "name": "#%% md\n" 293 | } 294 | } 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": 7, 299 | "outputs": [ 300 | { 301 | "name": "stdout", 302 | "output_type": "stream", 303 | "text": [ 304 | "Eigenvalues:\n", 305 | "[-1. 1.]\n", 306 | "Eigenvectors:\n", 307 | "[[-0.70710678+0.j 0.70710678+0.j]\n", 308 | " [ 0.70710678+0.j 0.70710678+0.j]]\n" 309 | ] 310 | } 311 | ], 312 | "source": [ 313 | "eigenvalues, eigenvectors = p_x.spectral_decomposition(hermitian=True)\n", 314 | "print('Eigenvalues:')\n", 315 | "print(eigenvalues)\n", 316 | "print('Eigenvectors:')\n", 317 | "print(eigenvectors)" 318 | ], 319 | "metadata": { 320 | "collapsed": false, 321 | "pycharm": { 322 | "name": "#%%\n" 323 | } 324 | } 325 | }, 326 | { 327 | "cell_type": "markdown", 328 | "source": [ 329 | "And complex conjugation, transposition and the calculation of the adjoint\n", 330 | "matrix are also supported:" 331 | ], 332 | "metadata": { 333 | "collapsed": false, 334 | "pycharm": { 335 | "name": "#%% md\n" 336 | } 337 | } 338 | }, 339 | { 340 | "cell_type": "code", 341 | "execution_count": 8, 342 | "outputs": [ 343 | { 344 | "name": "stdout", 345 | "output_type": "stream", 346 | "text": [ 347 | "Pauli Y:\n", 348 | "DenseOperator with data: \n", 349 | "array([[ 0.+0.j, -0.-1.j],\n", 350 | " [ 0.+1.j, 0.+0.j]])\n", 351 | "Pauli Y complex conjugated:\n", 352 | "DenseOperator with data: \n", 353 | "array([[ 0.-0.j, -0.+1.j],\n", 354 | " [ 0.-1.j, 0.-0.j]])\n", 355 | "Pauli Y complex transposed:\n", 356 | "0j\n", 357 | "Pauli Y complex adjoint:\n", 358 | "DenseOperator with data: \n", 359 | "array([[ 0.-0.j, 0.-1.j],\n", 360 | " [-0.+1.j, 0.-0.j]])\n" 361 | ] 362 | } 363 | ], 364 | "source": [ 365 | "p_y = DenseOperator.pauli_y()\n", 366 | "print('Pauli Y:')\n", 367 | "print(p_y)\n", 368 | "print('Pauli Y complex conjugated:')\n", 369 | "print(p_y.conj())\n", 370 | "print('Pauli Y complex transposed:')\n", 371 | "print(p_y.tr())\n", 372 | "print('Pauli Y complex adjoint:')\n", 373 | "print(p_y.dag())" 374 | ], 375 | "metadata": { 376 | "collapsed": false, 377 | "pycharm": { 378 | "name": "#%%\n" 379 | } 380 | } 381 | }, 382 | { 383 | "cell_type": "markdown", 384 | "source": [ 385 | "There are even more functions to be discovered in the API documentation." 386 | ], 387 | "metadata": { 388 | "collapsed": false, 389 | "pycharm": { 390 | "name": "#%% md\n" 391 | } 392 | } 393 | } 394 | ], 395 | "metadata": { 396 | "kernelspec": { 397 | "name": "python3", 398 | "language": "python", 399 | "display_name": "Python 3" 400 | }, 401 | "language_info": { 402 | "codemirror_mode": { 403 | "name": "ipython", 404 | "version": 2 405 | }, 406 | "file_extension": ".py", 407 | "mimetype": "text/x-python", 408 | "name": "python", 409 | "nbconvert_exporter": "python", 410 | "pygments_lexer": "ipython2", 411 | "version": "2.7.6" 412 | } 413 | }, 414 | "nbformat": 4, 415 | "nbformat_minor": 0 416 | } -------------------------------------------------------------------------------- /doc/source/qopt_features/qopt_features.rst: -------------------------------------------------------------------------------- 1 | .. _qopt_features: 2 | 3 | qopt Features 4 | ------------- 5 | This directory contains static examples that can also be run interactively 6 | from *Doc/source/qopt_features* using `jupyter notebook`. 7 | 8 | .. 
toctree:: 9 | operator_class 10 | schroedinger_solver 11 | entanglement_fidelity 12 | optimization 13 | transfer_function 14 | pulse_parameterization 15 | monte_carlo_experiments 16 | open_quantum_systems 17 | filter_functions_basic 18 | energy_spectra_analyser 19 | parallelization 20 | numerics 21 | -------------------------------------------------------------------------------- /doc/source/static/QoptStructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qutech/qopt/448c78863d394c775f5d4eb60d9139f163dc1411/doc/source/static/QoptStructure.png -------------------------------------------------------------------------------- /doc/source/stored/precession_times_1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qutech/qopt/448c78863d394c775f5d4eb60d9139f163dc1411/doc/source/stored/precession_times_1.npy -------------------------------------------------------------------------------- /doc/source/stored/precession_times_2.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qutech/qopt/448c78863d394c775f5d4eb60d9139f163dc1411/doc/source/stored/precession_times_2.npy -------------------------------------------------------------------------------- /doc/source/stored/probabilities_echo_2.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qutech/qopt/448c78863d394c775f5d4eb60d9139f163dc1411/doc/source/stored/probabilities_echo_2.npy -------------------------------------------------------------------------------- /doc/source/stored/probabilities_fid_1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qutech/qopt/448c78863d394c775f5d4eb60d9139f163dc1411/doc/source/stored/probabilities_fid_1.npy -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: qopt 2 | 3 | channels: 4 | - defaults 5 | - conda-forge 6 | 7 | dependencies: 8 | - qutip 9 | - pip 10 | 11 | prefix: /home/docs/.conda/envs/qopt -------------------------------------------------------------------------------- /feature_requests.md: -------------------------------------------------------------------------------- 1 | # Feature Requests 2 | 3 | Since this version by no means covers all possible features a quantum optimal 4 | control package can have, this document gathers features requested by 5 | (potential) users. If you require a new feature urgently, you can reach out to me 6 | via email: julian.teske@rwth-aachen.de 7 | 8 | 9 | ### Dynamically change between lab and rotating frame 10 | (priority low) In the special case of Rabi driving, change between two frames 11 | connected by a unitary evolution (i.e. combine single-qubit gates in the rotating 12 | frame with two-qubit gates in the lab frame); a sketch follows below.
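A sketch of what such a frame change could look like, mirroring the helper `transform_prop_to_rot_frame` that already ships in `qopt/examples/rabi_driving/lab_frame_setup.py` (the resonance frequency and the lab-frame propagator below are placeholders, not part of the requested feature itself):

```python
import numpy as np
from qopt import DenseOperator

omega = 2 * np.pi        # qubit resonance frequency (placeholder value)
t_start, t_end = 0., 1.  # time interval covered by the propagator


def frame_unitary(t):
    """Frame-change unitary U(t) = exp(-1j * omega * t * sigma_z / 2)."""
    return (-.5 * DenseOperator.pauli_z()).exp(1j * omega * t)


# Stand-in for a propagator calculated by a lab-frame solver.
propagator_lab = DenseOperator.pauli_x()

# The same propagator expressed in the rotating frame.
propagator_rot = (frame_unitary(t_end).dag()
                  * propagator_lab
                  * frame_unitary(t_start))
```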
13 | 14 | 15 | -------------------------------------------------------------------------------- /patch_notes.md: -------------------------------------------------------------------------------- 1 | ### Version 0.1 to 1.0 2 | 3 | Parallelization 4 | - Monte Carlo Simulations can be run in parallel 5 | - Optimization with multiple seeds can be run in parallel 6 | 7 | Filter Function Derivatives 8 | - New filter function cost function can be used with analytical gradients 9 | for the optimization 10 | 11 | Transfer Function 12 | - New base class MatrixTF to distinguish between transfer functions implemented 13 | as matrix multiplication and other transfer functions 14 | - implementation of a Gaussian convolution as transfer function 15 | 16 | Solver Algorithms 17 | - the times are now set automatically to the transfer functions. The Solver 18 | must now be instantiated with the untransferred times 19 | - drift Hamiltonians can be set to constant by setting only a single element; 20 | otherwise you need one element for each transferred time step. 21 | 22 | Optimizer 23 | - scalar optimization algorithms available 24 | - gradient-free Nelder-Mead algorithm available 25 | - cost function weights must now be given in the optimizer class 26 | 27 | ### Version 1.0 to 1.1 28 | 29 | Cost Functions 30 | - refactoring of the angle axis representation 31 | 32 | Matrix 33 | - implements the division by a scalar via the div operator 34 | 35 | Documentation 36 | - Extends the documentation with the example notebook series 37 | 38 | ### Version 1.1 to 1.2 39 | 40 | General: 41 | - Updates in the README, including paper reference and bug fixes 42 | - changed the URL from git-ce to github 43 | 44 | Imports 45 | - reduce the list of required imports. QuTiP and simanneal can only be used 46 | if they were installed. 47 | 48 | Documentation 49 | - Improvement of the docstrings at module level and for the feature notebooks. 50 | 51 | Transfer Function: 52 | - adds the custom ConvolutionTF 53 | 54 | OperatorMatrix: 55 | - adds a function to vectorize density matrices 56 | - Adds the calculation of the partial trace 57 | 58 | CostFunction: 59 | - Implements leakage and entanglement fidelity with truncation to computational 60 | states in Liouville space 61 | 62 | Optimizer: 63 | - improve storage. The optimizer is only stored in the result on request. 64 | 65 | ### Version 1.2 to 1.3 66 | 67 | GaussianMTF: 68 | - made the deprecation explicit 69 | 70 | Transfer Function: 71 | - new internal check function has more explicit error messages than previous 72 | assertions. 73 | 74 | Energy Spectrum: 75 | - plotting reworked to be applicable to a larger number of dimensions in the 76 | Hamiltonian 77 | - Plotting function returns the figure and axis. 78 | 79 | Solver Algorithm: 80 | - the filter_function_h_n and noise_coeffs_derivatives change their signature, 81 | now they are called with the optimization parameters, the transferred 82 | parameters and the control amplitudes. Previously only with the control 83 | amplitudes. 84 | 85 | Read the docs: 86 | - Add a new notebook about the basic use of filter functions in optimal control 87 | - Rework the notebook about the numerics. Now focused on the matrix class and 88 | put at the start. 89 | - Add a new notebook about the use of filter functions in the optimization of 90 | amplitude, frequency and phase noise. 91 | 92 | Optimizer: 93 | - Fix a false dimension check for the cost function weights.
94 | 95 | Github Actions: 96 | - Continuous Deployment to Pypi 97 | 98 | ### Version 1.3 to 1.3.1 99 | 100 | Github Actions: 101 | - Continuous Integration by running unittests 102 | - github badges 103 | 104 | Filter Functions: 105 | - Add docstring describing that the filter function package sorts the noise 106 | Hamiltonians in lexicographic order. 107 | 108 | Read the docs: 109 | - Include new notebook on the operator class. The numerics notebook 110 | now focuses on the matrix exponentials. 111 | - Rework the notebooks describing qopt features. 112 | 113 | Transfer Function: 114 | - Fixes an assertion controlling the common shape in the ParallelTF 115 | 116 | ### Version 1.3.1 to 1.3.2 117 | 118 | Filter Functions: 119 | - adapt to the latest version of filter functions. The noise operators are not 120 | stored in lexicographic order any more. 121 | - now requires filter_function v1.1.2 122 | 123 | Energy Spectrum: 124 | - scatter options can now be given to the plotting function. 125 | 126 | ### Version 1.3.2 to 1.3.3 127 | 128 | Noise 129 | - Reworked the sampling of the Gaussian distribution. 130 | 131 | ### Version 1.3.3 to 1.3.4 132 | 133 | Example Notebooks 134 | - Compatibility with the latest changes in filter functions 135 | 136 | ### Version 1.3.4 to 1.3.5 137 | 138 | Optimize 139 | - Refactors and comments the simulated annealing 140 | 141 | ### Version 1.3.5 to 1.3.6 142 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=42", 4 | "wheel" 5 | ] 6 | build-backend = "setuptools.build_meta" 7 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = 3 | -ra 4 | --verbose 5 | --cov=qopt 6 | --cov-config=.coveragerc 7 | --cov-report=xml 8 | -------------------------------------------------------------------------------- /qopt/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | """Hardware adapted quantum simulation and optimal control """ 22 | 23 | from . 
import amplitude_functions, analyser, cost_functions, data_container, \ 24 | energy_spectrum, matrix, noise, optimization_data, optimize, \ 25 | performance_statistics, simulator, solver_algorithms, transfer_function, \ 26 | util 27 | from .amplitude_functions import IdentityAmpFunc, UnaryAnalyticAmpFunc, \ 28 | CustomAmpFunc 29 | from .analyser import Analyser 30 | from .cost_functions import OperatorMatrixNorm, OperationInfidelity, \ 31 | OperationNoiseInfidelity, OperatorFilterFunctionInfidelity, LeakageError, \ 32 | state_fidelity, angle_axis_representation, entanglement_fidelity, \ 33 | entanglement_fidelity_super_operator, StateInfidelity, \ 34 | StateNoiseInfidelity, IncoherentLeakageError, StateInfidelitySubspace, \ 35 | LeakageLiouville, state_fidelity_subspace, \ 36 | LiouvilleMonteCarloEntanglementInfidelity 37 | from .data_container import DataContainer 38 | from .energy_spectrum import plot_energy_spectrum 39 | from .matrix import DenseOperator, convert_unitary_to_super_operator, \ 40 | closest_unitary, ket_vectorize_density_matrix, \ 41 | convert_ket_vectorized_density_matrix_to_square 42 | from .noise import NTGColoredNoise, NTGQuasiStatic 43 | from .optimization_data import OptimizationResult, OptimizationSummary 44 | from .optimize import LeastSquaresOptimizer, ScalarMinimizingOptimizer 45 | from .performance_statistics import PerformanceStatistics 46 | from .simulator import Simulator 47 | from .solver_algorithms import SchroedingerSolver, SchroedingerSMonteCarlo, \ 48 | SchroedingerSMCControlNoise, LindbladSolver 49 | from .transfer_function import IdentityTF, OversamplingTF, \ 50 | GaussianConvolution, ConcatenateTF, ParallelTF, ParallelMTF, CustomMTF, \ 51 | ExponentialMTF, OversamplingMTF, ConcatenateMTF, ConvolutionTF 52 | from .parallel import run_optimization_parallel 53 | 54 | __all__ = [ 55 | 'IdentityAmpFunc', 'UnaryAnalyticAmpFunc', 'CustomAmpFunc', 'Analyser', 56 | 'OperatorMatrixNorm', 'OperationInfidelity', 'OperationNoiseInfidelity', 57 | 'OperatorFilterFunctionInfidelity', 'LeakageError', 'state_fidelity', 58 | 'angle_axis_representation', 'entanglement_fidelity', 'StateInfidelity', 59 | 'entanglement_fidelity_super_operator', 'DataContainer', 60 | 'plot_energy_spectrum', 'DenseOperator', 'StateNoiseInfidelity', 61 | 'convert_unitary_to_super_operator', 'closest_unitary', 'NTGColoredNoise', 62 | 'NTGQuasiStatic', 'OptimizationResult', 'OptimizationSummary', 63 | 'LeastSquaresOptimizer', 'ScalarMinimizingOptimizer', 64 | 'PerformanceStatistics', 'Simulator', 'SchroedingerSolver', 65 | 'SchroedingerSMonteCarlo', 'SchroedingerSMCControlNoise', 'LindbladSolver', 66 | 'IdentityTF', 'OversamplingTF', 'ConvolutionTF', 67 | 'GaussianConvolution', 'OversamplingMTF', 'ConcatenateMTF', 68 | 'ConcatenateTF', 'ParallelTF', 'ParallelMTF', 'CustomMTF', 69 | 'ExponentialMTF', 'IncoherentLeakageError', 70 | 'run_optimization_parallel', 'StateInfidelitySubspace', 71 | 'ket_vectorize_density_matrix', 'LeakageLiouville', 72 | 'convert_ket_vectorized_density_matrix_to_square', 73 | 'state_fidelity_subspace', 'LiouvilleMonteCarloEntanglementInfidelity' 74 | ] 75 | 76 | try: 77 | from .plotting import plot_bloch_vector_evolution 78 | __all__.append('plot_bloch_vector_evolution') 79 | except ImportError: 80 | pass 81 | 82 | __version__ = '1.3' 83 | __license__ = 'GNU GPLv3+' 84 | __author__ = 'Julian Teske, Forschungszentrum Juelich' 85 | -------------------------------------------------------------------------------- /qopt/amplitude_functions.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | r"""This class is designed to express a functional relationship between the 22 | optimization parameters, which can be directly controlled and the control 23 | amplitudes, which appear as factors in the Hamiltonian. 24 | 25 | If the Hamiltonian is given as sum of a drift Hamiltonian and a control 26 | Hamiltonian described by operators multiplied with time dependent control 27 | amplitudes 28 | 29 | .. math:: 30 | 31 | H = H_{drift} + \sum_k u_k(t) H_k, 32 | 33 | then this class describes the control amplitudes as function of optimization 34 | parameters: 35 | 36 | .. math:: 37 | 38 | u_k(t) = u_k(x(t)) 39 | 40 | The `AmplitudeFunction` class is used as attribute of the `Solver` class. 41 | 42 | Classes 43 | ------- 44 | :class:`AmplitudeFunction` 45 | Abstract base class of the amplitude function. 46 | :class:`IdentityAmpFunc` 47 | The transferred optimization parameters are the control amplitudes. 48 | :class:`UnaryAnalyticAmpFunc` 49 | An amplitude function which can be given by a unary function. 50 | :class:`CustomAmpFunc` 51 | Applies functions handles specified by the user at the initialization. 52 | 53 | Notes 54 | ----- 55 | The implementation was inspired by the optimal control package of QuTiP [1]_ 56 | (Quantum Toolbox in Python) 57 | 58 | References 59 | ---------- 60 | .. [1] J. R. Johansson, P. D. Nation, and F. Nori: "QuTiP 2: A Python framework 61 | for the dynamics of open quantum systems.", Comp. Phys. Comm. 184, 1234 62 | (2013) [DOI: 10.1016/j.cpc.2012.11.019]. 63 | 64 | """ 65 | 66 | from abc import ABC, abstractmethod 67 | from typing import Callable 68 | 69 | import numpy as np 70 | 71 | 72 | class AmplitudeFunction(ABC): 73 | """Abstract Base class of the amplitude function. """ 74 | @abstractmethod 75 | def __call__(self, x): 76 | """ Calculates the control amplitudes u from the optimization 77 | parameters x. 78 | 79 | Parameters 80 | ---------- 81 | x : np.array 82 | Optimization parameters of shape (num_t, num_par), where num_t is 83 | the number of time slices and num_par the number of different 84 | optimization parameters. 85 | 86 | Returns 87 | ------- 88 | u : np.array 89 | Control amplitudes of shape (num_t, num_ctrl), where num_x is 90 | the number of time slices and num_ctrl the number of control 91 | operators in the Hamiltonian. 
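Examples
--------
As a minimal illustration of this shape convention, the concrete subclass
`UnaryAnalyticAmpFunc` defined below maps the optimization parameters
one-to-one to the control amplitudes:

>>> import numpy as np
>>> from qopt.amplitude_functions import UnaryAnalyticAmpFunc
>>> amp_func = UnaryAnalyticAmpFunc(value_function=np.exp,
...                                 derivative_function=np.exp)
>>> x = np.zeros((4, 2))  # (num_t, num_par)
>>> amp_func(x).shape     # (num_t, num_ctrl)
(4, 2)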
92 | 93 | """ 94 | return None 95 | 96 | @abstractmethod 97 | def derivative_by_chain_rule(self, deriv_by_ctrl_amps, x): 98 | """ Calculates the derivatives of some function f by the optimization 99 | parameters x, when given the optimization parameters x and the 100 | derivative by the control amplitudes. The calculation is performed 101 | using the chain rule: df/dx = df/du * du/dx. 102 | 103 | Parameters 104 | ---------- 105 | deriv_by_ctrl_amps : np.array, shape (num_t, num_f, num_ctrl) 106 | The gradients of num_f functions by num_ctrl different pulses at 107 | num_t different time steps, i.e. the derivatives df/du. 108 | 109 | x : np.array 110 | Optimization parameters of shape (num_t, num_par), where num_t is 111 | the number of time slices and num_par the number of different 112 | optimization parameters. 113 | 114 | Returns 115 | ------- 116 | deriv_by_opt_par : np.array, shape: (num_t, num_f, num_par) 117 | The derivatives by the optimization parameters. 118 | 119 | """ 120 | return None 121 | 122 | 123 | class IdentityAmpFunc(AmplitudeFunction): 124 | """The control amplitudes are identical with the optimization parameters. 125 | 126 | """ 127 | def __init__(self): 128 | pass 129 | 130 | def __call__(self, x: np.ndarray) -> np.ndarray: 131 | """See base class. """ 132 | return x 133 | 134 | def derivative_by_chain_rule(self, deriv_by_ctrl_amps: np.ndarray, 135 | x: np.ndarray) -> np.ndarray: 136 | """See base class. """ 137 | return deriv_by_ctrl_amps 138 | 139 | 140 | class UnaryAnalyticAmpFunc(AmplitudeFunction): 141 | """A unary analytic amplitude function which is applied to each amplitude 142 | value. This class can be used for every application case where all 143 | transferred parameters are mapped one-to-one to the control amplitudes 144 | by a single unary function. 145 | 146 | Parameters 147 | ---------- 148 | value_function : Callable float to float 149 | This scalar function expresses the functional dependency of the control 150 | amplitudes on the optimization parameters. The function is vectorized 151 | internally. 152 | 153 | derivative_function : Callable float to float 154 | This scalar function describes the derivative of the control 155 | amplitudes. The function is vectorized internally. 156 | 157 | """ 158 | def __init__(self, 159 | value_function: Callable[[float, ], float], 160 | derivative_function: Callable[[float, ], float]): 161 | self.value_function = np.vectorize(value_function) 162 | self.derivative_function = np.vectorize(derivative_function) 163 | 164 | def __call__(self, x: np.ndarray) -> np.ndarray: 165 | """See base class. """ 166 | return self.value_function(x) 167 | 168 | def derivative_by_chain_rule(self, deriv_by_ctrl_amps: np.ndarray, x): 169 | """See base class. """ 170 | du_by_dx = self.derivative_function(x) 171 | # du_by_dx shape: (n_time, n_ctrl) 172 | # deriv_by_ctrl_amps shape: (n_time, n_func, n_ctrl) 173 | # deriv_by_opt_par shape: (n_time, n_func, n_ctrl 174 | # since the function is unary we have n_ctrl = n_amps 175 | return np.einsum('ij,ikj->ikj', du_by_dx, deriv_by_ctrl_amps) 176 | 177 | 178 | class CustomAmpFunc(AmplitudeFunction): 179 | """A general amplitude function which is applied to the amplitude 180 | values. 181 | 182 | Parameters 183 | ---------- 184 | value_function : Callable array to array 185 | This function expresses the functional dependency of the control 186 | amplitudes on the optimization parameters. 
The function receives the 187 | optimization parameters x as array of the shape (num_t, num_par) and 188 | must return the control amplitudes u as array of the shape 189 | (num_t, num_ctrl). Where num_t is the number of time slices, 190 | num_par the number of optimization parameters and num_ctrl the number 191 | of control operators in the Hamiltonian. 192 | 193 | derivative_function : Callable array to array 194 | This function describes the derivative of the control amplitudes by the 195 | optimization parameters. 196 | The function receives the optimisation parameters x as array 197 | of shape (num_t, num_par) and must return the derivatives of the 198 | control amplitudes by the optimization parameters as array of shape 199 | (num_t, num_par, num_ctrl). 200 | 201 | """ 202 | def __init__(self, 203 | value_function: Callable[[np.ndarray, ], np.ndarray], 204 | derivative_function: Callable[[np.ndarray, ], np.ndarray]): 205 | self.value_function = value_function 206 | self.derivative_function = derivative_function 207 | 208 | def __call__(self, x: np.ndarray) -> np.ndarray: 209 | """See base class. """ 210 | return self.value_function(x) 211 | 212 | def derivative_by_chain_rule(self, deriv_by_ctrl_amps: np.ndarray, 213 | x: np.ndarray) -> np.ndarray: 214 | """See base class. """ 215 | du_by_dx = self.derivative_function(x) 216 | # du_by_dx: shape (time, par, ctrl) 217 | # deriv_by_ctrl_amps: shape (time, func, ctrl) 218 | # return: shape (time, func, par) 219 | 220 | return np.einsum('imj,ikj->ikm', du_by_dx, deriv_by_ctrl_amps) 221 | -------------------------------------------------------------------------------- /qopt/analyser.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | """The `Analyser` class operates on an instance of the `DataContainer` class 22 | and offers various convenience functions for the visualization and analysis 23 | of data acquired during the optimizations. 24 | 25 | These features include the plotting of cost functions, either for a single 26 | optimization run or for a multitude. Also functions for the calculation of 27 | computational time for the various cost functions and if given their gradients 28 | are included. 29 | 30 | Classes 31 | ------- 32 | :class:`Analyser` 33 | Holds convenience functions to visualize the optimization. 34 | 35 | Notes 36 | ----- 37 | The implementation was inspired by the optimal control package of QuTiP [1]_ 38 | (Quantum Toolbox in Python) 39 | 40 | References 41 | ---------- 42 | .. [1] J. R. Johansson, P. D. 
Nation, and F. Nori: "QuTiP 2: A Python framework 43 | for the dynamics of open quantum systems.", Comp. Phys. Comm. 184, 1234 44 | (2013) [DOI: 10.1016/j.cpc.2012.11.019]. 45 | 46 | """ 47 | 48 | import numpy as np 49 | import matplotlib.pyplot as plt 50 | 51 | from qopt import data_container 52 | 53 | 54 | class Analyser: 55 | """Holds convenience functions to visualize the optimization. 56 | 57 | The Analyser class can be used to make plots of the optimization data 58 | stored in an instance of the DataContainer class. This can be useful to 59 | judge the performance of optimization algorithms and investigate how fast 60 | the convergence is and whether the algorithm has fully converged. 61 | 62 | """ 63 | def __init__(self, data: data_container.DataContainer): 64 | self.data = data 65 | self.infidelities = None 66 | 67 | @property 68 | def n_least_square(self) -> int: 69 | """Returns the number of the optimization run which yields the smallest 70 | total costs. 71 | 72 | The total cost is measured as squared sum of the final cost function 73 | values. 74 | 75 | Returns 76 | ------- 77 | n_least_square : int 78 | Number of optimization run with smallest final costs. 79 | 80 | """ 81 | final_costs = np.asarray(self.data.final_costs) 82 | if len(final_costs.shape) == 1: 83 | final_costs = np.reshape( 84 | final_costs, (len(self.data.final_costs), 1)) 85 | squared_sum = np.sum(final_costs**2, axis=1) 86 | return int(np.argmin(squared_sum, axis=0)) 87 | 88 | def plot_costs(self, n=0, log_y=True, ax=None) -> None: 89 | """Plots the absolute cost values as function of optimization 90 | iteration. 91 | 92 | Parameters 93 | ---------- 94 | n: int, optional 95 | Number of the optimization run. Defaults to 0. 96 | 97 | log_y: bool, optional 98 | If True then the costs are plotted logarithmically. Defaults to 99 | True. 100 | 101 | ax: matplotlib.pyplot.axes 102 | Axes element in which the data is plotted. If not specified, a new 103 | one will be created. 104 | 105 | Returns 106 | ------- 107 | ax: matplotlib.pyplot.axes 108 | Axes with the plot. 109 | 110 | """ 111 | if ax is None: 112 | _, ax = plt.subplots() 113 | 114 | for cost, index in zip(np.asarray(self.data.costs[n]).T, self.data.indices): 115 | ax.plot(cost, label=index) 116 | ax.set_ylabel('Costs') 117 | ax.set_xlabel('Iteration') 118 | ax.legend() 119 | if log_y: 120 | ax.set_yscale('log') 121 | 122 | return ax 123 | 124 | def absolute_costs(self) -> np.ndarray: 125 | """ 126 | Calculates for each optimization run and each iteration in the 127 | optimization algorithm the sum of the costs. 128 | 129 | Returns 130 | ------- 131 | costs: numpy array, shape (n_runs, n_iter) 132 | The sum of the costs. 133 | 134 | """ 135 | n_steps = np.max(list(map(len, self.data.costs))) 136 | 137 | # shape: (num_runs, num_step, num_cost_fkt) 138 | costs = np.empty( 139 | (len(self.data.costs), n_steps, len(self.data.costs[0][0]))) 140 | costs[:] = np.nan 141 | 142 | for i, run in enumerate(self.data.costs): 143 | num_steps = len(run) 144 | costs[i, :num_steps, :] = np.stack(run, axis=0) 145 | 146 | costs = np.sqrt(np.sum(costs ** 2, axis=2)) 147 | return costs 148 | 149 | def plot_absolute_costs(self) -> (plt.Figure, plt.Axes): 150 | """Plots the absolute costs. 
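Returns
-------
fig: matplotlib.pyplot.figure
    Figure containing the plot.

ax: matplotlib.pyplot.axes
    Axes in which the summed costs are plotted on a logarithmic scale
    as function of the iteration number.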
""" 151 | costs = self.absolute_costs() 152 | fig, axes = plt.subplots(1, 1) 153 | ax = axes 154 | ax.plot(costs.T) 155 | ax.set_yscale('log') 156 | ax.set_title('Sum of Infidelitites') 157 | ax.set_xlabel('Iteration') 158 | ax.set_ylabel('Infidelity') 159 | return fig, ax 160 | 161 | def integral_cost_fkt_times(self, n: int = 0) -> np.ndarray: 162 | """Sum of the time required for the evaluation of the cost 163 | function. 164 | 165 | Parameters 166 | ---------- 167 | n: int, optional 168 | Number of the optimization run. Defaults to 0. 169 | 170 | Returns 171 | ------- 172 | integral_times: np.array 173 | Integrated time required for the cost function evaluation. 174 | 175 | """ 176 | times = self.data.optimization_statistics[n].cost_func_eval_times 177 | integral_times = np.sum(np.asarray(times), axis=0) 178 | return integral_times 179 | 180 | def integral_grad_fkt_times(self, n: int = 0): 181 | """Sum of the time required for the evaluation of the cost 182 | function gradients. 183 | 184 | Parameters 185 | ---------- 186 | n: int, optional 187 | Number of the optimization run. Defaults to 0. 188 | 189 | Returns 190 | ------- 191 | integral_times: np.array 192 | Integrated time required for the cost function gradient evaluation. 193 | 194 | """ 195 | times = self.data.optimization_statistics[n].grad_func_eval_times 196 | integral_times = np.sum(np.asarray(times), axis=0) 197 | return integral_times 198 | 199 | def opt_times(self): 200 | """Total optimization times. 201 | 202 | Returns 203 | ------- 204 | total_times: np.array 205 | Time required per optimization run. 206 | 207 | """ 208 | total_times = np.zeros((len(self.data.optimization_statistics))) 209 | for i in range(len(self.data.optimization_statistics)): 210 | t_start = self.data.optimization_statistics[i].start_t_opt 211 | t_end = self.data.optimization_statistics[i].end_t_opt 212 | total_times[i] = t_end - t_start 213 | return total_times 214 | 215 | def total_cost_fkt_time(self): 216 | """Total time of cost function evaluation. 217 | 218 | Returns 219 | ------- 220 | total_t: np.array 221 | Total times for the evaluation of cost functions. 222 | 223 | """ 224 | total_t = 0 225 | for n in range(len(self.data.optimization_statistics)): 226 | total_t += np.sum(self.integral_cost_fkt_times(n)) 227 | return total_t 228 | 229 | def total_grad_fkt_time(self): 230 | """Total time of cost function gradient calculation. 231 | 232 | Returns 233 | ------- 234 | total_t: np.array 235 | Total times for the calculation of cost functions gradients. 236 | 237 | """ 238 | total_t = 0 239 | for n in range(len(self.data.optimization_statistics)): 240 | total_t += np.sum(self.integral_grad_fkt_times(n)) 241 | return total_t 242 | 243 | def time_share_cost_fkt(self): 244 | """Time share of the cost function evaluation. """ 245 | return self.total_cost_fkt_time() / np.sum(self.opt_times()) 246 | 247 | def time_share_grad_fkt(self): 248 | """Time share of the cost function gradient calculation. 
""" 249 | return self.total_grad_fkt_time() / np.sum(self.opt_times()) 250 | -------------------------------------------------------------------------------- /qopt/data_container.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | """Implements data storage. 22 | 23 | The `DataContainer` class stores the contend of multiple `Result` class 24 | instances. Each 'Result' class instance holds the information gathered in 25 | an optimization run. 26 | 27 | The `DataContainer` interfaces to the `Analyser` class, which visualizes the 28 | stored data. It has also the functionalities for writing data to and loading 29 | it from the hard drive. 30 | 31 | Classes 32 | ------- 33 | :class:`DataContainer` 34 | Data storage class. 35 | 36 | Notes 37 | ----- 38 | The implementation was inspired by the optimal control package of QuTiP [1]_ 39 | (Quantum Toolbox in Python) 40 | 41 | References 42 | ---------- 43 | .. [1] J. R. Johansson, P. D. Nation, and F. Nori: "QuTiP 2: A Python framework 44 | for the dynamics of open quantum systems.", Comp. Phys. Comm. 184, 1234 45 | (2013) [DOI: 10.1016/j.cpc.2012.11.019]. 46 | 47 | """ 48 | 49 | import pickle 50 | import os 51 | import copy 52 | 53 | from typing import Optional, List 54 | 55 | from qopt import optimization_data, performance_statistics 56 | 57 | 58 | class DataContainer: 59 | 60 | """Stores data of the optimization. 61 | 62 | This class gathers the information stored in multiple objects of the 63 | class `OptimResult`. 64 | 65 | Parameters 66 | ---------- 67 | storage_path : string 68 | The path were this instance of DataContainer is to be stored or from 69 | where is shall be loaded. 70 | 71 | file_name : string 72 | The file name will be appended to the path for storage and loading. The 73 | default value is an empty string assuming that the storage path already 74 | contains the file name. 75 | 76 | indices : list of str 77 | The indices of the costs. 78 | 79 | final_costs : list 80 | The final values of the cost function. 81 | 82 | init_parameters : list 83 | The initial optimization parameters. 84 | 85 | final_parameters : list 86 | The final optimization parameters. 87 | 88 | costs : list of list 89 | All values of the cost functions during the optimization. 90 | 91 | parameters : list of list 92 | All parameters for which the cost functions were evaluated during the 93 | optimization. 94 | 95 | status : list of None or int 96 | The termination_reason as integer. 
Like in scipy.OptimizeResult 97 | None if the optimization has not started. 98 | -1: improper input parameters status 99 | 0: the maximum number of function evaluations is exceeded. 100 | 1: gradient norm termination condition is satisfied. 101 | 2: cost function termination condition is satisfied. 102 | 3: minimal step size termination condition is satisfied. 103 | 4: Both 2 and 3 termination conditions are satisfied. 104 | 5: Wall time exceeded. 105 | 106 | optimization_stats : list 107 | Optimization statistics, which have been appended to the data. 108 | 109 | append_time_to_path : bool 110 | If True, the current time is appended to the file name. 111 | 112 | """ 113 | def __init__(self, 114 | storage_path: Optional[str] = None, 115 | file_name: str = 'Temp File', 116 | indices: Optional[List[str]] = None, 117 | final_costs: Optional[List] = None, 118 | init_parameters: Optional[List] = None, 119 | final_parameters: Optional[List] = None, 120 | costs: Optional[List[List]] = None, 121 | parameters: Optional[List[List]] = None, 122 | status: Optional[List] = None, 123 | optimization_stats: Optional[List] = None, 124 | append_time_to_path=True): 125 | 126 | storage_path = os.path.join( 127 | __file__, r"..\..\temp" 128 | ) if storage_path is None else storage_path 129 | 130 | self.final_costs = [] if final_costs is None else final_costs 131 | self.indices = [] if indices is None else indices 132 | self.init_parameters = ( 133 | [] if init_parameters is None else init_parameters) 134 | self.final_parameters = ( 135 | [] if final_parameters is None else final_parameters) 136 | self.costs = [] if costs is None else costs 137 | self.parameters = [] if parameters is None else parameters 138 | self.status = [] if status is None else status 139 | self.optimization_statistics = ( 140 | [] if optimization_stats is None else optimization_stats) 141 | 142 | self.check_length() 143 | 144 | self.storage_path = storage_path 145 | self.append_time_to_path = append_time_to_path 146 | 147 | self._asyncrone_writer = None 148 | self.file_name = file_name 149 | 150 | def __len__(self): 151 | """Number of optimization runs in the data. 152 | 153 | Returns 154 | ------- 155 | len: int 156 | Number of optimization runs in the data. 157 | 158 | """ 159 | if self.costs is None: 160 | return 0 161 | else: 162 | return len(self.costs) 163 | 164 | @property 165 | def index(self): 166 | """Indices of the cost functions. """ 167 | return self.final_parameters.index 168 | 169 | def check_length(self): 170 | pass 171 | 172 | def append_optim_result( 173 | self, 174 | optim_result: optimization_data.OptimizationResult): 175 | """Appends an instance of `OptimizationResult` to the stored data. 176 | 177 | The Information gained in an optimization run is extracted and 178 | appended to the various lists of the `DataContainer`. 179 | 180 | Parameters 181 | ---------- 182 | optim_result: `OptimizationResult` 183 | Result of an optimization run. 
184 | 185 | """ 186 | if optim_result.optim_summary is None: 187 | costs = [] 188 | parameters = [] 189 | else: 190 | costs = optim_result.optim_summary.costs 191 | parameters = optim_result.optim_summary.parameters 192 | 193 | self._append(final_costs=optim_result.final_cost, 194 | indices=optim_result.indices, 195 | init_parameters=optim_result.init_parameters, 196 | final_parameters=optim_result.final_parameters, 197 | costs=costs, 198 | parameters=parameters, 199 | status=optim_result.status, 200 | optimization_stats=optim_result.optimization_stats 201 | ) 202 | 203 | def _append(self, final_costs: List, indices: List[str], 204 | init_parameters: List, final_parameters: List, 205 | costs: List, parameters: List, status: int, 206 | optimization_stats: Optional[ 207 | performance_statistics.PerformanceStatistics]): 208 | if len(self) == 0: 209 | self.indices = indices 210 | else: 211 | assert self.indices == indices 212 | 213 | self.final_costs.append(final_costs) 214 | self.init_parameters.append(init_parameters) 215 | self.final_parameters.append(final_parameters) 216 | self.costs.append(costs) 217 | self.parameters.append(parameters) 218 | self.status.append(status) 219 | self.optimization_statistics.append(optimization_stats) 220 | self.check_length() 221 | 222 | def __deepcopy__(self): 223 | cpyobj = type(self)( 224 | final_costs=copy.deepcopy(self.final_costs), 225 | indices=copy.deepcopy(self.indices), 226 | init_parameters=copy.deepcopy(self.init_parameters), 227 | final_parameters=copy.deepcopy(self.final_parameters), 228 | costs=copy.deepcopy(self.costs), 229 | parameters=copy.deepcopy(self.parameters), 230 | status=copy.deepcopy(self.status), 231 | storage_path=copy.deepcopy(self.storage_path), 232 | file_name=copy.deepcopy(self.file_name), 233 | optimization_stats=copy.deepcopy( 234 | self.optimization_statistics), 235 | append_time_to_path=copy.deepcopy(self.append_time_to_path)) 236 | return cpyobj 237 | 238 | def to_pickle(self, filename=None): 239 | """Dumps the class to pickle. 240 | 241 | Parameters 242 | ---------- 243 | filename : str 244 | Name of the file to which the class is pickled. 245 | 246 | """ 247 | if filename is None: 248 | if self.file_name is not None: 249 | filename = os.path.join(self.storage_path, self.file_name) 250 | else: 251 | filename = self.storage_path 252 | infile = open(filename, 'wb') 253 | pickle.dump(self._to_dict(), infile) 254 | infile.close() 255 | 256 | @classmethod 257 | def from_pickle(cls, filename): 258 | """Read class from pickled file. 259 | 260 | Parameters 261 | ---------- 262 | filename : str 263 | The name of the file which is loaded. 
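Examples
--------
A minimal storage round trip (the path and file name are chosen for
illustration only):

>>> from qopt import DataContainer
>>> container = DataContainer(storage_path='.', file_name='my_run')
>>> container.to_pickle()
>>> restored = DataContainer.from_pickle('./my_run')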
264 | 265 | """ 266 | outfile = open(filename, 'rb') 267 | data_dict = pickle.load(outfile) 268 | outfile.close() 269 | return cls._from_dict(data_dict=data_dict) 270 | 271 | def _to_dict(self): 272 | return dict(final_costs=self.final_costs, 273 | indices=self.indices, 274 | init_parameters=self.init_parameters, 275 | final_parameters=self.final_parameters, 276 | costs=self.costs, 277 | parameters=self.parameters, 278 | status=self.status, 279 | storage_path=self.storage_path, 280 | file_name=self.file_name, 281 | append_time_to_path=self.append_time_to_path, 282 | optimization_stats=self.optimization_statistics) 283 | 284 | @classmethod 285 | def _from_dict(cls, data_dict): 286 | return cls(**data_dict) 287 | -------------------------------------------------------------------------------- /qopt/energy_spectrum.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | """This file serves to plot energy spectra of a Hamiltonian. 22 | 23 | The convenience functions implemented in this module can be used to plot the 24 | eigenvalues and eigenvectors of the Hamiltonian as function of a parameter. 25 | This is especially useful to analyse the theoretical properties of a system. 26 | 27 | Functions 28 | --------- 29 | :func:`vector_color_map` 30 | Maps eigenvectors to a coloring. 31 | 32 | :func:`plot_energy_spectrum` 33 | plot the energy spectrum of an Hamiltonian. 34 | 35 | """ 36 | 37 | import matplotlib as mpl 38 | import matplotlib.pyplot as plt 39 | import numpy as np 40 | 41 | from typing import List 42 | 43 | from qopt.matrix import OperatorMatrix 44 | 45 | 46 | def vector_color_map(vectors: np.array): 47 | """ 48 | Maps eigenvectors to a coloring, encoding the contributions. 49 | 50 | Parameters 51 | ---------- 52 | vectors: array 53 | Array of eigenvectors. The eigenvectors are given as columns. There 54 | may be no more than 7. 55 | 56 | Returns 57 | ------- 58 | color_values: array 59 | The coloring is given as array. Each column signifies one tuple of 60 | RGB color values. 61 | 62 | """ 63 | assert len(vectors.shape) == 2 64 | 65 | points_on_cmap = np.linspace(0, 1, vectors.shape[-1], endpoint=True) 66 | # include the endpoint to use the full colormap 67 | basis_colors = mpl.cm.get_cmap('brg')(points_on_cmap) 68 | basis_colors = np.array(basis_colors)[:, :-1] 69 | 70 | values = np.einsum('ij, ni -> nj', basis_colors, np.abs(vectors)) 71 | 72 | # in this basis, the normalization to the rgb range is not preserved, 73 | # thus it needs to be renormalized. 
74 | values /= np.max(values, axis=-1)[..., None] 75 | 76 | return values 77 | 78 | 79 | def plot_energy_spectrum(hamiltonian: List[OperatorMatrix], 80 | x_val: np.array, 81 | x_label: str, 82 | ax=None, 83 | use_spectral_decomposition=True, 84 | **scatter_kwargs): 85 | """ 86 | Calculates and plots the energy spectra of hamilton operators. 87 | 88 | The colors demonstrate the contribution of individual base vectors. 89 | 90 | Parameters 91 | ---------- 92 | hamiltonian: list of OperatorMatrix 93 | The Hamiltonians which shall provide the energy spectra. They need to 94 | be hermitian. 95 | 96 | x_val: array of float, shape (n, ) 97 | The x_vales by which the eigenvalues are plotted. 98 | 99 | x_label: str 100 | Label of the x-axis. 101 | 102 | ax: matplotlib pyplot axes 103 | Instance of axes to plot the data in. Defaults to None. 104 | 105 | """ 106 | d = hamiltonian[0].shape[0] 107 | eigenvalues = np.empty((len(hamiltonian), d)) 108 | eigenvectors = np.empty((len(hamiltonian), d, d)) 109 | for i, h in enumerate(hamiltonian): 110 | eig_val, eig_vec = h.spectral_decomposition(hermitian=True) 111 | eigenvalues[i, :] = eig_val 112 | eigenvectors[i, :, :] = np.abs(eig_vec) 113 | 114 | if ax is None: 115 | _, ax = plt.subplots() 116 | if use_spectral_decomposition: 117 | for i in range(d): 118 | ax.scatter(x=x_val, y=eigenvalues[:, i], 119 | c=vector_color_map(eigenvectors[:, :, i]), 120 | **scatter_kwargs) 121 | else: 122 | for i in range(d): 123 | ax.scatter(x=x_val, y=eigenvalues[:, i], 124 | c='k', 125 | **scatter_kwargs) 126 | 127 | ax.set_xlabel(x_label) 128 | return ax 129 | -------------------------------------------------------------------------------- /qopt/examples/rabi_driving/lab_frame_setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file provides the setup for the simulation of a rabi driving experiment. 3 | 4 | author: Julian Teske julian.teske@fz-juelich.de 5 | 6 | """ 7 | from qopt import * 8 | import numpy as np 9 | 10 | __all__ = ['build_lab_solver', 'build_rotational_frame_solver', 11 | 'transform_ket_to_rot_frame', 'transform_prop_to_rot_frame'] 12 | 13 | 14 | def build_lab_solver( 15 | resonance_frequency, 16 | n_time_steps, 17 | delta_t, 18 | exponential_method='spectral' 19 | ): 20 | """ 21 | Creates an instance of SchroedingerSolver for a qubit subjected to Rabi 22 | driving and described in the lab frame. 23 | 24 | Parameters 25 | ---------- 26 | resonance_frequency: float 27 | The qubit's resonance frequency. 28 | 29 | n_time_steps: int 30 | Number of time steps. 31 | 32 | delta_t: float 33 | Length of time steps. 34 | 35 | exponential_method: str in ['spectral', 'Frechet'], optional 36 | Numeric method for the calculation of matrix exponentials, required to 37 | solve Schrodinger's equation. Defaults to 'spectral' because 38 | 'Frechet' might have issues when used in after QuTiP's plot routines. 39 | 40 | Returns 41 | ------- 42 | solver: SchroedingerSolver 43 | The created solver. 44 | 45 | """ 46 | h_ctrl = [.5 * DenseOperator.pauli_x()] 47 | h_drift = [resonance_frequency * .5 * DenseOperator.pauli_z()] 48 | tau = delta_t * np.ones(n_time_steps) 49 | 50 | def periodic_signal(opt_pars): 51 | """ 52 | Periodic driving signal. Mimics e.g. a microwave source. 53 | 54 | Parameters 55 | ---------- 56 | opt_pars: array of float 57 | Optimization parameters. 
58 | 59 | Returns 60 | ------- 61 | signal: array, shape (n_time_steps, 1) 62 | 63 | Todo: replace arange by linspace 64 | 65 | """ 66 | amplitude = opt_pars[0] 67 | frequency = opt_pars[1] 68 | phase_shift = opt_pars[2] 69 | pulse = amplitude * np.sin( 70 | frequency * np.arange(n_time_steps) * delta_t + phase_shift) 71 | return np.expand_dims(pulse, axis=1) 72 | 73 | # The derivative is not required, as we will not perform any pulse 74 | # optimization. 75 | def periodic_signal_deriv(_): 76 | raise NotImplementedError 77 | 78 | amplitude_function = CustomAmpFunc( 79 | value_function=periodic_signal, 80 | derivative_function=periodic_signal_deriv 81 | ) 82 | 83 | solver = SchroedingerSolver( 84 | h_drift=h_drift, 85 | h_ctrl=h_ctrl, 86 | tau=tau, 87 | amplitude_function=amplitude_function, 88 | exponential_method=exponential_method 89 | ) 90 | return solver 91 | 92 | 93 | def build_rotational_frame_solver( 94 | frequency_detuning, 95 | n_time_steps, 96 | delta_t, 97 | exponential_method='spectral' 98 | ): 99 | """ 100 | Creates an instance of SchroedingerSolver for a qubit subjected to Rabi 101 | driving and described in the rotational frame. 102 | 103 | Parameters 104 | ---------- 105 | frequency_detuning: float 106 | Detuning between the driving and the resonance frequency. 107 | 108 | n_time_steps: int 109 | Number of time steps. 110 | 111 | delta_t: float 112 | Length of time steps. 113 | 114 | exponential_method: str in ['spectral', 'Frechet'], optional 115 | Numeric method for the calculation of matrix exponentials, required to 116 | solve Schrodinger's equation. Defaults to 'spectral' because 117 | 'Frechet' might have issues when used in after QuTiP's plot routines. 118 | 119 | Returns 120 | ------- 121 | solver: SchroedingerSolver 122 | The created solver. 123 | 124 | """ 125 | h_ctrl = [.5 * DenseOperator.pauli_x(), .5 * DenseOperator.pauli_y()] 126 | h_drift = [frequency_detuning * .5 * DenseOperator.pauli_z()] 127 | tau = delta_t * np.ones(n_time_steps) 128 | 129 | solver = SchroedingerSolver( 130 | h_drift=h_drift, 131 | h_ctrl=h_ctrl, 132 | tau=tau, 133 | exponential_method=exponential_method 134 | ) 135 | return solver 136 | 137 | 138 | def transform_prop_to_rot_frame( 139 | propagator: DenseOperator, 140 | time_end: float, 141 | time_start: float, 142 | resonance_frequency: float 143 | ): 144 | """ 145 | Transforms a propagator which propagates state vectors from time t_start 146 | to time t_end. 147 | 148 | Parameters 149 | ---------- 150 | propagator: DenseOperator 151 | The propagator from time_start to time_end in the lab frame. 152 | 153 | time_end: float 154 | Final time of the propagator. 155 | 156 | time_start: float 157 | Initial time of the propagator. 158 | 159 | resonance_frequency: float 160 | The qubit's resonance frequency. 161 | 162 | Returns 163 | ------- 164 | returned_prop: DenseOperator 165 | The transformed propagator in the rotational frame. 166 | 167 | """ 168 | u_end = (-.5 * DenseOperator.pauli_z()).exp( 169 | 1j * time_end * resonance_frequency) 170 | u_start = (-.5 * DenseOperator.pauli_z()).exp( 171 | 1j * time_start * resonance_frequency) 172 | return u_end.dag() * propagator * u_start 173 | 174 | 175 | def transform_ket_to_rot_frame( 176 | vector: DenseOperator, 177 | time, 178 | resonance_frequency 179 | ): 180 | """ 181 | Transforms a state vector into the rotational frame at time. 182 | 183 | Parameters 184 | ---------- 185 | vector: DenseOperator 186 | State vector in the lab frame. 187 | 188 | time: float 189 | Time of transformation. 
190 | 191 | resonance_frequency: float 192 | The qubit's resonance frequency. 193 | 194 | Returns 195 | ------- 196 | transformed_vector: DenseOperator 197 | Transformed vector in the rotational frame. 198 | 199 | """ 200 | u = (.5 * DenseOperator.pauli_z()).exp(1j * time * resonance_frequency) 201 | return u.dag() * vector 202 | -------------------------------------------------------------------------------- /qopt/examples/rabi_driving/rabi_xy_setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file contains the physical constants, operators and convenience functions 3 | for the simulation of rabi drive. 4 | 5 | """ 6 | 7 | import numpy as np 8 | from qopt import * 9 | 10 | 11 | # ##################### 1. Implementation Choices ############################# 12 | 13 | # dense control matrices are faster in low dimensions than sparse matrices 14 | control_matrix = DenseOperator 15 | # We use the scipy linalg expm_frechet implementation to calculate matrix 16 | # exponentials 17 | exponential_method = 'Frechet' 18 | 19 | # ##################### 2. Constants ########################################## 20 | 21 | 22 | # The time steps could be chosen more densely giving more flexibility at the 23 | # cost of computational time 24 | n_time_samples = 20 25 | total_time = 4 26 | time_step = total_time / n_time_samples 27 | rabi_frequency_max = 2 * np.pi 28 | 29 | oversampling = 5 30 | awg_rise_time = time_step * .2 31 | lin_freq_rel = 1. 32 | sigma_eps = .02 * lin_freq_rel 33 | 34 | # ##################### 3. Operators ########################################## 35 | 36 | sigma_0 = control_matrix.pauli_0() 37 | sigma_x = control_matrix.pauli_x() 38 | sigma_y = control_matrix.pauli_y() 39 | sigma_z = control_matrix.pauli_z() 40 | 41 | # we implement the control on the x and y axis 42 | h_ctrl = [.5 * sigma_x, .5 * sigma_y] 43 | # We could ad a drift hamiltonian by detuning from the resonance frequency. 44 | h_drift = 0 * .5 * sigma_z 45 | 46 | h_noise = .5 * sigma_z 47 | 48 | x_half = sigma_x.exp(tau=np.pi * .25j) 49 | y_half = sigma_y.exp(tau=np.pi * .25j) 50 | 51 | # ##################### 4. Transfer Function ################################## 52 | 53 | # 4.1: Exponential Transfer Function 54 | # This transfer function assumes an exponential saturation of voltages. 55 | transfer_function = ExponentialMTF( 56 | awg_rise_time=awg_rise_time, 57 | oversampling=oversampling, 58 | num_ctrls=2 59 | ) 60 | 61 | # 4.2: Identity 62 | # No transfer function. Here we assume ideal control electronics. 63 | # transfer_function = OversamplingTF(oversampling=oversampling, num_ctrls=2) 64 | 65 | # ##################### 5. Amplitude Function ################################# 66 | 67 | # 5.1: x,y control 68 | # lin_amp_func = UnaryAnalyticAmpFunc( 69 | # value_function=lambda x: lin_freq_rel * x, 70 | # derivative_function=lambda x: lin_freq_rel) 71 | # lambda functions cannot be pickled and subsequently used for multiprocessing. 72 | 73 | 74 | def value_function(x): 75 | return lin_freq_rel * x 76 | 77 | 78 | def derivative_function(x): 79 | return lin_freq_rel 80 | 81 | 82 | lin_amp_func = UnaryAnalyticAmpFunc( 83 | value_function=value_function, 84 | derivative_function=derivative_function) 85 | 86 | 87 | # ##################### 6. 
Noise Trace Generator ############################## 88 | 89 | # 6.1 1/f noise spectrum 90 | # until Tom provides the exact numbers I will go with 91 | S_01 = 3e8 92 | S_02 = 3e4 93 | # S(f) = S_01 / f + S_02 / f^2 94 | 95 | f_qs_min = 1 / 10 / 60 # 1 over 10 minutes 96 | f_qs_max = 1 / (n_time_samples * time_step) 97 | 98 | variance_f = S_01 * (np.log(f_qs_max) - np.log(f_qs_min)) \ 99 | - S_02 * (1 / f_qs_max - 1 / f_qs_min) 100 | sigma_f = np.sqrt(variance_f) 101 | 102 | 103 | def toms_spectral_noise_density(f, white_floor=1e6): 104 | """ For fast noise only. I. e. without the 1/f^2 terms. """ 105 | if not isinstance(f, np.ndarray): 106 | f = np.asarray(f) 107 | is_white = (f > white_floor).astype(int) 108 | noise_density = S_01 / f * (1 - is_white) 109 | noise_density += (S_01 / white_floor) * is_white 110 | return noise_density 111 | 112 | 113 | # The noise trace generator explicitly simulates noise realizations 114 | ntg_one_over_f_noise = NTGColoredNoise( 115 | noise_spectral_density=toms_spectral_noise_density, 116 | dt=(time_step / oversampling), 117 | n_samples_per_trace=n_time_samples * oversampling, 118 | n_traces=1000, 119 | n_noise_operators=1, 120 | always_redraw_samples=True 121 | ) 122 | 123 | 124 | # 6.2 quasi static contribution 125 | # for the remaining quasi static noise contribution, we integrate the spectral 126 | # density from 10^-3 Hz to 1 / (time_step / oversampling) 127 | ntg_quasi_static = NTGQuasiStatic( 128 | # standard_deviation=[sigma_f, ], 129 | standard_deviation=[sigma_eps], 130 | n_samples_per_trace=n_time_samples * oversampling, 131 | n_traces=8, 132 | always_redraw_samples=False, 133 | sampling_mode='uncorrelated_deterministic') 134 | 135 | # ##################### 7. Time Slot Computer ################################# 136 | # The time slot computer calculates the evolution of the qubit taking into 137 | # account the amplitude and transfer function and also the noise traces if 138 | # required. 139 | 140 | # 7.1 xy-control 141 | solver_unperturbed_xy = SchroedingerSolver( 142 | h_drift=[0 * h_drift, ], 143 | h_ctrl=h_ctrl, 144 | initial_state=DenseOperator(np.eye(2)), 145 | tau=time_step * np.ones(n_time_samples), 146 | is_skew_hermitian=True, 147 | exponential_method=exponential_method, 148 | transfer_function=transfer_function, 149 | amplitude_function=lin_amp_func 150 | ) 151 | 152 | solver_qs_noise_xy = SchroedingerSMonteCarlo( 153 | h_drift=[0 * h_drift, ], 154 | h_ctrl=h_ctrl, 155 | h_noise=[h_noise, ], 156 | noise_trace_generator=ntg_quasi_static, 157 | initial_state=DenseOperator(np.eye(2)), 158 | tau=time_step * np.ones(n_time_samples), 159 | is_skew_hermitian=True, 160 | exponential_method=exponential_method, 161 | transfer_function=transfer_function, 162 | amplitude_function=lin_amp_func 163 | ) 164 | 165 | 166 | solver_colored_noise_xy = SchroedingerSMonteCarlo( 167 | h_drift=[0 * h_drift, ], 168 | h_ctrl=h_ctrl, 169 | h_noise=[h_noise, ], 170 | noise_trace_generator=ntg_one_over_f_noise, 171 | initial_state=DenseOperator(np.eye(2)), 172 | tau=time_step * np.ones(n_time_samples), 173 | is_skew_hermitian=True, 174 | exponential_method=exponential_method, 175 | transfer_function=transfer_function, 176 | amplitude_function=lin_amp_func 177 | ) 178 | 179 | 180 | # ##################### 8. Cost Function ###################################### 181 | # The cost functions calculate the infidelities and are minimized by the 182 | # optimiser. 
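# For unitary dynamics, the entanglement fidelity between a target V and a
# realized propagator U is commonly defined as |Tr(V^dag U)|^2 / d^2. The
# helper below is only a hedged illustration of that formula (a hypothetical
# function, not part of the qopt API); it reuses the DenseOperator methods
# dag(), tr() and the shape attribute exactly as they appear elsewhere in
# this file.
def _entanglement_fidelity_sketch(propagator: DenseOperator,
                                  target: DenseOperator) -> float:
    d = target.shape[0]
    # Hilbert-Schmidt overlap between the target and the realized propagator
    overlap = (target.dag() * propagator).tr()
    return float(np.abs(overlap) ** 2 / d ** 2)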
183 | 184 | # 8.1 xy-control 185 | entanglement_infid_xy = OperationInfidelity( 186 | solver=solver_qs_noise_xy, 187 | target=x_half, 188 | fidelity_measure='entanglement', 189 | label=['Entanglement Fidelity XY-Control'] 190 | ) 191 | 192 | entanglement_infid_qs_noise_xy = OperationNoiseInfidelity( 193 | solver=solver_qs_noise_xy, 194 | target=y_half, 195 | fidelity_measure='entanglement', 196 | label=['Entanglement Fidelity QS-Noise XY-Control'], 197 | neglect_systematic_errors=True 198 | ) 199 | 200 | entanglement_infid_colored_noise_xy = OperationNoiseInfidelity( 201 | solver=solver_colored_noise_xy, 202 | target=x_half, 203 | fidelity_measure='entanglement', 204 | label=['Entanglement Fidelity 1-over-f-Noise XY-Control'], 205 | neglect_systematic_errors=True 206 | ) 207 | 208 | 209 | # ##################### 8. Convenience Functions ############################## 210 | amp_bound = rabi_frequency_max * 2 * np.pi / lin_freq_rel 211 | bounds_xy = [[0, amp_bound]] * (n_time_samples * len(h_ctrl)) 212 | bounds_xy_least_sq = [np.zeros((n_time_samples * len(h_ctrl))), 213 | amp_bound * np.ones((n_time_samples * len(h_ctrl)))] 214 | 215 | 216 | def random_xy_init_pulse(seed=None): 217 | if seed: 218 | np.random.seed(seed) 219 | return np.random.rand(n_time_samples, len(h_ctrl)) * amp_bound 220 | 221 | # ### for multiprocessing test 222 | 223 | qs_solver = solver_qs_noise_xy 224 | fast_mc_solver = solver_colored_noise_xy 225 | syst_infid = entanglement_infid_xy 226 | qs_infid = entanglement_infid_qs_noise_xy 227 | fast_infid = entanglement_infid_colored_noise_xy 228 | 229 | 230 | def simulate_propagation(initial_pulse): 231 | simulator = Simulator( 232 | solvers=[qs_solver, fast_mc_solver], 233 | cost_funcs=[syst_infid, qs_infid, fast_infid] 234 | ) 235 | infid = simulator.wrapped_cost_functions(pulse=initial_pulse) 236 | return infid 237 | 238 | 239 | simulator = Simulator( 240 | solvers=[qs_solver], 241 | cost_funcs=[syst_infid, qs_infid,] 242 | ) 243 | 244 | optimizer = LeastSquaresOptimizer( 245 | system_simulator=simulator, 246 | bounds=bounds_xy_least_sq, 247 | cost_func_weights=[1, 1e2] 248 | ) 249 | 250 | -------------------------------------------------------------------------------- /qopt/optimization_data.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | """This module stores information about the optimization and its result. 22 | 23 | The `OptimizationResult` is generated with the final properties and initial 24 | optimization parameter values of each optimization run. 
The 25 | `OptimizationSummary` is only created when requested and stores the properties 26 | of each step in the optimization algorithm. This information is valuable for 27 | the choice of the best optimization algorithm. 28 | 29 | Classes 30 | ------- 31 | :class:`OptimizationResult` 32 | Describes the information gained by an optimization run. 33 | 34 | :class:`OptimizationSummary` 35 | Describes the whole information gained during an optimization run. 36 | 37 | Notes 38 | ----- 39 | The implementation was inspired by the optimal control package of QuTiP [1]_ 40 | (Quantum Toolbox in Python) 41 | 42 | References 43 | ---------- 44 | .. [1] J. R. Johansson, P. D. Nation, and F. Nori: "QuTiP 2: A Python framework 45 | for the dynamics of open quantum systems.", Comp. Phys. Comm. 184, 1234 46 | (2013) [DOI: 10.1016/j.cpc.2012.11.019]. 47 | 48 | """ 49 | 50 | from typing import Dict, List 51 | 52 | 53 | class OptimizationResult(object): 54 | """ 55 | Resulting data of the optimization. 56 | 57 | An instance of this class is returned by the `Optimizer` after the 58 | optimization has terminated. It holds the results of the optimization and 59 | can also contain an instance of `OptimizationSummary` to describe the 60 | optimization run itself, for example its convergence. 61 | The parameters of the initialization method are all optional. This class is 62 | intended to be initialized empty or loaded from a dictionary by the class 63 | method :meth:`from_dict`. 64 | 65 | Attributes 66 | ---------- 67 | termination_reason : string 68 | Reason for the termination as string. 69 | 70 | status : None or int 71 | The termination_reason as integer. Like in scipy.OptimizeResult 72 | None if the optimization has not started. 73 | -1: improper input parameters status 74 | 0: the maximum number of function evaluations is exceeded. 75 | 1: gradient norm termination condition is satisfied. 76 | 2: cost function termination condition is satisfied. 77 | 3: minimal step size termination condition is satisfied. 78 | 4: Both 2 and 3 termination conditions are satisfied. 79 | 80 | final_cost : float 81 | Value of the cost functions after the optimization. 82 | 83 | final_grad_norm : float 84 | Norm of the gradient after the optimization. 85 | 86 | num_iter : integer 87 | Number of iterations in the optimization algorithm. 88 | 89 | init_parameters : array, shape: (n_t, n_par) 90 | The amplitudes at the start of the optimisation, where n_t is 91 | the number of time steps simulated and n_par the number of 92 | optimization parameters. 93 | 94 | final_parameters : array, shape: (n_t, n_par) 95 | The optimization parameters at the end of the optimisation, where n_t 96 | is the number of time steps simulated and n_par the number of 97 | optimization parameters. 98 | 99 | optimizer : `Optimizer` 100 | Instance of the `Optimizer` used to generate the result 101 | 102 | optim_summary : `OptimizationSummary` 103 | None if no intermediary results are saved. Otherwise the infidelity 104 | during the optimization. 
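    Examples
    --------
    A minimal round trip through the dictionary representation (hedged
    sketch; only the attributes defined in this module are used):

    >>> result = OptimizationResult(final_cost=1e-3, num_iter=7)
    >>> restored = OptimizationResult.from_dict(result.to_dict())
    >>> restored.final_cost, restored.num_iter
    (0.001, 7)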
105 | 106 | """ 107 | 108 | def __init__(self, 109 | final_cost=None, 110 | indices=None, 111 | final_parameters=None, 112 | final_grad_norm=None, 113 | init_parameters=None, 114 | num_iter=None, 115 | termination_reason="not started yet", 116 | status=None, 117 | optimization_stats=None, 118 | optimizer=None, 119 | optim_summary=None): 120 | self.final_cost = final_cost 121 | self.indices = indices 122 | self.final_parameters = final_parameters 123 | self.final_grad_norm = final_grad_norm 124 | 125 | self.init_parameters = init_parameters 126 | 127 | self.num_iter = num_iter 128 | self.termination_reason = termination_reason 129 | self.status = status 130 | 131 | self.optimizer = optimizer 132 | self.optimization_stats = optimization_stats 133 | self.optim_summary = optim_summary 134 | 135 | def to_dict(self): 136 | """Writes the information held by this instance to a dictionary. 137 | 138 | Returns 139 | ------- 140 | dictionary: dict 141 | The information stored in a class instance as dictionary. 142 | 143 | """ 144 | return {'final_cost': self.final_cost, 145 | 'indices': self.indices, 146 | 'final_parameters': self.final_parameters, 147 | 'final_grad_norm': self.final_grad_norm, 148 | 'init_parameters': self.init_parameters, 149 | 'num_iter': self.num_iter, 150 | 'termination_reason': self.termination_reason, 151 | 'optimizer': self.optimizer, 152 | 'optimization_stats': self.optimization_stats, 153 | 'optim_summary': self.optim_summary 154 | } 155 | 156 | @classmethod 157 | def from_dict(cls, data_dict: Dict): 158 | """Initialize the class with the information held in a dictionary. 159 | 160 | Parameters 161 | ---------- 162 | data_dict: dict 163 | Class information. 164 | 165 | Returns 166 | ------- 167 | optim_result: OptimizationResult 168 | Class instance. 169 | 170 | """ 171 | return cls(**data_dict) 172 | 173 | 174 | class OptimizationSummary(object): 175 | """A summary of an optimization run. 176 | 177 | This class saves the state of the optimization for each iteration. All 178 | parameters for the initialization are optimal. The class is intended to be 179 | either initialized empty. 180 | 181 | Attributes 182 | ---------- 183 | iter_num : int 184 | Number of iterations stored. Serves as checksum to verify that full 185 | data has been stored. 186 | 187 | costs : List[float] 188 | Evaluation results of the cost functions. The dictionary is sorted by 189 | cost function indices. The lists hold one entry for each evaluation. 190 | 191 | indices : List[str] 192 | The indices of the cost functions. 193 | 194 | gradients : List[array] 195 | Gradients of the cost functions. The dictionary is again sorted by cost 196 | function indices and the lists hold one entry per evaluation. 197 | 198 | parameters : List[array] 199 | Optimization parameters during the optimization. 
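    Examples
    --------
    The lists are filled by the optimizer during a run; the manual appends
    below only illustrate the attribute types (hedged sketch):

    >>> summary = OptimizationSummary()
    >>> summary.costs.append(0.1)
    >>> summary.iter_num += 1
    >>> summary.costs, summary.iter_num
    ([0.1], 1)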
200 | 201 | """ 202 | 203 | def __init__(self, indices=None, iter_num=0, costs=None, gradients=None, 204 | parameters=None): 205 | self.indices = indices 206 | self.iter_num = iter_num 207 | if costs is None: 208 | self.costs = [] 209 | else: 210 | self.costs = costs 211 | if gradients is None: 212 | self.gradients = [] 213 | else: 214 | self.gradients = gradients 215 | if parameters is None: 216 | self.parameters = [] 217 | else: 218 | self.parameters = parameters 219 | -------------------------------------------------------------------------------- /qopt/parallel.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | """ 22 | This module contains functions for the support of multiprocessing. 23 | 24 | The function `run_optimization_parallel` can be used to perform the 25 | optimization for multiple initial conditions in parallel. 26 | 27 | Caution! The solver class `SchroedingerSMonteCarlo` offers a functionality for 28 | the parallel execution of the simulation vor various noise samples. These 29 | features are not compatible. The program can only be parallelized once. 30 | 31 | Functions 32 | --------- 33 | :func:`run_optimization` 34 | Executes the run_optimization method of an optimizer. 35 | 36 | :func:`run_optimization_parallel` 37 | Parallel execution of the run_optimization Method of the 38 | Optimizer. 39 | 40 | Notes 41 | ----- 42 | The implementation was inspired by the optimal control package of QuTiP [1]_ 43 | (Quantum Toolbox in Python) 44 | 45 | References 46 | ---------- 47 | .. [1] J. R. Johansson, P. D. Nation, and F. Nori: "QuTiP 2: A Python framework 48 | for the dynamics of open quantum systems.", Comp. Phys. Comm. 184, 1234 49 | (2013) [DOI: 10.1016/j.cpc.2012.11.019]. 50 | 51 | """ 52 | 53 | from multiprocessing import Pool 54 | import copy 55 | 56 | from qopt.data_container import DataContainer 57 | 58 | 59 | def run_optimization(optimizer, initial_pulse): 60 | """ Executes the run_optimization method of an optimizer. 61 | 62 | Parameters 63 | ---------- 64 | optimizer: Optimizer 65 | The Optimizer. 66 | 67 | initial_pulse: numpy array, shape (num_t, num_ctrl) 68 | The initial pulse. 69 | 70 | Returns 71 | ------- 72 | result: OptimizationResult 73 | The result of the optimization. 74 | 75 | """ 76 | return optimizer.run_optimization(initial_pulse) 77 | 78 | 79 | def run_optimization_parallel(optimizer, initial_pulses, processes=None): 80 | """ Parallel execution of the run_optimization Method of the 81 | Optimizer. 
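    Each initial pulse is optimized by its own deep copy of the optimizer
    (see the use of `copy.deepcopy` below), so the parallel runs do not
    share mutable state and the optimizer passed in is left unchanged.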
82 | 83 | Parameters 84 | ---------- 85 | optimizer: Optimizer 86 | The Optimizer. 87 | 88 | initial_pulses: numpy array, shape (num_init, num_t, num_ctrl) 89 | The initial pulse. Where num_init is the number of initial pulses. 90 | 91 | processes: int, optional 92 | If an integer is given, then the propagation is calculated in 93 | this number of parallel processes. If 1 then no parallel 94 | computing is applied. If None then cpu_count() is called to use 95 | all cores available. Defaults to None. 96 | 97 | Returns 98 | ------- 99 | data: DataContainer 100 | A DataContainer in which the OptimizationResults are saved. 101 | 102 | """ 103 | optimizers = [copy.deepcopy(optimizer) for _ in initial_pulses] 104 | with Pool(processes=processes) as pool: 105 | results = pool.starmap( 106 | run_optimization, zip(optimizers, initial_pulses)) 107 | data = DataContainer() 108 | for result in results: 109 | data.append_optim_result(result) 110 | return data 111 | -------------------------------------------------------------------------------- /qopt/performance_statistics.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | """Statistics of the use of computational resources. 22 | 23 | The class `PerformanceStatistics` gathers information about the wall time 24 | spend for the calculation of each cost function and its gradient. It can be 25 | used to evaluate the consumption of computational resources. 26 | 27 | Notes 28 | ----- 29 | The implementation was inspired by the optimal control package of QuTiP [1]_ 30 | (Quantum Toolbox in Python) 31 | 32 | References 33 | ---------- 34 | .. [1] J. R. Johansson, P. D. Nation, and F. Nori: "QuTiP 2: A Python framework 35 | for the dynamics of open quantum systems.", Comp. Phys. Comm. 184, 1234 36 | (2013) [DOI: 10.1016/j.cpc.2012.11.019]. 37 | 38 | """ 39 | 40 | 41 | class PerformanceStatistics(object): 42 | """Stores performance statistics. 43 | 44 | Attributes 45 | ---------- 46 | start_t_opt: float or None 47 | Time of the optimizations start. None if it has not been set yet. 48 | 49 | end_t_opt: float or None 50 | Time of the optimizations end. None if it has not been set yet. 51 | 52 | indices : List[str] 53 | The indices of the cost functions. 54 | 55 | cost_func_eval_times: list of float 56 | List of durations of the evaluation of the cost functions. 57 | 58 | grad_func_eval_times: list of float 59 | List of durations of the evaluation of the gradients. 
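    Examples
    --------
    The evaluation times are recorded by the optimizer; the values below are
    placeholders that only illustrate a typical post-processing step (hedged
    sketch):

    >>> stats = PerformanceStatistics()
    >>> stats.cost_func_eval_times = [0.25, 0.5, 0.75]
    >>> sum(stats.cost_func_eval_times) / len(stats.cost_func_eval_times)
    0.5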
60 | 61 | """ 62 | def __init__(self): 63 | self.start_t_opt = None 64 | self.end_t_opt = None 65 | self.indices = None 66 | self.cost_func_eval_times = [] 67 | self.grad_func_eval_times = [] 68 | -------------------------------------------------------------------------------- /qopt/plotting.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | """ Plotting functions. 22 | 23 | The function `plot_bloch_vector_evolution` can be used to plot the evolution 24 | under a series of propagators on the bloch sphere. It uses QuTiP and is only 25 | available if QuTiP is installed in the environment. (See installation 26 | instructions on https://github.com/qutech/qopt) 27 | 28 | Functions 29 | --------- 30 | :func:`plot_bloch_vector_evolution` 31 | Plots the evolution of the forward propagators of the initial state on the 32 | bloch sphere. 33 | 34 | Notes 35 | ----- 36 | The implementation was adapted from the filter_functions package. 37 | 38 | """ 39 | 40 | import numpy as np 41 | import matplotlib.pyplot as plt 42 | from unittest import mock 43 | from warnings import warn 44 | from typing import Sequence 45 | 46 | from qopt.matrix import OperatorMatrix 47 | 48 | __all__ = [] 49 | 50 | try: 51 | import qutip as qt 52 | __all__.append('plot_bloch_vector_evolution') 53 | except ImportError: 54 | warn('Qutip not installed. plot_bloch_vector_evolution() is not available') 55 | qt = mock.Mock() 56 | 57 | 58 | def plot_bloch_vector_evolution( 59 | forward_propagators: Sequence[OperatorMatrix], 60 | initial_state: OperatorMatrix, 61 | return_bloch: bool = False, 62 | **bloch_kwargs): 63 | """ 64 | Plots the evolution of the forward propagators of the initial state on the 65 | bloch sphere. 66 | 67 | Parameters 68 | ---------- 69 | forward_propagators: list of DenseOperators 70 | The forward propagators whose evolution shall be plotted on the Bloch 71 | sphere. 72 | 73 | initial_state: DenseOperator 74 | The initial state aka. beginning point of the plotting. 75 | 76 | return_bloch: bool, optional 77 | If True, the Bloch sphere is returned as object. 78 | 79 | bloch_kwargs: dict, optional 80 | Plotting parameters for the Bloch sphere. 81 | 82 | Returns 83 | ------- 84 | bloch_sphere: 85 | Only returned if return_bloch is set to true. 86 | 87 | """ 88 | try: 89 | import qutip as qt 90 | except ImportError as err: 91 | raise RuntimeError( 92 | 'Requirements not fulfilled. 
Please install Qutip') from err 93 | 94 | if not forward_propagators[0].shape[0] == 2: 95 | raise ValueError('Plotting Bloch sphere evolution only implemented ' 96 | 'for one-qubit case!') 97 | 98 | figsize = bloch_kwargs.pop('figsize', [5, 5]) 99 | view = bloch_kwargs.pop('view', [-60, 30]) 100 | fig = plt.figure(figsize=figsize) 101 | axes = fig.add_subplot(projection='3d', azim=view[0], elev=view[1]) 102 | bloch_kwargs.setdefault('view', [-150, 30]) 103 | b = qt.Bloch(fig=fig, axes=axes, **bloch_kwargs) 104 | 105 | # https://github.com/qutip/qutip/issues/1385 106 | if hasattr(b.axes, 'set_box_aspect'): 107 | b.axes.set_box_aspect([1, 1, 1]) 108 | 109 | b.xlabel = [r'$|+\rangle$', ''] 110 | b.ylabel = [r'$|+_i\rangle$', ''] 111 | 112 | states = [ 113 | qt.Qobj((prop * initial_state).data) for prop in forward_propagators 114 | ] 115 | a = np.empty((3, len(states))) 116 | x, y, z = qt.sigmax(), qt.sigmay(), qt.sigmaz() 117 | for i, state in enumerate(states): 118 | a[:, i] = [qt.expect(x, state), 119 | qt.expect(y, state), 120 | qt.expect(z, state)] 121 | b.add_points(a.real, meth='l') 122 | b.make_sphere() 123 | 124 | if return_bloch: 125 | return b 126 | -------------------------------------------------------------------------------- /qopt/util.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # ============================================================================= 3 | # qopt 4 | # Copyright (C) 2020 Julian Teske, Forschungszentrum Juelich 5 | # 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program. If not, see . 18 | # 19 | # Contact email: j.teske@fz-juelich.de 20 | # ============================================================================= 21 | """ 22 | Utility functions for the optimal control package. 23 | 24 | Functions 25 | --------- 26 | :func:`deprecated` decorator 27 | Marks functions and methods which are deprecated. 28 | 29 | :func:`needs_refactoring` decorator 30 | Marks objects which need refactoring. 31 | 32 | :func:`timeit` decorator 33 | Measures the run time of a function evaluation. 34 | 35 | :func:`closest_unitary` 36 | Calculates the closest unitary matrix to a square matrix. 37 | 38 | Notes 39 | ----- 40 | The implementation was inspired by the optimal control package of QuTiP [1]_ 41 | (Quantum Toolbox in Python) 42 | 43 | References 44 | ---------- 45 | .. [1] J. R. Johansson, P. D. Nation, and F. Nori: "QuTiP 2: A Python framework 46 | for the dynamics of open quantum systems.", Comp. Phys. Comm. 184, 1234 47 | (2013) [DOI: 10.1016/j.cpc.2012.11.019]. 48 | 49 | """ 50 | 51 | import warnings 52 | import functools 53 | import time 54 | import scipy 55 | import numpy as np 56 | 57 | 58 | def deprecated(func): 59 | """This is a decorator which can be used to mark functions 60 | as deprecated. 
It will result in a warning being emitted 61 | when the function is used.""" 62 | @functools.wraps(func) 63 | def new_func(*args, **kwargs): 64 | warnings.simplefilter('always', DeprecationWarning) # turn off filter 65 | warnings.warn("Call to deprecated function {}.".format(func.__name__), 66 | category=DeprecationWarning, 67 | stacklevel=2) 68 | warnings.simplefilter('default', DeprecationWarning) # reset filter 69 | return func(*args, **kwargs) 70 | return new_func 71 | 72 | 73 | def needs_refactoring(func): 74 | """This is a decorator which can be used to mark functions 75 | which need to be refactored. It will result in a warning being emitted 76 | when the function is used.""" 77 | @functools.wraps(func) 78 | def new_func(*args, **kwargs): 79 | warnings.simplefilter('always', DeprecationWarning) # turn off filter 80 | warnings.warn("Call to function {} which needs refactoring.".format( 81 | func.__name__), 82 | category=DeprecationWarning, 83 | stacklevel=2) 84 | warnings.simplefilter('default', DeprecationWarning) # reset filter 85 | return func(*args, **kwargs) 86 | return new_func 87 | 88 | 89 | def timeit(function): 90 | """Convenience function to measure the run time of a function. 91 | 92 | This function can be applied as decorator to get a function that evaluates 93 | the input function an measures the run time. 94 | 95 | Parameters 96 | ---------- 97 | function: Callable 98 | The function of which the run time is measured. 99 | 100 | Returns 101 | ------- 102 | timed: Callable 103 | Timed function. 104 | 105 | """ 106 | def timed(*args, **kw): 107 | ts = time.time() 108 | result = function(*args, **kw) 109 | te = time.time() 110 | return result, (te - ts) 111 | return timed 112 | 113 | 114 | def closest_unitary(A): 115 | """ Closest unitary to given square matrix. 116 | 117 | Calculate the unitary matrix U that is closest with respect to the 118 | operator norm distance to the general matrix A. 119 | 120 | Returns 121 | ------- 122 | U: np.array 123 | Closest unitary. 124 | 125 | """ 126 | V, __, Wh = scipy.linalg.svd(A) 127 | U = np.matrix(V.dot(Wh)) 128 | return U 129 | -------------------------------------------------------------------------------- /qopt_tests/integration_tests/test_t1_comparison.py: -------------------------------------------------------------------------------- 1 | """ 2 | This Test will compute a single qubit with T1 noise. 3 | 4 | Step 1: We compare lindlblad master equation vs Monte 5 | Carlo simulation vs filter functions. 6 | 7 | Step 2: Compare the gradients to finite differences. 8 | 9 | """ 10 | 11 | 12 | from qopt import * 13 | import numpy as np 14 | import unittest 15 | 16 | 17 | np.random.seed(0) 18 | total_time = 2 19 | noise_variance = 1e-3 20 | n_time_steps = 3 21 | fid_ctrl_amps = np.expand_dims(np.zeros(n_time_steps), 1) 22 | 23 | # we need to change the pulse, otherwise some analytic gradients 24 | # become 0 and we just get numerical errors. 
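# The analytic gradients in this test are later compared against a numerical
# reference via Simulator.compare_numeric_to_analytic_gradient, which takes a
# finite-difference step delta_eps. As a hedged illustration (hypothetical
# helper, not part of qopt), such a reference for a single pulse entry can be
# built from a central difference quotient:
def _central_difference(cost_func, pulse, index, delta_eps=1e-8):
    """Central difference (f(x + h) - f(x - h)) / (2 h) for one pulse entry."""
    shift = np.zeros_like(pulse)
    shift.flat[index] = delta_eps
    return (cost_func(pulse + shift) - cost_func(pulse - shift)) \
        / (2 * delta_eps)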
25 | gradient_pulse = fid_ctrl_amps + 1 26 | 27 | bz_rotation = np.pi 28 | target = (.5 * DenseOperator.pauli_x()).exp(1j * bz_rotation) 29 | 30 | delta_bz = bz_rotation / total_time 31 | delta_t = total_time / n_time_steps 32 | 33 | h_drift = [delta_bz * .5 * DenseOperator.pauli_x()] 34 | h_ctrl = [.5 * DenseOperator.pauli_z()] 35 | time_steps = delta_t * np.ones(n_time_steps) 36 | 37 | 38 | def create_lindblad_simulator(): 39 | 40 | def prefactor_function(transferred_parameters, _): 41 | return noise_variance * np.ones_like(transferred_parameters) 42 | 43 | lindblad_solver = LindbladSolver( 44 | h_drift=h_drift, 45 | h_ctrl=h_ctrl, 46 | tau=time_steps, 47 | prefactor_function=prefactor_function, 48 | lindblad_operators=[.5 * DenseOperator.pauli_z()] 49 | ) 50 | 51 | lindblad_cost_fkt = OperationInfidelity( 52 | solver=lindblad_solver, 53 | super_operator_formalism=True, 54 | target=target 55 | ) 56 | lindblad_simulator = Simulator( 57 | solvers=[lindblad_solver, ], 58 | cost_funcs=[lindblad_cost_fkt, ] 59 | ) 60 | return lindblad_simulator 61 | 62 | 63 | def create_mc_simulator(low_freq_ex): 64 | 65 | def noise_spectral_density(f): 66 | return 2 * noise_variance * np.ones_like(f) # factor of 2 for 67 | # one-sided spectrum 68 | 69 | noise_trace_generator = NTGColoredNoise( 70 | n_samples_per_trace=n_time_steps, 71 | dt=delta_t, 72 | noise_spectral_density=noise_spectral_density, 73 | n_traces=3000, 74 | low_frequency_extension_ratio=low_freq_ex 75 | ) 76 | # The low frequency extension leads to the sampling of a noise trace 77 | # for more time steps. The simulation then uses only a section of this 78 | # longer trace. The extension allows the noise trace generator to include 79 | # low frequency noise. 80 | 81 | monte_carlo_solver = SchroedingerSMonteCarlo( 82 | h_drift=h_drift, 83 | h_ctrl=h_ctrl, 84 | tau=time_steps, 85 | h_noise=[.5 * DenseOperator.pauli_z()], 86 | noise_trace_generator=noise_trace_generator 87 | ) 88 | 89 | mc_cost_fkt = OperationNoiseInfidelity( 90 | solver=monte_carlo_solver, 91 | target=target, 92 | neglect_systematic_errors=False 93 | ) 94 | 95 | monte_carlo_simulator = Simulator( 96 | solvers=[monte_carlo_solver, ], 97 | cost_funcs=[mc_cost_fkt, ] 98 | ) 99 | 100 | return monte_carlo_simulator 101 | 102 | 103 | def create_ff_simulator(low_freq_rel, ff_n_time_steps): 104 | 105 | ff_hamiltonian_noise = [[ 106 | .5 * DenseOperator.pauli_z().data, 107 | np.ones(ff_n_time_steps), 108 | 'Noise 1' 109 | ], ] 110 | 111 | ff_solver = SchroedingerSolver( 112 | h_drift=h_drift, 113 | h_ctrl=h_ctrl, 114 | tau=(total_time / ff_n_time_steps) * np.ones(ff_n_time_steps), 115 | filter_function_h_n=ff_hamiltonian_noise 116 | ) 117 | 118 | def noise_spectral_density(f): 119 | return 2 * noise_variance * np.ones_like(f) # factor of 2 for 120 | # one-sided spectrum 121 | 122 | ff_cost_fkn = OperatorFilterFunctionInfidelity( 123 | solver=ff_solver, 124 | noise_power_spec_density=noise_spectral_density, 125 | omega=(1 / (total_time * low_freq_rel)) * 126 | (np.arange(ff_n_time_steps * low_freq_rel) + 1) 127 | ) 128 | 129 | ff_simulator = Simulator( 130 | solvers=[ff_solver, ], 131 | cost_funcs=[ff_cost_fkn, ] 132 | ) 133 | 134 | return ff_simulator 135 | 136 | 137 | class TestT1Decay(unittest.TestCase): 138 | 139 | def test_compare_infidelitites_and_gradient_accuracy(self): 140 | np.random.seed(0) 141 | ff_n_time_steps = 20 142 | simulators = [ 143 | create_lindblad_simulator(), 144 | create_mc_simulator(low_freq_ex=20), 145 | create_ff_simulator( 146 | low_freq_rel=100, 
ff_n_time_steps=ff_n_time_steps) 147 | ] 148 | 149 | infidelities = [] 150 | gradient_accuracy = [] 151 | 152 | ff_fid_ctrl_amps = np.expand_dims(np.zeros(ff_n_time_steps), 1) 153 | 154 | for sim, pulse, delta in zip( 155 | simulators, 156 | [fid_ctrl_amps, fid_ctrl_amps, ff_fid_ctrl_amps], 157 | [1e-8, 1e-1, 1e-8] 158 | ): 159 | infidelities.append(sim.wrapped_cost_functions(pulse)) 160 | gradient_accuracy.append( 161 | sim.compare_numeric_to_analytic_gradient(pulse + 1, 162 | delta_eps=delta) 163 | ) 164 | 165 | for k in range(3): 166 | # assert all simulations yield the same result 167 | rel_infidelity_variation = \ 168 | .5 * (infidelities[0] - infidelities[k]) \ 169 | / (infidelities[0] + infidelities[k]) 170 | self.assertTrue(rel_infidelity_variation < 1e-1) 171 | 172 | # Assert correct Gradients 173 | 174 | # Lindblad is about as accurate as the finite differences 175 | self.assertTrue(gradient_accuracy[0][0] < 1e-7) 176 | 177 | # Monte Carlo has stability issues to converge to 0: 178 | self.assertTrue(gradient_accuracy[1][0] < 1e-2) 179 | 180 | # Filter functions very small: 181 | self.assertTrue(gradient_accuracy[2][0] < 1e-10) 182 | 183 | -------------------------------------------------------------------------------- /qopt_tests/test_plan.md: -------------------------------------------------------------------------------- 1 | # qopt Tests 2 | 3 | The purpose of this file is to contain a structure for the test coverage of 4 | qopt classes. 5 | 6 | ## Unittests 7 | 8 | - Matrix operations 9 | - Propagator Calculation 10 | - Transfer Functions 11 | - Fidelity Functions 12 | - Noise Distribution Sampling 13 | 14 | ## Integration Tests 15 | 16 | ### Tests with Analytical Results 17 | 18 | - T2-star pure dephasing with quasi static noise 19 | - T1 depolarization decay with white noise 20 | - T2 non-Markovian literature analytic calculation 21 | - T2 Markovian noise 22 | 23 | #### Refactored: 24 | 25 | - T1 comparison of MC, Lindblad and FF 26 | 27 | ### Tests with Numerical Results 28 | 29 | - Test gradients vs. finite differences 30 | 31 | ### Consistency 32 | 33 | - FF vs. Lindblad vs. MC for white noise 34 | - FF vs MC for colored noise 35 | - Use multiple optimization algorithms of simple problem to ensure convergence 36 | 37 | ### Testing per Module 38 | 39 | To ensure coverage of all critical features, we also go through each module 40 | and list its coverage in a test. 41 | 42 | #### Amplitude Functions 43 | Correct gradient calculation tested by finite differences. 44 | 45 | #### Cost Functions 46 | Correct gradient calculation tested by finite differences. 47 | 48 | 49 | #### Matrix 50 | Unittests for the calculation of Matrix exponential. And other methods. 51 | 52 | #### Noise 53 | Fast noise tested with periodogram. Slow noise with analytic pure dephasing. 54 | 55 | #### Optimize 56 | Use multiple optimization algorithms of simple problem to ensure convergence. 57 | 58 | #### Simulator 59 | Covered in all integration tests using also optimizers. 60 | 61 | #### Solver Algorithms 62 | Unittests for the correct calculation of Propagators. Gradient calculation 63 | checked for all classes with finite differences. Used in almost every 64 | integration test. 65 | 66 | #### Transfer Functions 67 | Correct gradient calculation tested by finite differences. 68 | 69 | 70 | ## Exempted from Testing 71 | 72 | The less critical modules are not explicitly considered in the testing. 
73 | 74 | - data_container 75 | - energy_spectrum 76 | - optimization_data 77 | - parallel 78 | - performance_statistics 79 | - plotting 80 | - util -------------------------------------------------------------------------------- /qopt_tests/testutil.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions for the unit tests. 3 | """ 4 | 5 | import numpy as np 6 | from typing import Callable 7 | 8 | 9 | def calculate_jacobian( 10 | func: Callable[[np.ndarray], np.ndarray], 11 | x0: np.ndarray, 12 | delta_x: float): 13 | x_shape = x0.shape 14 | num_par = x0.size 15 | y0 = func(x0) 16 | num_func = y0.size 17 | 18 | y = np.empty((num_par, num_func, 3), np.float64) 19 | 20 | for i in range(num_par): 21 | dif_x = np.zeros_like(x0, dtype=np.float64).flatten() 22 | dif_x[i] = delta_x 23 | dif_x = dif_x.reshape(x_shape) 24 | y[i, :, 1] = y0 25 | y[i, :, 0] = func(x0 - dif_x) 26 | y[i, :, 2] = func(x0 + dif_x) 27 | 28 | jacobian = np.gradient(y, delta_x, axis=2) 29 | return jacobian[:, :, 1] 30 | -------------------------------------------------------------------------------- /qopt_tests/unittests/test_matrix.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test Dense operators: 3 | - multiplication 4 | - addition 5 | - matrix exponential 6 | """ 7 | from qopt import matrix 8 | 9 | import unittest 10 | import math 11 | import numpy as np 12 | 13 | 14 | SIGMA_X = np.asarray([[0, 1], [1, 0]]) 15 | 16 | 17 | class TestMatrix(unittest.TestCase): 18 | def test_dense_control_matrix_arithmetic(self): 19 | mat = matrix.DenseOperator(SIGMA_X) 20 | 21 | # test scalar multiplication: 22 | two_times_mat = 2 * mat 23 | two_times_mat2 = mat * 2 24 | mat *= 2 25 | 26 | np.testing.assert_array_almost_equal(two_times_mat.data, 27 | two_times_mat2.data) 28 | np.testing.assert_array_almost_equal(two_times_mat2.data, mat.data) 29 | 30 | # test addition 31 | 32 | mat = matrix.DenseOperator(SIGMA_X) 33 | two_times_mat3 = mat + np.asarray(SIGMA_X.data) 34 | two_times_mat5 = mat + mat 35 | mat += mat 36 | 37 | np.testing.assert_array_almost_equal(two_times_mat3.data, 38 | two_times_mat.data) 39 | np.testing.assert_array_almost_equal(two_times_mat5.data, 40 | two_times_mat.data) 41 | np.testing.assert_array_almost_equal(mat.data, two_times_mat.data) 42 | 43 | # test subtraction 44 | 45 | mat = matrix.DenseOperator(SIGMA_X) 46 | should_be_zeros = mat - np.asarray(SIGMA_X.data) 47 | two_times_mat -= mat 48 | 49 | np.testing.assert_array_almost_equal(two_times_mat.data, mat.data) 50 | np.testing.assert_array_almost_equal(should_be_zeros.data, 51 | np.zeros((2, 2))) 52 | 53 | def test_dense_control_matrix_functions(self): 54 | 55 | tau = .5j * math.pi 56 | sigma_x = matrix.DenseOperator(SIGMA_X) 57 | exponential = sigma_x.exp(tau) 58 | 59 | np.testing.assert_array_almost_equal( 60 | exponential.data, sigma_x.data * 1j) 61 | exponential = sigma_x.exp(2 * tau) 62 | np.testing.assert_array_almost_equal(exponential.data, -1 * np.eye(2)) 63 | 64 | prop_spectral, prop_grad_spectral = sigma_x.dexp( 65 | direction=sigma_x, tau=tau, compute_expm=True, 66 | method='spectral') 67 | prop_frechet, prop_grad_frechet = sigma_x.dexp( 68 | direction=sigma_x, tau=tau, compute_expm=True, 69 | method='Frechet') 70 | """ 71 | prop_approx, prop_grad_approx = sigma_x.dexp( 72 | direction=sigma_x, tau=tau, compute_expm=True, 73 | method='approx') 74 | """ 75 | prop_first_order, prop_grad_first_order = sigma_x.dexp( 76 | direction=sigma_x, 
tau=tau, compute_expm=True, 77 | method='first_order') 78 | prop_second_order, prop_grad_second_order = sigma_x.dexp( 79 | direction=sigma_x, tau=tau, compute_expm=True, 80 | method='second_order') 81 | prop_third_order, prop_grad_third_order = sigma_x.dexp( 82 | direction=sigma_x, tau=tau, compute_expm=True, 83 | method='third_order') 84 | np.testing.assert_array_almost_equal(prop_spectral.data, 85 | prop_frechet.data) 86 | """ 87 | np.testing.assert_array_almost_equal(prop_spectral.data, 88 | prop_approx.data) 89 | """ 90 | np.testing.assert_array_almost_equal(prop_spectral.data, 91 | prop_first_order.data) 92 | np.testing.assert_array_almost_equal(prop_spectral.data, 93 | prop_second_order.data) 94 | np.testing.assert_array_almost_equal(prop_spectral.data, 95 | prop_third_order.data) 96 | np.testing.assert_array_almost_equal(prop_grad_spectral.data, 97 | prop_grad_frechet.data) 98 | """ 99 | np.testing.assert_array_almost_equal(prop_grad_spectral.data, 100 | prop_grad_approx.data) 101 | """ 102 | 103 | # test kronecker product: 104 | np.random.seed(0) 105 | a = matrix.DenseOperator(np.random.rand(5, 5)) 106 | b = matrix.DenseOperator(np.random.rand(5, 5)) 107 | c = a.kron(b) 108 | c_2 = a.kron(b.data) 109 | c_np = np.kron(a.data, b.data) 110 | 111 | np.testing.assert_array_almost_equal(c.data, c_2.data) 112 | np.testing.assert_array_almost_equal(c.data, c_np) 113 | 114 | norm = sigma_x.norm() 115 | self.assertAlmostEqual(norm, np.sqrt(2)) 116 | 117 | def test_dense_partial_trace(self): 118 | a = .5 * matrix.DenseOperator.pauli_0().kron( 119 | matrix.DenseOperator.pauli_y()) 120 | trace_1 = a.ptrace([2, 2], [0]) 121 | trace_2 = a.ptrace([2, 2], [1]) 122 | np.testing.assert_array_almost_equal( 123 | trace_1.data, matrix.DenseOperator.pauli_y().data) 124 | np.testing.assert_array_almost_equal( 125 | trace_2.data, np.zeros([2, 2])) 126 | 127 | def test_vectorization(self): 128 | # test vectorization 129 | a = matrix.DenseOperator(np.arange(4).reshape(2, 2)) 130 | rho = matrix.ket_vectorize_density_matrix(a) 131 | reference_rho = np.asarray([[0], [2], [1], [3]]) 132 | np.testing.assert_array_almost_equal(rho.data, reference_rho) 133 | 134 | # test inverse operation 135 | a_large = matrix.DenseOperator(np.arange(25).reshape(5, 5)) 136 | np.testing.assert_array_almost_equal( 137 | a_large.data, 138 | (matrix.convert_ket_vectorized_density_matrix_to_square( 139 | matrix.ket_vectorize_density_matrix(a_large) 140 | )).data 141 | ) 142 | -------------------------------------------------------------------------------- /qopt_tests/unittests/test_noise.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test fast noise generators vs their periodogram. 3 | 4 | Test quasi static samples for standard deviation and mean. 
5 | """ 6 | import unittest 7 | import numpy as np 8 | from scipy import special, integrate 9 | 10 | 11 | from qopt.noise import NTGColoredNoise, sample_1dim_gaussian_distribution, \ 12 | NTGQuasiStatic, inverse_cumulative_gaussian_distribution_function 13 | 14 | random_scaling_factor = 50 15 | 16 | 17 | def white_noise_spectrum(frequencies): 18 | return random_scaling_factor * np.ones_like(frequencies) 19 | 20 | 21 | def pink_noise_spectrum(frequencies): 22 | return random_scaling_factor / frequencies 23 | 24 | 25 | def gaussian(x, std, mean): 26 | return 1 / (std * np.sqrt(2 * np.pi)) * np.exp( 27 | -.5 * (x - mean) ** 2 / std ** 2) 28 | 29 | 30 | class TestNoiseSampling(unittest.TestCase): 31 | n_average = 10000 32 | 33 | def test_white_noise_sampling(self): 34 | 35 | ntg = NTGColoredNoise( 36 | noise_spectral_density=white_noise_spectrum, 37 | n_samples_per_trace=2000, n_traces=2, n_noise_operators=1, 38 | always_redraw_samples=True, dt=1e-4 39 | ) 40 | 41 | deviation_norm = ntg.plot_periodogram( 42 | n_average=self.n_average, scaling='density', draw_plot=False) 43 | self.assertLess(deviation_norm / self.n_average, 1e-2) 44 | 45 | np.random.randn() 46 | 47 | def test_pink_noise_sampling(self): 48 | ntg = NTGColoredNoise( 49 | noise_spectral_density=pink_noise_spectrum, 50 | n_samples_per_trace=2000, n_traces=1, n_noise_operators=1, 51 | always_redraw_samples=True, dt=1e-4 52 | ) 53 | 54 | deviation_norm = ntg.plot_periodogram( 55 | n_average=self.n_average, scaling='density', log_plot='loglog', 56 | draw_plot=False) 57 | self.assertLess(deviation_norm / self.n_average, 1e-3) 58 | 59 | def test_inverse_cumulative_gaussian_dist(self): 60 | def gaussian(x, std, mean): 61 | return 1 / (std * np.sqrt(2 * np.pi)) * np.exp( 62 | -.5 * (x - mean) ** 2 / std ** 2) 63 | 64 | for std in [1, 2.3, 5]: 65 | for mean in [-4, .03, 33]: 66 | for x_lim in [.2, .4, .66]: 67 | x_lim = x_lim * std + mean 68 | integral, error = integrate.quad( 69 | lambda x: gaussian(x, std=std, mean=mean), -np.inf, 70 | x_lim) 71 | inverse = inverse_cumulative_gaussian_distribution_function( 72 | integral, std=std, mean=mean) 73 | # print('std: ' + str(std)) 74 | # print('mean: ' + str(mean)) 75 | assert (x_lim - inverse) < 1e-5 * error 76 | 77 | def test_gaussian_sampling(self): 78 | 79 | for std in [1, 2.3, 5]: 80 | for mean in [-4, .03, 33]: 81 | for n_samples in [5, 10, 200]: 82 | samples = sample_1dim_gaussian_distribution( 83 | std=std, mean=mean, n_samples=n_samples) 84 | 85 | # apply the inverse cumulative gaussian dist 86 | for i in range(1, n_samples): 87 | integral, error = integrate.quad( 88 | lambda x: gaussian(x, std=std, mean=mean), 89 | samples[i-1], 90 | samples[i]) 91 | assert (integral - 1 / n_samples) < 1e-6 * std 92 | 93 | def test_std_rescaling_quasi_static(self): 94 | for std in [1, 2.3, 5]: 95 | for n_samples in [5, 10, 200]: 96 | ntg = NTGQuasiStatic(standard_deviation=[std, ], 97 | n_samples_per_trace=10, 98 | n_traces=n_samples, 99 | correct_std_for_discrete_sampling=True) 100 | samples = ntg.noise_samples 101 | assert np.std(samples) - std < 1e-10 102 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | matplotlib 4 | filter_functions 5 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file 
= README.md -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup( 4 | name='qopt', 5 | version='1.3.5', 6 | packages=['qopt'], 7 | url='https://github.com/qutech/qopt', 8 | license='GLP3', 9 | author='Julian Teske', 10 | author_email='j.teske@fz-juelich.de', 11 | description='Qubit Simulation and Optimal Control for Quantum Systems', 12 | package_dir={'qopt': 'qopt'}, 13 | install_requires=['numpy', 'scipy', 'matplotlib', 14 | 'filter_functions>=1.1.2'], 15 | extras_require={ 16 | 'doc': ['ipython', 'ipykernel', 'nbsphinx', 'numpydoc', 'sphinx', 17 | 'jupyter_client', 'sphinx_rtd_theme'], 18 | 'qopt_tests': ['pytest', 'pytest_cov'], 19 | }, 20 | test_suite='qopt_tests', 21 | classifiers=[ 22 | 'Programming Language :: Python :: 3', 23 | 'License :: OSI Approved :: GNU General Public License v3 or later ' 24 | '(GPLv3+)', 25 | 'Operating System :: OS Independent', 26 | 'Topic :: Scientific/Engineering :: Physics', 27 | ] 28 | ) 29 | -------------------------------------------------------------------------------- /temp/GaAsExchangeCoupledCerf.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file is meant to reproduce the results of the paper 'A High-Fidelity Gate 3 | Set for Exchange-Coupled Singlet-Triplet Qubits' by Cerfontaine et al. 4 | 5 | All energies are given in inverse nano seconds. They denote angular frequencies. 6 | All times in nano seconds. 7 | 8 | """ 9 | import numpy as np 10 | import qutip 11 | from qopt.matrix import DenseOperator 12 | 13 | INFIDELITIES = { 14 | 'leakage': 1.8e-5, 15 | 'entanglement_bound': 1e-10, 16 | 'fast_white': 1.6e-3, 17 | 'quasi_static_elec': 4.9e-4, 18 | 'quasi_static_magn': 1.7e-4 19 | } 20 | 21 | REQUIRED_ACCURACY = { 22 | 'leakage': .1, 23 | 'fast_white_mc': .15, 24 | 'fast_white_me': .31, 25 | 'quasi_static_elec': .1, 26 | 'quasi_static_magn': .1 27 | } 28 | 29 | SIGMA_X = DenseOperator(qutip.sigmax()) 30 | SIGMA_Y = DenseOperator(qutip.sigmay()) 31 | SIGMA_Z = DenseOperator(qutip.sigmaz()) 32 | SIGMA_0 = DenseOperator(np.eye(2)) 33 | 34 | # global constants (Table 1 in the paper) 35 | SIGMA_EPS = 8e-3 # mV 36 | SIGMA_b = .3 # mT 37 | EPSILON_0 = .272 # mV 38 | J_0 = 1 # ns^-1 39 | EPSILON_MIN = -5.4 * EPSILON_0 # mV 40 | EPSILON_MAX = 2.4 * EPSILON_0 # mV 41 | F_S = 1 # GS/s 42 | B_G = 500 # mT 43 | 44 | # changing dimensions: 1mT ~ 5.6 MHz ~ 5.6 / mu s ~ 0.0056 / ns 45 | # milli_tesla_to_inv_ns = 5.6e-3 this was master thesis 46 | milli_tesla_to_inv_ns = 6.2e-3 47 | B_G *= milli_tesla_to_inv_ns 48 | SIGMA_b *= milli_tesla_to_inv_ns 49 | SIGMA_b *= 2 * np.pi # to make it angular freq 50 | # more constants 51 | PADDING_END = 4 * 5 # due to oversampling 52 | d_subspace = 6 53 | 54 | AWG_RISE_TIME = .5 # ns 55 | OFFSET = 0 56 | DELTA_T = 1 57 | N_TIME_SLICES = 46 58 | 59 | B12 = 1 # in 1 / ns 60 | B23 = 7 61 | B34 = -1 62 | # This has been used by Tobias (understand why? Doesnt change anything.) 
63 | B_G = 19.34702098 64 | 65 | OVERSAMPLING = 5 66 | N_QS_NOISE_TRACES = 20 67 | 68 | # S_0 = 4e-5 # mV^2 / ns unit cancels when divided by epsilon 0 69 | S_0 = 4e-5 70 | S_12 = S_0 71 | S_23 = S_0 72 | S_34 = S_0 73 | S_fast_noise = [S_12, S_23, S_34] 74 | 75 | CONSTANTS = { 76 | 'leakage_infid': 1.8e-5, 77 | 'systematic_infid': 1e-10, 78 | 'sigma_eps': SIGMA_EPS, 79 | 'eps0': EPSILON_0, 80 | 's0_white_noise': S_0, 81 | 'sigma_b': SIGMA_b 82 | } 83 | 84 | 85 | # Operators 86 | 87 | # subspace 88 | sub_space_ind = np.ix_([3, 5, 6, 9, 10, 12], [3, 5, 6, 9, 10, 12]) 89 | comp_sub_sub_space_ind = [1, 2, 3, 4] 90 | 91 | CNOT = np.zeros((d_subspace, d_subspace)) 92 | CNOT[0, 0] = 1 93 | CNOT[1, 1] = 1 94 | CNOT[2, 2] = 1 95 | CNOT[3, 4] = 1 96 | CNOT[4, 3] = 1 97 | CNOT[5, 5] = 1 98 | CNOT = DenseOperator(CNOT) 99 | 100 | CNOT_4 = CNOT.truncate_to_subspace(comp_sub_sub_space_ind) 101 | 102 | 103 | def exchange_interaction(amplitude): 104 | return np.exp(amplitude) 105 | 106 | 107 | def deriv_exchange_interaction(amplitude): 108 | return np.exp(amplitude) 109 | 110 | 111 | sig_1_z = SIGMA_Z.kron(SIGMA_0).kron(SIGMA_0).kron(SIGMA_0) 112 | sig_2_z = SIGMA_0.kron(SIGMA_Z).kron(SIGMA_0).kron(SIGMA_0) 113 | sig_3_z = SIGMA_0.kron(SIGMA_0).kron(SIGMA_Z).kron(SIGMA_0) 114 | sig_4_z = SIGMA_0.kron(SIGMA_0).kron(SIGMA_0).kron(SIGMA_Z) 115 | 116 | h_drift = B12 / 8 * ((-3 * sig_1_z) + sig_2_z + sig_3_z + sig_4_z) \ 117 | + B23 / 4 * ((-1 * sig_1_z) - sig_2_z + sig_3_z + sig_4_z) \ 118 | + B34 / 8 * ((-1 * sig_1_z) - sig_2_z - sig_3_z + (3 * sig_4_z)) 119 | 120 | # h_drift += B_G / 2 * (sig_1_z + sig_2_z + sig_3_z + sig_4_z) 121 | # this contribution is 0 on the subspace. 122 | 123 | h_drift = DenseOperator(h_drift.data[sub_space_ind]) 124 | 125 | h_ctrl = [DenseOperator(np.zeros((2 ** 4, 2 ** 4))) for _ in range(3)] 126 | 127 | for pauli in [SIGMA_X, SIGMA_Y, SIGMA_Z]: 128 | h_ctrl[0] += .25 * pauli.kron(SIGMA_0).kron(SIGMA_0).kron(SIGMA_0) \ 129 | * SIGMA_0.kron(pauli).kron(SIGMA_0).kron(SIGMA_0) 130 | 131 | h_ctrl[1] += .25 * SIGMA_0.kron(pauli).kron(SIGMA_0).kron(SIGMA_0) \ 132 | * SIGMA_0.kron(SIGMA_0).kron(pauli).kron(SIGMA_0) 133 | 134 | h_ctrl[2] += .25 * SIGMA_0.kron(SIGMA_0).kron(pauli).kron(SIGMA_0) \ 135 | * SIGMA_0.kron(SIGMA_0).kron(SIGMA_0).kron(pauli) 136 | 137 | for ctrl in range(3): 138 | h_ctrl[ctrl] = DenseOperator(h_ctrl[ctrl].data[sub_space_ind]) 139 | h_ctrl[ctrl] = h_ctrl[ctrl] - h_ctrl[ctrl].tr() / \ 140 | d_subspace * DenseOperator(np.eye(d_subspace)) 141 | 142 | initial_state = DenseOperator(np.eye(6)) 143 | initial_state_sup_op = DenseOperator(np.eye(36)) 144 | 145 | 146 | h_noise_electric = h_ctrl 147 | h_noise_magnetic = [ 148 | 1 / 8 * ((-3 * sig_1_z) + sig_2_z + sig_3_z + sig_4_z), 149 | 1 / 4 * ((-1 * sig_1_z) - sig_2_z + sig_3_z + sig_4_z), 150 | 1 / 8 * ((-1 * sig_1_z) - sig_2_z - sig_3_z + (3 * sig_4_z))] 151 | 152 | for i in range(len(h_noise_magnetic)): 153 | h_noise_magnetic[i] = type(h_noise_magnetic[i])( 154 | h_noise_magnetic[i].data[sub_space_ind]) 155 | 156 | lindbladians = [DenseOperator(np.zeros((2 ** 4, 2 ** 4))) for _ in range(3)] 157 | 158 | for pauli in [SIGMA_X, SIGMA_Y, SIGMA_Z]: 159 | lindbladians[0] += pauli.kron(SIGMA_0).kron(SIGMA_0).kron(SIGMA_0) \ 160 | * SIGMA_0.kron(pauli).kron(SIGMA_0).kron(SIGMA_0) 161 | 162 | lindbladians[1] += SIGMA_0.kron(pauli).kron(SIGMA_0).kron(SIGMA_0) \ 163 | * SIGMA_0.kron(SIGMA_0).kron(pauli).kron(SIGMA_0) 164 | 165 | lindbladians[2] += SIGMA_0.kron(SIGMA_0).kron(pauli).kron(SIGMA_0) \ 166 | * 
SIGMA_0.kron(SIGMA_0).kron(SIGMA_0).kron(pauli) 167 | 168 | diss_op = [[], [], []] 169 | 170 | for i in range(len(lindbladians)): 171 | lindbladians[i] = DenseOperator( 172 | lindbladians[i].data[sub_space_ind]) 173 | 174 | diss_op[i] = lindbladians[i].conj(do_copy=True).kron(lindbladians[i]) 175 | diss_op[i] -= .5 * lindbladians[i].identity_like().kron( 176 | lindbladians[i].dag(do_copy=True) * lindbladians[i]) 177 | diss_op[i] -= .5 * (lindbladians[i].conj(do_copy=True).dag(do_copy=True) 178 | * lindbladians[i].conj()).kron( 179 | lindbladians[i].identity_like()) 180 | 181 | normation_factor = 1 / ((2 * np.pi) ** 2) / (EPSILON_0 ** 2) 182 | 183 | 184 | def create_diss_super_op_fkt(noise=S_fast_noise[0]): 185 | def diss_super_op_fkt_(_, transferred_parameters: np.ndarray): 186 | derivatives = deriv_exchange_interaction(transferred_parameters) 187 | derivatives = derivatives ** 2 188 | diss_super_op = [] 189 | for t in range(transferred_parameters.shape[0]): 190 | # the factor of 2pi accounts for the fact that S is given per 191 | # frequency but we need angular frequencies. 192 | diss_super_op.append( 193 | noise * normation_factor * derivatives[ 194 | t, 0] * diss_op[0]) 195 | for k in range(1, len(diss_op)): 196 | diss_super_op[-1] += noise * normation_factor \ 197 | * derivatives[t, k] * diss_op[k] 198 | return diss_super_op 199 | return diss_super_op_fkt_ 200 | 201 | 202 | def diss_super_op_fkt(_, transferred_parameters: np.ndarray): 203 | derivatives = deriv_exchange_interaction(transferred_parameters) 204 | derivatives = derivatives ** 2 205 | diss_super_op = [] 206 | for t in range(transferred_parameters.shape[0]): 207 | # the factor of 2pi accounts for the fact that S is given per 208 | # frequency but we need angular frequencies. 209 | diss_super_op.append( 210 | S_fast_noise[0] * normation_factor * derivatives[ 211 | t, 0] * diss_op[0]) 212 | for k in range(1, len(diss_op)): 213 | diss_super_op[-1] += S_fast_noise[k] * normation_factor \ 214 | * derivatives[t, k] * diss_op[k] 215 | return diss_super_op 216 | 217 | 218 | def diss_super_op_deriv_fkt(_, transferred_parameters: np.ndarray): 219 | derivatives = deriv_exchange_interaction(transferred_parameters) 220 | derivatives = (np.abs(derivatives) ** 2) * 2 221 | diss_super_op_deriv = [] 222 | for t in range(transferred_parameters.shape[0]): 223 | diss_super_op_deriv.append( 224 | [S_fast_noise[0] * normation_factor * derivatives[ 225 | t, 0] * diss_op[ 226 | 0], ]) 227 | for k in range(1, len(diss_op)): 228 | diss_super_op_deriv[-1].append( 229 | S_fast_noise[k] * normation_factor * derivatives[t, k] * 230 | diss_op[k]) 231 | return diss_super_op_deriv 232 | 233 | 234 | OPERATORS = { 235 | 'CNOT_4': CNOT_4, 236 | 'h_ctrl': h_ctrl, 237 | 'h_drift': h_drift, 238 | 'h_noise_magnetic': h_noise_magnetic, 239 | 'initial_state': initial_state, 240 | 'initial_state_sup_op': initial_state_sup_op 241 | } 242 | -------------------------------------------------------------------------------- /temp/ff_testutil.py: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # qopt 3 | # Copyright (C) 2019 Quantum Technology Group, RWTH Aachen University 4 | # 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 
9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with this program. If not, see . 17 | # 18 | # Contact email: tobias.hangleiter@rwth-aachen.de 19 | # ============================================================================= 20 | """ 21 | This module defines some testing utilities. 22 | """ 23 | 24 | import unittest 25 | import os 26 | from pathlib import Path 27 | 28 | import numpy as np 29 | import qutip as qt 30 | from numpy.random import randn 31 | from numpy.testing import assert_allclose, assert_array_equal 32 | from scipy.io import loadmat 33 | from scipy.linalg import expm 34 | 35 | from filter_functions import util 36 | 37 | 38 | class TestCase(unittest.TestCase): 39 | 40 | def assertCorrectDiagonalization(self, x, rtol=1e-7, atol=0, 41 | equal_nan=True, err_msg='', verbose=True): 42 | """ 43 | Assert eigenvalues and eigenvectors of PulseSequence fulfill 44 | characteristic equation. 45 | """ 46 | try: 47 | H = np.einsum('ijk,il->ljk', x.c_opers, x.c_coeffs) 48 | except AttributeError: 49 | raise ValueError('Can only work with PulseSequences') 50 | 51 | for i, H in enumerate(H): 52 | self.assertArrayAlmostEqual(x._HV[i].conj().T @ H @ x._HV[i], 53 | np.diag(x._HD[i]), err_msg=err_msg, 54 | atol=atol, rtol=rtol, verbose=verbose, 55 | equal_nan=equal_nan) 56 | 57 | def assertArrayEqual(self, x, y, err_msg='', verbose=True): 58 | """ 59 | Wraps numpy.testing.assert_array_equal 60 | """ 61 | assert_array_equal(x, y, err_msg, verbose) 62 | 63 | def assertArrayAlmostEqual(self, actual, desired, rtol=1e-7, atol=0, 64 | equal_nan=True, err_msg='', verbose=True): 65 | """ 66 | Wraps numpy.testing.assert_allclose 67 | """ 68 | # Catch trying to compare to Nones 69 | if actual is not None and desired is not None: 70 | assert_allclose(actual, desired, rtol, atol, equal_nan, err_msg, 71 | verbose) 72 | elif (actual is None and desired is not None or 73 | actual is not None and desired is None): 74 | raise AssertionError('One of {} or {} '.format(actual, desired) + 75 | 'is None but the other not!') 76 | else: 77 | assert_array_equal(actual, desired, err_msg, verbose) 78 | 79 | 80 | def generate_dd_hamiltonian(n, tau=10, tau_pi=1e-2, dd_type='cpmg', 81 | pulse_type='primitive'): 82 | """ 83 | Generate a Hamiltonian in the correct format as required by PulseSequence 84 | for a dynamical decoupling sequence of duration *tau* and order *n*. 85 | *pulse_type* toggles between a primitive NOT-pulse and a dynamically 86 | corrected gate. 
87 | """ 88 | if dd_type == 'cpmg': 89 | delta = np.array([0] + [(l - 0.5)/n for l in range(1, n+1)]) 90 | elif dd_type == 'udd': 91 | delta = np.array( 92 | [0] + [np.sin(np.pi*l/(2*n + 2))**2 for l in range(1, n+1)] 93 | ) 94 | 95 | if pulse_type == 'primitive': 96 | tau_p = tau_pi 97 | s_p = np.pi/tau_pi*np.array([0, 1]) 98 | t_p = tau_pi*np.array([0, 1]) 99 | elif pulse_type == 'dcg': 100 | tau_p = 4*tau_pi 101 | s_p = np.pi/tau_pi*np.array([0, 1, 0.5, 1]) 102 | t_p = np.array([0, tau_pi, 2*tau_pi, tau_pi]).cumsum() 103 | 104 | s = np.array([]) 105 | t = np.array([0]) 106 | for i in range(n): 107 | s = np.append(s, s_p) 108 | t = np.append(t, t_p + (delta*tau)[i+1] - tau_p/2) 109 | t = np.append(t, tau) 110 | s = np.append(s, 0) 111 | 112 | H = [[qt.sigmax()/2, s]] 113 | return H, np.diff(t) 114 | 115 | 116 | def rand_herm(d: int, n: int = 1) -> np.ndarray: 117 | """n random Hermitian matrices of dimension d""" 118 | A = randn(n, d, d) + 1j*randn(n, d, d) 119 | return (A + A.conj().transpose([0, 2, 1])).squeeze() 120 | 121 | 122 | def rand_unit(d: int, n: int = 1) -> np.ndarray: 123 | """n random unitary matrices of dimension d""" 124 | H = rand_herm(d, n) 125 | if n == 1: 126 | return expm(1j*H) 127 | else: 128 | return np.array([expm(1j*h) for h in H]) 129 | 130 | 131 | # Set up Hamiltonian for CNOT gate 132 | data_path = r"C:\Users\Inst2C\Documents\python-filter_functions\examples\data" 133 | struct = loadmat(os.path.join(data_path, 'CNOT.mat')) 134 | eps = np.asarray(struct['eps'], order='C') 135 | dt = np.asarray(struct['t'].ravel(), order='C') 136 | B = np.asarray(struct['B'].ravel(), order='C') 137 | B_avg = struct['BAvg'].ravel() 138 | cnot_infid_fast = struct['infid_fast'].ravel() 139 | 140 | J = np.exp(eps) 141 | n_dt = len(dt) 142 | 143 | d = 16 144 | H = np.empty((6, d, d), dtype=float) 145 | Id, Px, Py, Pz = util.P_np 146 | # Exchange Hamiltonians 147 | H[0] = 1/4*sum(util.tensor(P, P, Id, Id) for P in (Px, Py, Pz)).real 148 | H[1] = 1/4*sum(util.tensor(Id, P, P, Id) for P in (Px, Py, Pz)).real 149 | H[2] = 1/4*sum(util.tensor(Id, Id, P, P) for P in (Px, Py, Pz)).real 150 | # Zeeman Hamiltonians 151 | H[3] = 1/8*(util.tensor(Pz, Id, Id, Id)*(-3) + 152 | util.tensor(Id, Pz, Id, Id) + 153 | util.tensor(Id, Id, Pz, Id) + 154 | util.tensor(Id, Id, Id, Pz)).real 155 | H[4] = 1/4*(util.tensor(Pz, Id, Id, Id)*(-1) + 156 | util.tensor(Id, Pz, Id, Id)*(-1) + 157 | util.tensor(Id, Id, Pz, Id) + 158 | util.tensor(Id, Id, Id, Pz)).real 159 | H[5] = 1/8*(util.tensor(Pz, Id, Id, Id)*(-1) + 160 | util.tensor(Id, Pz, Id, Id)*(-1) + 161 | util.tensor(Id, Id, Pz, Id)*(-1) + 162 | util.tensor(Id, Id, Id, Pz)*3).real 163 | # Mean Magnetic field 164 | H0 = B_avg/2*sum(util.tensor(*np.roll((Pz, Id, Id, Id), shift=i, axis=0)) 165 | for i in range(4)).real 166 | 167 | opers = [*list(H), H0] 168 | # Reduce to 6x6 subspace 169 | subspace = ([3, 5, 6, 9, 10, 12], [3, 5, 6, 9, 10, 12]) 170 | d_subspace = 6 171 | # H0 is zero on the subspace 172 | subspace_opers = [H[np.ix_(*subspace)] for H in H] 173 | 174 | # Subtract identity to make Hamiltonian traceless 175 | subspace_opers = [oper - np.trace(oper)/d_subspace*np.eye(d_subspace) 176 | for oper in subspace_opers] 177 | 178 | c_coeffs = [J[0], 179 | J[1], 180 | J[2], 181 | B[0]*np.ones(n_dt), 182 | B[1]*np.ones(n_dt), 183 | B[2]*np.ones(n_dt)] 184 | 185 | n_coeffs = [J[0], 186 | J[1], 187 | J[2], 188 | np.ones(n_dt), 189 | np.ones(n_dt), 190 | np.ones(n_dt)] 191 | 192 | # %% Noise spectrum from Dial et al (1/f^(0, 0.7)) 193 | 194 | eps0 = 2.7241e-4 
195 | # S(f) = A f^{-\alpha} 196 | alpha = np.array([0, 0.7]) 197 | # At f = 1 MHz = 1e-3 GHz, S = S_0 = 4e-20 V^2/Hz = 4e-11 1/GHz 198 | # Correspondingly, S(\omega) = A \omega^{-\alpha} such that at 199 | # \omega = 2\pi 10^{-3} GHz, S = S_0 = 4e-11 1/GHz 200 | S0 = 4e-11/eps0**2 201 | A = S0*(2*np.pi*1e-3)**alpha 202 | # Get S(\omega) like so: 203 | # S = [A/omega**alpha for A, alpha in zip(A, alpha)] 204 | -------------------------------------------------------------------------------- /temp/reproduce_previous_infids.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import unittest 3 | import os 4 | 5 | from scipy.io import loadmat 6 | import filter_functions as ff 7 | import temp.ff_testutil as ff_testutil 8 | 9 | from qopt.transfer_function import OversamplingMTF 10 | from qopt.amplitude_functions import UnaryAnalyticAmpFunc 11 | from qopt.solver_algorithms import SchroedingerSolver, \ 12 | LindbladSolver, SchroedingerSMCControlNoise 13 | from qopt.noise import NTGColoredNoise 14 | from qopt.cost_functions import OperationInfidelity, \ 15 | OperationNoiseInfidelity, OperatorFilterFunctionInfidelity 16 | 17 | from temp.GaAsExchangeCoupledCerf import \ 18 | CONSTANTS, OPERATORS, INFIDELITIES, REQUIRED_ACCURACY, \ 19 | create_diss_super_op_fkt 20 | import temp.GaAsExchangeCoupledCerf as GaAs 21 | 22 | 23 | # subspace 24 | sub_space_ind = np.ix_([3, 5, 6, 9, 10, 12], [3, 5, 6, 9, 10, 12]) 25 | comp_sub_sub_space_ind = [1, 2, 3, 4] 26 | 27 | # Data loaded by Tobias 28 | data_path = r"C:\Users\Inst2C\Documents\python-filter_functions\examples\data" 29 | struct = loadmat(os.path.join(data_path, 'CNOT.mat')) 30 | eps = np.asarray(struct['eps'], order='C') 31 | dt = np.asarray(struct['t'].ravel(), order='C') 32 | cnot_infid_fast = struct['infid_fast'].ravel() 33 | B = np.asarray(struct['B'].ravel(), order='C') 34 | B_avg = struct['BAvg'].ravel() 35 | 36 | # Frequently used auxiliary classes 37 | id_tf = OversamplingMTF(num_ctrls=3) 38 | id_tf.set_times(dt) 39 | 40 | exp_amp_func = UnaryAnalyticAmpFunc( 41 | value_function=np.exp, derivative_function=np.exp) 42 | 43 | 44 | class CompareToPreviousResults(unittest.TestCase): 45 | """ 46 | Calculate infidelities in multiple ways and compare with the results of 47 | Dr. Cerfontaine. 48 | 49 | The entanglement fidelity is compared with the sum of the previous 50 | systematic deviation and the leakage, because in the previous optimization, 51 | the evolution was mapped to the closest unitary. 
52 | 53 | """ 54 | def test_filter_functions(self): 55 | 56 | # Basis for qubit subspace 57 | qubit_subspace_basis = ff.Basis( 58 | [np.pad(b, 1, 'constant') for b in ff.Basis.pauli(2)], 59 | skip_check=True, 60 | btype='Pauli' 61 | ) 62 | 63 | c_opers = ff_testutil.subspace_opers 64 | n_opers = c_opers 65 | c_coeffs, n_coeffs = ff_testutil.c_coeffs, ff_testutil.n_coeffs 66 | dt = ff_testutil.dt 67 | infid_MC = ff_testutil.cnot_infid_fast 68 | A = ff_testutil.A 69 | 70 | identifiers = ['eps_12', 'eps_23', 'eps_34', 'b_12', 'b_23', 'b_34'] 71 | H_c = list(zip(c_opers[:3] + [c_opers[3] + 7 * c_opers[4] - c_opers[5]], 72 | c_coeffs[:3] + [c_coeffs[3]], identifiers[:4])) 73 | H_n = list(zip(n_opers[:3], n_coeffs[:3], identifiers[:3])) 74 | cnot = ff.PulseSequence(H_c, H_n, dt, basis=qubit_subspace_basis) 75 | 76 | T = dt.sum() 77 | omega = np.logspace(np.log10(1/T), 2, 125) 78 | S_t, omega_t = ff.util.symmetrize_spectrum(A[0]/omega**0.0, omega) 79 | infid, xi = ff.infidelity(cnot, S_t, omega_t, identifiers[:3], 80 | return_smallness=True) 81 | # infid scaled with d = 6, but we actually have d = 4 82 | infid *= 1.5 83 | self.assertLessEqual(np.abs(1 - (infid.sum()/infid_MC[0])), .4) 84 | self.assertLessEqual(infid.sum(), xi**2/4) 85 | 86 | time_slot_comp_closed = SchroedingerSolver( 87 | h_drift=[OPERATORS['h_drift']] * len(dt), 88 | h_ctrl=OPERATORS['h_ctrl'], 89 | initial_state=OPERATORS['initial_state'], 90 | tau=list(dt), 91 | calculate_propagator_derivatives=True, 92 | exponential_method='spectral', 93 | is_skew_hermitian=True, 94 | transfer_function=id_tf, 95 | amplitude_function=exp_amp_func, 96 | filter_function_h_n=H_n, 97 | filter_function_basis=qubit_subspace_basis 98 | ) 99 | time_slot_comp_closed.set_optimization_parameters(eps.T) 100 | 101 | ff_infid = OperatorFilterFunctionInfidelity( 102 | solver=time_slot_comp_closed, 103 | noise_power_spec_density=S_t, 104 | omega=omega_t 105 | ) 106 | print(ff_infid.grad()) 107 | np.testing.assert_array_almost_equal(infid, ff_infid.costs()*1.5) 108 | 109 | def test_fast_noise_master_equation(self): 110 | """ 111 | Changing the oversampling does not change the result. 
112 | 113 | """ 114 | infids = [] 115 | for noise_damping in .1 * np.arange(10): 116 | diss_super_op_fkt = create_diss_super_op_fkt(4e-5 * noise_damping) 117 | diss_super_op_deriv_fkt = GaAs.diss_super_op_deriv_fkt 118 | 119 | time_slot_lindblad = LindbladSolver( 120 | h_drift=[OPERATORS['h_drift']] * len(dt), 121 | h_ctrl=OPERATORS['h_ctrl'], 122 | initial_state=OPERATORS['initial_state_sup_op'], tau=list(dt), 123 | is_skew_hermitian=False, super_operator_function=diss_super_op_fkt, 124 | super_operator_derivative_function=diss_super_op_deriv_fkt, 125 | exponential_method='Frechet', 126 | transfer_function=id_tf, 127 | amplitude_function=exp_amp_func) 128 | time_slot_lindblad.set_optimization_parameters(eps.T) 129 | sub_space_ind_super_op = [el1 + el2 * 6 130 | for el2 in comp_sub_sub_space_ind 131 | for el1 in comp_sub_sub_space_ind] 132 | 133 | control_me_infid = OperationInfidelity( 134 | solver=time_slot_lindblad, target=OPERATORS['CNOT_4'], 135 | fidelity_measure='entanglement', super_operator_formalism=True, 136 | index=['white_noise_lindblad'], 137 | computational_states=sub_space_ind_super_op 138 | ) 139 | infids.append(control_me_infid.costs()) 140 | self.assertLess( 141 | np.abs(1 - 4 / 5 * control_me_infid.costs() / INFIDELITIES[ 142 | 'fast_white']), REQUIRED_ACCURACY['fast_white_me']) 143 | -------------------------------------------------------------------------------- /temp/unrefactored_integration_tests/fidelity_computing_integration_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the derivative of the state fidelity. 3 | """ 4 | import numpy as np 5 | import unittest 6 | 7 | from qopt.matrix import DenseOperator 8 | from qopt.solver_algorithms import SchroedingerSolver 9 | from qopt.cost_functions import StateInfidelity 10 | from qopt.simulator import Simulator 11 | 12 | sigma_x = DenseOperator(np.asarray([[0, 1], [1, 0]])) 13 | sigma_y = DenseOperator(np.asarray([[0, -1j], [1j, 0]])) 14 | sigma_z = DenseOperator(np.asarray([[1, 0], [0, -1]])) 15 | 16 | n_time_steps = 5 17 | delta_t = .5 * np.pi 18 | 19 | up = DenseOperator(np.asarray([[1], [0]])) 20 | down = DenseOperator(np.asarray([[0], [1]])) 21 | 22 | schroedinger_solver = SchroedingerSolver( 23 | h_drift=[0 * sigma_x] * n_time_steps, 24 | h_ctrl=[sigma_x, sigma_y], 25 | tau=delta_t * np.ones(n_time_steps), 26 | initial_state=up 27 | ) 28 | 29 | 30 | class TestFidelitySchroedingerEq(unittest.TestCase): 31 | 32 | def test_state_fid(self): 33 | cost_fkt = StateInfidelity( 34 | schroedinger_solver, 35 | target=down 36 | ) 37 | 38 | simulator = Simulator( 39 | solvers=[schroedinger_solver, ], 40 | cost_funcs=[cost_fkt, ] 41 | ) 42 | 43 | np.random.seed(0) 44 | random_pulse = np.random.randn(5, 2) 45 | 46 | diff_norm, diff_rel = simulator.compare_numeric_to_analytic_gradient( 47 | pulse=random_pulse, 48 | delta_eps=1e-6 49 | ) 50 | self.assertLess(diff_norm, 1e-5) 51 | self.assertLess(diff_rel, 1e-5) 52 | -------------------------------------------------------------------------------- /temp/unrefactored_integration_tests/pure_dephasing_fidelity.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the entanglement fidelity of quasi static noise vs the analytic result 3 | for pure dephasing. 
4 | """ 5 | import numpy as np 6 | import unittest 7 | 8 | from qopt.noise import NTGQuasiStatic 9 | from qopt.matrix import DenseOperator 10 | from qopt.solver_algorithms import SchroedingerSMonteCarlo 11 | from qopt.cost_functions import OperationNoiseInfidelity 12 | 13 | 14 | class PureDephasing(unittest.TestCase): 15 | def test_quasi_static_noise_deterministic_sampling(self): 16 | """ The same problem has also been positively tested with two time 17 | steps. """ 18 | h_ctrl = [DenseOperator(np.diag([.5, -.5])), ] 19 | h_drift = [DenseOperator(np.zeros((2, 2)))] 20 | 21 | noise_levels = 1e-4 * np.arange(1, 101) 22 | actual_noise_levels = np.zeros((100,)) 23 | average_infids = np.zeros((100,)) 24 | 25 | for i, std_dev in enumerate(noise_levels): 26 | ntg = NTGQuasiStatic(standard_deviation=[std_dev, ], 27 | n_samples_per_trace=1, n_traces=200, 28 | sampling_mode='uncorrelated_deterministic') 29 | 30 | ctrl_amps = 2 * np.pi * np.ones((1, 1)) 31 | t_slot_comp = SchroedingerSMonteCarlo( 32 | h_drift=h_drift, 33 | h_ctrl=h_ctrl, 34 | initial_state=DenseOperator(np.eye(2)), 35 | tau=[1], 36 | h_noise=h_ctrl, 37 | noise_trace_generator=ntg 38 | ) 39 | t_slot_comp.set_optimization_parameters(ctrl_amps) 40 | 41 | quasi_static_infid = OperationNoiseInfidelity( 42 | solver=t_slot_comp, 43 | target=DenseOperator(np.eye(2)), 44 | neglect_systematic_errors=True, 45 | fidelity_measure='entanglement' 46 | ) 47 | average_infids[i] = quasi_static_infid.costs() * (2 / 3) 48 | actual_noise_levels[i] = np.std(ntg.noise_samples) 49 | 50 | self.assertLess( 51 | np.sum(np.abs((np.ones_like(average_infids) 52 | - average_infids / (noise_levels ** 2 / 6)))) / 100, 53 | 0.05) 54 | self.assertLess( 55 | np.sum(np.abs((np.ones_like(average_infids) 56 | - average_infids / ( 57 | actual_noise_levels ** 2 / 6)))) / 100, 58 | 1e-5) 59 | 60 | def test_quasi_static_noise_monte_carlo(self): 61 | np.random.seed(0) 62 | h_ctrl = [DenseOperator(np.diag([.5, -.5])), ] 63 | h_drift = [DenseOperator(np.zeros((2, 2)))] 64 | 65 | n_noise_values = 20 66 | noise_levels = 1e-4 * np.arange(1, n_noise_values + 1) 67 | actual_noise_levels = np.zeros((n_noise_values,)) 68 | average_infids = np.zeros((n_noise_values,)) 69 | 70 | for i, std_dev in enumerate(noise_levels): 71 | ntg = NTGQuasiStatic(standard_deviation=[std_dev, ], 72 | n_samples_per_trace=1, n_traces=2000, 73 | sampling_mode='monte_carlo') 74 | 75 | ctrl_amps = 2 * np.pi * np.ones((1, 1)) * 0 76 | t_slot_comp = SchroedingerSMonteCarlo( 77 | h_drift=h_drift, 78 | h_ctrl=h_ctrl, 79 | initial_state=DenseOperator(np.eye(2)), 80 | tau=[1], 81 | h_noise=h_ctrl, 82 | noise_trace_generator=ntg 83 | ) 84 | t_slot_comp.set_optimization_parameters(ctrl_amps) 85 | 86 | quasi_static_infid = OperationNoiseInfidelity( 87 | solver=t_slot_comp, 88 | target=DenseOperator(np.eye(2)), 89 | neglect_systematic_errors=False, 90 | fidelity_measure='entanglement' 91 | ) 92 | average_infids[i] = quasi_static_infid.costs() * (2 / 3) 93 | actual_noise_levels[i] = np.std(ntg.noise_samples) 94 | 95 | self.assertLess( 96 | np.sum(np.abs((np.ones_like(average_infids) 97 | - average_infids / (noise_levels ** 2 / 6)))) / 100, 98 | 0.05) 99 | self.assertLess( 100 | np.sum(np.abs((np.ones_like(average_infids) 101 | - average_infids / ( 102 | actual_noise_levels ** 2 / 6)))) / 100, 103 | 0.05) 104 | 105 | 106 | """ 107 | import matplotlib.pyplot as plt 108 | plt.figure() 109 | plt.plot(noise_levels, average_infids) 110 | plt.plot(noise_levels, noise_levels ** 2 / 6) 111 | plt.plot(noise_levels, 
actual_noise_levels ** 2 / 6) 112 | plt.legend(['simulation', 'analytic_calculation', 'analytic_actual_noise']) 113 | p = np.polyfit(noise_levels, average_infids, deg=2) 114 | """ 115 | -------------------------------------------------------------------------------- /temp/unrefactored_integration_tests/rabi_driving_lab_frame.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plots a t2-star decay and verifies its shape. 3 | """ 4 | 5 | from qopt.matrix import DenseOperator 6 | from qopt.noise import NTGQuasiStatic 7 | from qopt.solver_algorithms import SchroedingerSMonteCarlo, LindbladSolver, \ 8 | SchroedingerSolver 9 | from qopt.amplitude_functions import CustomAmpFunc 10 | from qopt.transfer_function import OversamplingMTF 11 | 12 | import qopt.examples.rabi_driving.setup as rabi 13 | import numpy as np 14 | import scipy.optimize 15 | 16 | from unittest import TestCase 17 | 18 | 19 | class Lab_frame_rabi_driving(TestCase): 20 | def quasi_static_noise(self): 21 | 22 | sigma_z = DenseOperator(np.asarray([[1, 0], [0, -1]])) 23 | sigma_x = DenseOperator(np.asarray([[0, 1], [1, 0]])) 24 | h_drift = DenseOperator(np.zeros((2, 2))) 25 | 26 | # reference_frequency = 20e9 * 2 * np.pi 27 | reference_frequency = 100e6 * 2 * np.pi 28 | driving_frequency = 1e6 * 2 * np.pi 29 | 30 | # 100 per reference period 31 | # int(reference_frequency / driving_frequency) to make one driving period 32 | # 20 driving periods 33 | n_time_steps = int(35 * reference_frequency / driving_frequency * 20) 34 | n_noise_traces = 100 # int(10 * 60 * 1e6 / 35) 35 | evolution_time = 35e-6 36 | 37 | delta_t = evolution_time / n_time_steps 38 | 39 | down = np.asarray([[0], [1]]) 40 | up = np.asarray([[1], [0]]) 41 | x_half = rabi.x_half.data 42 | 43 | projector_left = up.T 44 | projector_right = up 45 | 46 | def up_amplitude(unitary): 47 | probability = projector_left @ unitary.data @ projector_right 48 | return np.abs(probability) ** 2 49 | 50 | ctrl_amps = delta_t * np.arange(1, 1 + n_time_steps) 51 | ctrl_amps = driving_frequency * np.sin(reference_frequency * ctrl_amps) 52 | 53 | 54 | def rabi_driving(transferred_parameters, **_): 55 | ctrl_amps = delta_t * np.arange(1, 1 + n_time_steps) 56 | ctrl_amps = 2 * np.sin(reference_frequency * ctrl_amps) 57 | # times 2 because the rabi frequency is .5 * Amplitude 58 | ctrl_amps = np.einsum("tc, t->tc", transferred_parameters, ctrl_amps) 59 | return ctrl_amps 60 | 61 | 62 | def rabi_driving_noise(noise_samples, **_): 63 | ctrl_amps = delta_t * np.arange(1, 1 + n_time_steps) 64 | ctrl_amps = 2 * np.sin(reference_frequency * ctrl_amps) 65 | ctrl_amps = np.einsum("sno, t->tno", noise_samples, ctrl_amps) 66 | return ctrl_amps 67 | 68 | 69 | rabi_driving_amp_func = CustomAmpFunc(value_function=rabi_driving, 70 | derivative_function=None) 71 | id_transfer_func = OversamplingMTF(oversampling=n_time_steps) 72 | id_transfer_func.set_times(np.asarray([evolution_time])) 73 | 74 | 75 | ts_comp_unperturbed = SchroedingerSolver( 76 | h_drift=[reference_frequency * .5 * sigma_z, ] * n_time_steps, 77 | h_ctrl=[.5 * sigma_x, ], 78 | initial_state=DenseOperator(np.eye(2)), 79 | tau=[delta_t, ] * n_time_steps, 80 | exponential_method='Frechet', 81 | 82 | ) 83 | 84 | ts_comp_lindblad = LindbladSolver( 85 | h_drift=[reference_frequency * .5 * sigma_z, ] * n_time_steps, 86 | h_ctrl=[.5 * sigma_x, ], 87 | initial_state=DenseOperator(np.eye(2)), 88 | tau=[delta_t, ] * n_time_steps, 89 | exponential_method='Frechet' 90 | ) 91 | 92 | 
ts_comp_unperturbed.set_optimization_parameters(np.expand_dims(ctrl_amps, 1)) 93 | 94 | 95 | """ 96 | # unperturbed: 97 | forward_propagators = ts_comp_unperturbed.forward_propagators 98 | 99 | propabilities = np.zeros((n_time_steps, )) 100 | for j in range(n_time_steps): 101 | propabilities[j] = up_amplitude(forward_propagators[j]) 102 | 103 | plt.figure() 104 | plt.plot(delta_t * np.arange(n_time_steps), propabilities) 105 | """ 106 | 107 | # Tom 108 | # todo: he seems to assume angular frequencies in his spectrum 109 | S_01 = 3e8 110 | S_02 = 3e4 111 | # S(f) = S_01 / f + S_02 / f^2 112 | 113 | f_min = 1 / 10 / 60 # 1 over 10 minutes 114 | f_max = 1 / 35e-6 115 | 116 | variance_f = S_01 * (np.log(f_max) - np.log(f_min)) \ 117 | - S_02 * (1 / f_max - 1 / f_min) 118 | sigma_f = np.sqrt(variance_f) 119 | 120 | 121 | """ 122 | # Yoneda 123 | S_0 = 3.2 * 1e6 * 4 * np.pi * np.pi 124 | 125 | f_min = 1e-2 126 | f_max = 1 / 35e-6 127 | 128 | variance_f = S_0 * (np.log(f_max) - np.log(f_min)) 129 | sigma_f = np.sqrt(variance_f) # 29 kHz 130 | """ 131 | expected_t2star = np.sqrt(2 / variance_f) 132 | 133 | ntg = NTGQuasiStatic( 134 | standard_deviation=[sigma_f, ], 135 | n_samples_per_trace=1, 136 | n_traces=n_noise_traces, 137 | always_redraw_samples=False, 138 | sampling_mode='monte_carlo' 139 | ) 140 | 141 | 142 | tslot_comp = SchroedingerSMonteCarlo( 143 | h_drift=[reference_frequency * .5 * sigma_z, ] * n_time_steps, 144 | h_ctrl=[.5 * sigma_x, ], 145 | h_noise=[.5 * sigma_x], 146 | initial_state=DenseOperator(np.eye(2)), 147 | tau=[delta_t, ] * n_time_steps, 148 | noise_trace_generator=ntg, 149 | exponential_method='Frechet', 150 | transfer_function=id_transfer_func, 151 | amplitude_function=rabi_driving_amp_func, 152 | noise_amplitude_function=rabi_driving_noise 153 | ) 154 | 155 | """ 156 | # for the rotating frame 157 | delta_rabi = 1.5 / 10 * 1e6 158 | tslot_comp.set_optimization_parameters( 159 | (2 * np.pi * delta_rabi) * np.ones((n_time_steps, 1))) 160 | """ 161 | tslot_comp.set_optimization_parameters(np.asarray([[driving_frequency]])) 162 | 163 | forward_propagators = tslot_comp.forward_propagators_noise 164 | 165 | propabilities = np.zeros((n_noise_traces, n_time_steps)) 166 | for i in range(n_noise_traces): 167 | for j in range(n_time_steps): 168 | propabilities[i, j] = up_amplitude(forward_propagators[i][j]) 169 | 170 | propabilities = np.mean(propabilities, axis=0) 171 | 172 | """ 173 | def t2star_decay(t, delta_f, t2_star): 174 | return .5 * np.exp(-(t / t2_star) ** 2) * np.cos( 175 | 2 * np.pi * delta_f * t) + .5 176 | """ 177 | 178 | 179 | def t2star_decay(t, sigma_driving): 180 | return .5 * np.exp(-.5 * (sigma_driving * t) ** 2) * np.cos(driving_frequency * t) + .5 181 | 182 | 183 | def t2star_decay_2(t, sigma_driving): 184 | return .5 * (1 + (sigma_driving ** 2 / driving_frequency * t)) ** -.25 * np.cos(driving_frequency * t) + .5 185 | 186 | 187 | def t2star_decay_3(t, sigma_driving, sigma_ref): 188 | up_prop = np.exp(-.5 * (sigma_driving * t) ** 2) 189 | up_prop *= (1 + (sigma_ref ** 2 / driving_frequency * t) ** 2) ** -.25 190 | up_prop *= .5 * np.cos(driving_frequency * t) 191 | up_prop += .5 192 | return up_prop 193 | 194 | 195 | def t2star_decay_4(t, sigma_driving, sigma_ref, periodicity): 196 | up_prop = np.exp(-.5 * (sigma_driving * t) ** 2) 197 | up_prop *= (1 + ((sigma_ref ** 2) / periodicity * t) ** 2) ** -.25 198 | up_prop *= .5 * np.cos(periodicity * t) 199 | up_prop += .5 200 | return up_prop 201 | 202 | 203 | def t2star_decay_5(t, sigma_driving, sigma_ref, 
periodicity, lin_decay): 204 | up_prop = np.exp(-.5 * (sigma_driving * t) ** 2) 205 | up_prop *= np.exp(-1 * lin_decay * t) 206 | up_prop *= (1 + ((sigma_ref ** 2) / periodicity * t) ** 2) ** -.25 207 | up_prop *= .5 * np.cos(periodicity * t) 208 | up_prop += .5 209 | return up_prop 210 | 211 | 212 | popt, pcov = scipy.optimize.curve_fit( 213 | t2star_decay_3, 214 | xdata=delta_t * np.arange(n_time_steps), 215 | ydata=propabilities, 216 | p0=np.asarray([sigma_f, sigma_f]) 217 | ) 218 | 219 | 220 | popt, pcov = scipy.optimize.curve_fit( 221 | t2star_decay_5, 222 | xdata=delta_t * np.arange(n_time_steps), 223 | ydata=propabilities, 224 | p0=np.asarray([sigma_f, sigma_f, driving_frequency, sigma_f]) 225 | ) 226 | 227 | self.assertLess(np.linalg.norm( 228 | propabilities - t2star_decay(delta_t * np.arange(n_time_steps), 229 | sigma_driving=sigma_f)) / len(propabilities), 1e-3) 230 | 231 | """ 232 | plt.figure() 233 | plt.plot(delta_t * np.arange(n_time_steps), propabilities) 234 | 235 | plt.plot( 236 | delta_t * np.arange(n_time_steps), 237 | t2star_decay_3(delta_t * np.arange(n_time_steps), 238 | sigma_driving=popt[0], 239 | sigma_ref=popt[1]) 240 | ) 241 | 242 | plt.plot( 243 | delta_t * np.arange(n_time_steps), 244 | t2star_decay(delta_t * np.arange(n_time_steps), 245 | sigma_driving=sigma_f) 246 | ) 247 | 248 | 249 | plt.plot( 250 | delta_t * np.arange(n_time_steps), 251 | t2star_decay_3(delta_t * np.arange(n_time_steps), 252 | sigma_driving=sigma_f, 253 | sigma_ref=sigma_f) 254 | ) 255 | 256 | plt.plot( 257 | delta_t * np.arange(n_time_steps), 258 | t2star_decay_5(delta_t * np.arange(n_time_steps), 259 | sigma_driving=popt[0], 260 | sigma_ref=popt[1], 261 | periodicity=popt[2], 262 | lin_decay=popt[3]) 263 | ) 264 | """ 265 | -------------------------------------------------------------------------------- /temp/unrefactored_integration_tests/rabi_driving_rotating_frame.py: -------------------------------------------------------------------------------- 1 | """ 2 | Compares T2 decay vs analytic result. 
3 | """ 4 | from qopt.matrix import DenseOperator 5 | from qopt.noise import NTGQuasiStatic 6 | from qopt.solver_algorithms import SchroedingerSMonteCarlo, LindbladSolver 7 | 8 | import qopt.examples.rabi_driving.setup as rabi 9 | import numpy as np 10 | import scipy.optimize 11 | 12 | from unittest import TestCase 13 | 14 | 15 | sigma_z = DenseOperator(np.asarray([[1, 0], [0, -1]])) 16 | h_drift = DenseOperator(np.zeros((2, 2))) 17 | 18 | n_time_steps = 120 19 | n_noise_traces = 200 # int(10 * 60 * 1e6 / 35) 20 | evolution_time = 35e-6 21 | 22 | delta_t = evolution_time / n_time_steps 23 | delta_rabi = 1.5 / 10 * 1e6 24 | 25 | # Tom 26 | # todo: he seems to assume angular frequencies in his spectrum 27 | S_01 = 3e8 28 | S_02 = 3e4 29 | # S(f) = S_01 / f + S_02 / f^2 30 | 31 | f_min = 1 / 10 / 60 # 1 over 10 minutes 32 | f_max = 1 / 35e-6 33 | 34 | variance_f = S_01 * (np.log(f_max) - np.log(f_min)) \ 35 | - S_02 * (1 / f_max - 1 / f_min) 36 | sigma_f = np.sqrt(variance_f) 37 | 38 | variance_lindbladt = S_01 / delta_rabi 39 | """ 40 | # Yoneda 41 | S_0 = 3.2 * 1e6 * 4 * np.pi * np.pi 42 | 43 | f_min = 1e-2 44 | f_max = 1 / 35e-6 45 | 46 | variance_f = S_0 * (np.log(f_max) - np.log(f_min)) 47 | sigma_f = np.sqrt(variance_f) # 29 kHz 48 | """ 49 | 50 | down = np.asarray([[0, 1]]) 51 | up = np.asarray([[1], [0]]) 52 | x_half = rabi.x_half.data 53 | 54 | projector_left = down @ x_half 55 | projector_right = x_half @ up 56 | 57 | 58 | class RabiDrivingRotatingFrame(TestCase): 59 | 60 | def test_quasi_static_noise(self): 61 | 62 | expected_t2star = np.sqrt(2 / variance_f) 63 | 64 | ntg = NTGQuasiStatic( 65 | standard_deviation=[sigma_f, ], 66 | n_samples_per_trace=n_time_steps, 67 | n_traces=n_noise_traces, 68 | always_redraw_samples=False, 69 | sampling_mode='uncorrelated_deterministic' 70 | ) 71 | 72 | tslot_comp = SchroedingerSMonteCarlo( 73 | h_drift=[h_drift, ] * n_time_steps, 74 | h_ctrl=[.5 * sigma_z, ], 75 | h_noise=[.5 * sigma_z], 76 | initial_state=DenseOperator(np.eye(2)), 77 | tau=[delta_t, ] * n_time_steps, 78 | noise_trace_generator=ntg, 79 | exponential_method='Frechet' 80 | ) 81 | 82 | def up_amplitude(unitary): 83 | probability = projector_left @ unitary.data @ projector_right 84 | return np.abs(probability) ** 2 85 | 86 | tslot_comp.set_optimization_parameters( 87 | (2 * np.pi * delta_rabi) * np.ones((n_time_steps, 1))) 88 | forward_propagators = tslot_comp.forward_propagators_noise 89 | 90 | propabilities = np.zeros((n_noise_traces, n_time_steps)) 91 | for i in range(n_noise_traces): 92 | for j in range(n_time_steps): 93 | propabilities[i, j] = up_amplitude(forward_propagators[i][j]) 94 | 95 | propabilities = np.mean(propabilities, axis=0) 96 | # plt.figure() 97 | # plt.plot(delta_t * np.arange(n_time_steps), propabilities, marker='.') 98 | 99 | def t2star_decay(t, delta_f, t2_star): 100 | return .5 * np.exp(-(t / t2_star) ** 2) * np.cos( 101 | 2 * np.pi * delta_f * t) + .5 102 | 103 | popt, pcov = scipy.optimize.curve_fit( 104 | t2star_decay, 105 | xdata=delta_t * np.arange(n_time_steps), 106 | ydata=propabilities, 107 | p0=np.asarray([delta_rabi, expected_t2star]) 108 | ) 109 | 110 | self.assertLess( 111 | np.linalg.norm( 112 | propabilities - t2star_decay( 113 | delta_t * np.arange(n_time_steps), 114 | popt[0], 115 | popt[1] 116 | ) 117 | ) / len(propabilities), 118 | 1e-3 119 | ) 120 | 121 | self.assertLess( 122 | np.abs((expected_t2star - popt[1]) / (expected_t2star + popt[1])), 123 | 1e-2 124 | ) 125 | 126 | """ 127 | plt.plot( 128 | delta_t * np.arange(n_time_steps), 
129 | t2star_decay(delta_t * np.arange(n_time_steps), 130 | delta_f=popt[0], 131 | t2_star=popt[1]) 132 | ) 133 | """ 134 | 135 | def test_fast_noise(self): 136 | 137 | def prefactor_function(transferred_parameters): 138 | return variance_lindbladt * np.ones_like( 139 | transferred_parameters) 140 | 141 | expected_t2_lindbladt = 2 / variance_lindbladt 142 | 143 | lindbladt_operators = [ 144 | .5 * DenseOperator(np.asarray([[0, 1], [1, 0]])), ] 145 | 146 | tslot_comp_lindblad = LindbladSolver( 147 | h_drift=[h_drift, ] * n_time_steps, 148 | h_ctrl=[.5 * sigma_z, ], 149 | initial_state=DenseOperator(np.eye(4)), 150 | tau=[delta_t, ] * n_time_steps, 151 | exponential_method='Frechet', 152 | lindblad_operators=lindbladt_operators, 153 | prefactor_function=prefactor_function 154 | ) 155 | 156 | tslot_comp_lindblad.set_optimization_parameters( 157 | (2 * np.pi * delta_rabi) * np.ones((n_time_steps, 1))) 158 | 159 | forward_propagators_lindbladt = tslot_comp_lindblad.forward_propagators 160 | 161 | def vec_to_density_matrix(vec: np.ndarray): 162 | return vec @ vec.conj().T  # outer product vec vec^dagger 163 | 164 | def linearize_matrix(matrix: np.ndarray): 165 | return matrix.T.flatten()  # column-stacking (vec) convention 166 | 167 | def vector_to_matrix(vec: np.ndarray): 168 | return vec.reshape((2, 2)).T  # inverse of the column-stacking above 169 | 170 | initial = linearize_matrix(vec_to_density_matrix(x_half @ up)) 171 | probabilities_lindbladt = np.zeros(len(forward_propagators_lindbladt)) 172 | probabilities_lindbladt_c = np.zeros(len(forward_propagators_lindbladt), 173 | dtype=complex) 174 | 175 | sigma_x = np.asarray([[0, 1], [1, 0]]) 176 | 177 | for i, prop in enumerate(forward_propagators_lindbladt): 178 | density = prop.data @ initial 179 | density = vector_to_matrix(density) 180 | probabilities_lindbladt[i] = np.trace(density @ sigma_x) 181 | probabilities_lindbladt_c[i] = np.trace(density @ sigma_x) 182 | 183 | def t2_time(t, t2): 184 | return -1 * np.sin(2 * np.pi * t * delta_rabi) \ 185 | * np.exp(-.5 * t / t2) 186 | 187 | popt, pcov = scipy.optimize.curve_fit( 188 | t2_time, 189 | xdata=delta_t * np.arange(n_time_steps + 1), 190 | ydata=probabilities_lindbladt, 191 | p0=np.asarray([expected_t2_lindbladt]) 192 | ) 193 | 194 | self.assertLess( 195 | np.linalg.norm(probabilities_lindbladt - t2_time( 196 | delta_t * np.arange(n_time_steps + 1), 197 | t2=popt[0])) / len(probabilities_lindbladt), 198 | 1e-6 199 | ) 200 | 201 | self.assertLess( 202 | np.abs( 203 | (expected_t2_lindbladt - popt[0]) 204 | / (expected_t2_lindbladt + popt[0])), 205 | 1e-6 206 | ) 207 | 208 | """ 209 | plt.figure() 210 | plt.plot(delta_t * np.arange(n_time_steps + 1), probabilities_lindbladt) 211 | plt.plot( 212 | delta_t * np.arange(n_time_steps + 1), 213 | t2_time(delta_t * np.arange(n_time_steps + 1), 214 | t2=popt[0]) 215 | ) 216 | 217 | plt.plot( 218 | delta_t * np.arange(n_time_steps + 1), 219 | t2_time(delta_t * np.arange(n_time_steps + 1), 220 | t2=2e-2 * expected_t2) 221 | ) 222 | """ 223 | -------------------------------------------------------------------------------- /temp/unrefactored_integration_tests/test_gradients.py: -------------------------------------------------------------------------------- 1 | """ 2 | This test uses Rabi driving as a test case. 
So far it covers: 3 | 4 | - Gradient of the entanglement fidelity 5 | - Gradient of the entanglement fidelity in the presence of quasi static noise 6 | """ 7 | 8 | import qopt.examples.rabi_driving.setup as rabi 9 | from qopt.solver_algorithms import SchroedingerSMonteCarlo 10 | from qopt.matrix import DenseOperator 11 | from qopt.cost_functions import OperationNoiseInfidelity, \ 12 | OperationInfidelity 13 | from qopt.simulator import Simulator 14 | from qopt.noise import NTGQuasiStatic 15 | import numpy as np 16 | import unittest 17 | 18 | 19 | class RabiTestCase(unittest.TestCase): 20 | def test_relative_gradients_xy(self): 21 | amp_bound = rabi.rabi_frequency_max / rabi.lin_freq_rel 22 | np.random.seed(0) 23 | initial_pulse = amp_bound * ( 24 | 2 * np.random.rand(rabi.n_time_samples, 2) - 1) 25 | 26 | ntg_quasi_static = NTGQuasiStatic( 27 | standard_deviation=[rabi.sigma_rabi, ], 28 | n_samples_per_trace=rabi.n_time_samples * rabi.oversampling, 29 | n_traces=10, 30 | always_redraw_samples=False, 31 | sampling_mode='uncorrelated_deterministic') 32 | 33 | tslot = SchroedingerSMonteCarlo( 34 | h_drift=[0 * rabi.h_drift, ], 35 | h_ctrl=rabi.h_ctrl, 36 | h_noise=[rabi.h_drift, ], 37 | noise_trace_generator=ntg_quasi_static, 38 | initial_state=DenseOperator(np.eye(2)), 39 | tau=[rabi.time_step, ] * rabi.n_time_samples, 40 | is_skew_hermitian=True, 41 | exponential_method='Frechet', 42 | transfer_function=rabi.exponential_transfer_function, 43 | amplitude_function=rabi.lin_amp_func 44 | ) 45 | 46 | entanglement_infid = OperationInfidelity( 47 | solver=tslot, 48 | target=rabi.x_half, 49 | fidelity_measure='entanglement', 50 | label=['Entanglement Fidelity QS-Noise XY-Control'] 51 | ) 52 | 53 | tslot_noise = SchroedingerSMonteCarlo( 54 | h_drift=[0 * rabi.h_drift, ], 55 | h_ctrl=rabi.h_ctrl, 56 | h_noise=[rabi.h_drift, ], 57 | noise_trace_generator=ntg_quasi_static, 58 | initial_state=DenseOperator(np.eye(2)), 59 | tau=[rabi.time_step, ] * rabi.n_time_samples, 60 | is_skew_hermitian=True, 61 | exponential_method='Frechet', 62 | transfer_function=rabi.exponential_transfer_function, 63 | amplitude_function=rabi.lin_amp_func 64 | ) 65 | 66 | entanglement_infid_qs_noise_xy = OperationNoiseInfidelity( 67 | solver=tslot_noise, 68 | target=rabi.x_half, 69 | fidelity_measure='entanglement', 70 | label=['Entanglement Fidelity QS-Noise XY-Control'], 71 | neglect_systematic_errors=True 72 | ) 73 | 74 | dynamics = Simulator( 75 | solvers=[tslot, ], 76 | cost_funcs=[entanglement_infid, ] 77 | ) 78 | 79 | dynamics_noise = Simulator( 80 | solvers=[tslot_noise, ], 81 | cost_funcs=[entanglement_infid_qs_noise_xy] 82 | ) 83 | 84 | _, rel_grad_deviation_unperturbed = \ 85 | dynamics.compare_numeric_to_analytic_gradient(initial_pulse) 86 | self.assertLess(rel_grad_deviation_unperturbed, 1e-6) 87 | 88 | _, rel_grad_deviation_qs_noise = \ 89 | dynamics_noise.compare_numeric_to_analytic_gradient(initial_pulse) 90 | self.assertLess(rel_grad_deviation_qs_noise, 1e-4) 91 | # This gradient calculation is numerically unstable and anti correlates 92 | # with the number of traces. 
93 | # 10 traces -> 20% 94 | # 100 traces -> 9% 95 | # if we use the scipy dexpm 96 | # 10 traces -> 5.4e-5 97 | 98 | def test_phase_control_gradient(self): 99 | amp_bound = rabi.rabi_frequency_max / rabi.lin_freq_rel 100 | phase_bound_upper = 50 / 180 * np.pi 101 | phase_bound_lower = -50 / 180 * np.pi 102 | 103 | def random_phase_control_pulse(n): 104 | amp = amp_bound * (2 * np.random.rand(n) - 1) 105 | phase = (phase_bound_upper - phase_bound_lower) \ 106 | * np.random.rand(n) \ 107 | - (phase_bound_upper - phase_bound_lower) / 2 108 | return np.concatenate( 109 | (np.expand_dims(amp, 1), np.expand_dims(phase, 1)), axis=1) 110 | 111 | dynamics_phase_control = Simulator( 112 | solvers=[rabi.solver_qs_noise_phase_control], 113 | cost_funcs=[rabi.entanglement_infid_phase_control] 114 | ) 115 | 116 | ntg_quasi_static = NTGQuasiStatic( 117 | standard_deviation=[rabi.sigma_rabi, ], 118 | n_samples_per_trace=rabi.n_time_samples * rabi.oversampling, 119 | n_traces=10, 120 | always_redraw_samples=False, 121 | sampling_mode='uncorrelated_deterministic') 122 | 123 | time_slot_comp_qs_noise_phase_control = SchroedingerSMonteCarlo( 124 | h_drift=[0 * rabi.h_drift, ], 125 | h_ctrl=rabi.h_ctrl, 126 | h_noise=[rabi.h_drift, ], 127 | noise_trace_generator=ntg_quasi_static, 128 | initial_state=DenseOperator(np.eye(2)), 129 | tau=[rabi.time_step, ] * rabi.n_time_samples, 130 | is_skew_hermitian=True, 131 | exponential_method='Frechet', 132 | transfer_function=rabi.identity_transfer_function, 133 | amplitude_function=rabi.phase_ctrl_amp_func 134 | ) 135 | 136 | entanglement_infid_qs_noise_phase_control = OperationNoiseInfidelity( 137 | solver=time_slot_comp_qs_noise_phase_control, 138 | target=rabi.x_half, 139 | fidelity_measure='entanglement', 140 | label=['Entanglement Fidelity QS-Noise Phase Control'], 141 | neglect_systematic_errors=True 142 | ) 143 | 144 | dynamics_phase_control_qs_noise = Simulator( 145 | solvers=[time_slot_comp_qs_noise_phase_control, ], 146 | cost_funcs=[entanglement_infid_qs_noise_phase_control, ] 147 | ) 148 | 149 | np.random.seed(0) 150 | inital_pulse = random_phase_control_pulse(rabi.n_time_samples) 151 | 152 | _, rel_grad_deviation_unperturbed = dynamics_phase_control.\ 153 | compare_numeric_to_analytic_gradient(inital_pulse) 154 | self.assertLess(rel_grad_deviation_unperturbed, 2e-6) 155 | 156 | _, rel_grad_deviation_qs_noise = dynamics_phase_control_qs_noise.\ 157 | compare_numeric_to_analytic_gradient(inital_pulse) 158 | self.assertLess(rel_grad_deviation_qs_noise, 5e-5) 159 | -------------------------------------------------------------------------------- /temp/unrefactored_integration_tests/test_optimizers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Integration test: optimizing xy-Rabi problem with various optimizers. 
3 | """ 4 | from qopt import * 5 | from qopt.examples.rabi_driving import setup as rabi_setup 6 | import unittest 7 | import numpy as np 8 | 9 | 10 | class TestOptimizers(unittest.TestCase): 11 | def test_assure_convergence_rabi_optimization(self): 12 | simulator = Simulator( 13 | solvers=[rabi_setup.solver_qs_noise_xy, ], 14 | cost_funcs=[rabi_setup.entanglement_infid_xy, 15 | rabi_setup.entanglement_infid_qs_noise_xy] 16 | ) 17 | 18 | optimizer = ScalarMinimizingOptimizer( 19 | system_simulator=simulator, 20 | cost_func_weights=[1, 1e2], 21 | bounds=rabi_setup.bounds_xy 22 | ) 23 | 24 | optimizer_no_jac = ScalarMinimizingOptimizer( 25 | system_simulator=simulator, 26 | cost_func_weights=[1, 1e2], 27 | use_jacobian_function=False, 28 | bounds=rabi_setup.bounds_xy 29 | ) 30 | 31 | optimizer_least_squares = LeastSquaresOptimizer( 32 | system_simulator=simulator, 33 | cost_func_weights=[1, 1e2], 34 | bounds=rabi_setup.bounds_xy_least_sq 35 | ) 36 | 37 | init_pulse = rabi_setup.random_xy_init_pulse(seed=1) 38 | result = optimizer.run_optimization(init_pulse) 39 | 40 | data_container = DataContainer() 41 | data_container.append_optim_result(result) 42 | analyzer = Analyser(data_container) 43 | #analyzer.plot_costs() 44 | 45 | result_no_jac = optimizer_no_jac.run_optimization(init_pulse) 46 | data_container2 = DataContainer() 47 | data_container2.append_optim_result(result_no_jac) 48 | analyzer2 = Analyser(data_container2) 49 | #analyzer2.plot_costs() 50 | 51 | result_least_squres = optimizer_least_squares.run_optimization(init_pulse) 52 | data_container3 = DataContainer() 53 | data_container3.append_optim_result(result_least_squres) 54 | analyzer3 = Analyser(data_container3) 55 | #analyzer3.plot_costs() 56 | 57 | self.assertLess(np.sum(result.final_cost), 1e-4) 58 | self.assertLess(np.sum(result_no_jac.final_cost), 1e-4) 59 | self.assertLess(np.sum(result_least_squres.final_cost), 2e-4) 60 | --------------------------------------------------------------------------------