├── .gitignore ├── LICENSE ├── README.md ├── notebooks ├── README.md ├── bell_state_tomography.ipynb ├── device_rb.ipynb ├── device_rb_w_lf.ipynb ├── dynamic_circuits_rb.ipynb ├── extract_benchmarks.ipynb ├── layer_fidelity.ipynb ├── layer_fidelity_placement.ipynb ├── layer_fidelity_single_chain.ipynb ├── mcm_rb.ipynb └── system_char.ipynb ├── pyproject.toml ├── qiskit_device_benchmarking ├── VERSION.txt ├── __init__.py ├── bench_code │ ├── __init__.py │ ├── bell │ │ ├── __init__.py │ │ └── bell_experiment.py │ ├── dynamic_circuits_rb │ │ ├── Readme.md │ │ ├── __init__.py │ │ └── dc_rb_experiment.py │ ├── mcm_rb │ │ ├── Readme.md │ │ ├── __init__.py │ │ └── mcm_rb_experiment.py │ ├── mrb │ │ ├── Readme.md │ │ ├── __init__.py │ │ ├── mirror_qv.py │ │ ├── mirror_qv_analysis.py │ │ ├── mirror_rb_analysis.py │ │ └── mirror_rb_experiment.py │ └── prb │ │ ├── Readme.md │ │ ├── __init__.py │ │ ├── pur_rb.py │ │ └── purrb_analysis.py ├── clops │ ├── README.md │ ├── __init__.py │ └── clops_benchmark.py ├── mirror_test │ ├── README.md │ ├── __init__.py │ ├── get_optimal_path.py │ ├── mirror_circuits.py │ ├── mirror_pub.py │ └── mirror_test.py ├── utilities │ ├── __init__.py │ ├── clifford_utils.py │ ├── file_utils.py │ ├── gate_map.py │ ├── graph_utils.py │ ├── layer_fidelity_utils.py │ └── sampling_utils.py └── verification │ ├── Readme.md │ ├── __init__.py │ ├── bench_analyze.py │ ├── count_analyze.py │ ├── fast_bench.py │ ├── fast_count.py │ ├── fast_layer_fidelity.py │ └── gen_circuits.py ├── requirements-dev.txt ├── requirements.txt └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | 
share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | .DS_Store 163 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. 
Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Qiskit Device Benchmarking 2 | 3 | *Qiskit Device Benchmarking* is a repository for code to run various device level benchmarks through Qiskit. The repository endeavours to accomplish several goals, including, but not limited to: 4 | - Code examples for users to replicate reported benchmarking metrics through the Qiskit backend. These will likely be notebooks to run code in the [Qiskit Experiments](https://github.com/Qiskit-Extensions/qiskit-experiments) repo, but some of the code may reside here. 5 | - More in-depth benchmarking code that was discussed in papers and has not been integrated into Qiskit Experiments. 6 | - Fast circuit validation tests. 7 | 8 | The repository is not intended to define a benchmark standard. This code base is not guaranteed to be stable and may have breaking changes. 9 | 10 | # Structure 11 | 12 | At the top level we have notebooks that give users examples on how to run various benchmarks. 
13 | - [Notebooks](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/notebooks): Jupyter notebooks for running benchmarks 14 | 15 | Under a top level folder `qiskit_device_benchmarking` we have repository code files that can be imported from python: 16 | - [Utilities](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/qiskit_device_benchmarking/utilities): Benchmarking utility/helper code not found elsewhere in qiskit. If these prove useful they will be pushed into standard qiskit or qiskit-experiments. 17 | - [Benchmarking Code](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/qiskit_device_benchmarking/bench_code): General folder for benchmarking code, which may include standalone code and extensions to qiskit-experiments for custom benchmarks. 18 | - [Verification](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/qiskit_device_benchmarking/verification): Fast verification via mirror circuits using a command line program. 19 | 20 | # Paper Code 21 | 22 | For clarity here we provide links from various papers to the code in this repo. Not necessarily the exact code used in these manuscripts, but representative of what was run. 23 | 24 | - [Layer Fidelity](https://arxiv.org/abs/2311.05933): David C. McKay, Ian Hincks, Emily J. Pritchett, Malcolm Carroll, Luke C. G. Govia, Seth T. Merkel. Benchmarking Quantum Processor Performance at Scale (2023). [Code](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/notebooks/layer_fidelity.ipynb) 25 | - [Mirror QV](https://arxiv.org/abs/2303.02108): Mirko Amico, Helena Zhang, Petar Jurcevic, Lev S. Bishop, Paul Nation, Andrew Wack, David C. McKay. Defining Standard Strategies for Quantum Benchmarks (2023). [Code](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/qiskit_device_benchmarking/bench_code/mrb) 26 | - [Mid-circuit measurement RB](https://arxiv.org/abs/2207.04836): Luke C. G. 
Govia, Petar Jurcevic, Christopher J. Wood, Naoki Kanazawa, Seth T. Merkel, David C. McKay. A randomized benchmarking suite for mid-circuit measurements (2022). [Code](notebooks/mcm_rb.ipynb) 27 | - [Dynamic circuits RB](https://arxiv.org/abs/2408.07677): Liran Shirizly, Luke C. G. Govia, David C. McKay. Randomized Benchmarking Protocol for Dynamic Circuits (2024). [Code](notebooks/dynamic_circuits_rb.ipynb) 28 | 29 | # Installation 30 | 31 | ``` 32 | git clone git@github.com:qiskit-community/qiskit-device-benchmarking.git 33 | cd qiskit-device-benchmarking 34 | pip install . 35 | ``` 36 | 37 | # Contribution Guidelines 38 | 39 | Please open a GitHub issue or pull request if you would like to contribute. 40 | 41 | # License 42 | 43 | [Apache License 2.0](LICENSE) 44 | 45 | # Acknowledgements 46 | 47 | Portions of the code in this repository were developed via sponsorship by the Army Research Office "QCISS Program" under Grant Number W911NF-21-1-0002. 48 | -------------------------------------------------------------------------------- /notebooks/README.md: -------------------------------------------------------------------------------- 1 | # Qiskit Device Benchmarking Notebooks 2 | 3 | This folder contains example notebooks for running benchmarks. 4 | 5 | - [Layer Fidelity](layer_fidelity.ipynb): Example notebook for running the layer fidelity using the generation code in qiskit-experiments. In this particular example the code uses the qiskit reported errors to guess the best chain of qubits for running the layer fidelity. However, it can be easily adjusted to run on an arbitrary chain. 6 | 7 | - [Layer Fidelity Single Chain](layer_fidelity_single_chain.ipynb): Example notebook for running a layer fidelity experiment on a single chain. The default chain is the one reported on qiskit (100Q long chain) but this notebook can be easily modified to run on any arbitrary chain. 
A nice feature of this notebook is that it allows the user to find the best subchain within a larger chain, that is, if layer fidelity is run on a 100Q long chain, the user can easily find the best x = [4,5,6,...,98,99,100] long subchain within that chain. This notebook uses helper functions from module `layer_fidelity_utils.py` and should be less code heavy. 8 | 9 | - [Bell State Tomography](bell_state_tomography.ipynb): Example notebook for running parallel state tomography using qiskit-experiments. 10 | 11 | - [Device RB](device_rb.ipynb): Example notebook for running full device 2Q RB and Purity RB. 12 | 13 | - [Device (Direct) RB](device_rb_w_lf.ipynb): Example notebook for running full device 2Q RB using layer fidelity (direct RB). This reduces the number of single qubit gates per 2Q gate in RB. 14 | 15 | - [System Characterization](system_char.ipynb): Notebook to do general (non-RB) characterization of a system. Runs Coherence (T1/T2), measurement fidelity, hellinger fidelities of Bell states produced with repeated two-qubit gates and ZZ. 16 | 17 | - [Extract Benchmarks](extract_benchmarks.ipynb): Example notebook for extracting and plotting benchmarks and properties from a list of devices. This information includes LF, EPLG, 2Q errors, 1Q errors, T1s, T2s, and readout errors, but can be easily modified to include any other properties. 18 | 19 | - [MCM RB](mcm_rb.ipynb): Example notebook for running Mid-circuit measurement RB experiment. 20 | 21 | - [Dynamic circuits RB](dynamic_circuits_rb.ipynb): Example notebook for running dynamic circuits RB experiment. 22 | 23 | - [Layer Fidelity Placement](layer_fidelity_placement.ipynb): Example notebook of using layer fidelity to build an updated error map of the device that is more reflective of layered circuits. Also gives an example of a heuristic algorithm for finding the best N-qubit chain based on the error map. 
24 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools"] 3 | build-backend = "setuptools.build_meta" 4 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/VERSION.txt: -------------------------------------------------------------------------------- 1 | 0.1.0 2 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | """ 14 | ============================================== 15 | Qiskit Device Benchmarking (:mod:`qiskit_device_benchmarking`) 16 | ============================================== 17 | 18 | .. currentmodule:: qiskit_device_benchmarking 19 | 20 | Qiskit Device Benchmarking is a collection of code files to help 21 | users run benchmarking experiments.. 22 | """ 23 | 24 | # Modules 25 | #from . import utilities 26 | #from . import bench_code 27 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 
2 | # 3 | # (C) Copyright IBM 2024 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | """ 14 | ============================================== 15 | Qiskit Device Benchmarking (:mod:`qiskit_device_benchmarking`) 16 | ============================================== 17 | 18 | .. currentmodule:: qiskit_device_benchmarking 19 | 20 | Qiskit Device Benchmarking is a collection of code files to help 21 | users run benchmarking experiments.. 22 | """ 23 | 24 | # Modules 25 | # from . import framework 26 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/bell/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Variants of the Bell experiments 14 | 15 | .. currentmodule:: qiskit_experiments_internal.library.quantum_volume 16 | 17 | Classes 18 | ======= 19 | .. 
autosummary:: 20 | ::undoc-members: 21 | 22 | Bell 23 | 24 | """ 25 | 26 | from .bell_experiment import BellExperiment 27 | from .bell_experiment import BellAnalysis 28 | from .bell_experiment import CHSHAnalysis 29 | from .bell_experiment import CHSHExperiment 30 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/bell/bell_experiment.py: -------------------------------------------------------------------------------- 1 | from typing import List, Tuple, Sequence 2 | import numpy as np 3 | import matplotlib 4 | import matplotlib.pyplot as plt 5 | 6 | from qiskit.circuit import QuantumCircuit 7 | from qiskit.result import marginal_counts 8 | 9 | from qiskit_experiments.framework import ( 10 | BaseExperiment, 11 | BaseAnalysis, 12 | Options, 13 | ExperimentData, 14 | AnalysisResultData 15 | ) 16 | 17 | class CHSHExperiment(BaseExperiment): 18 | """Custom experiment class template.""" 19 | 20 | def __init__(self, 21 | physical_qubits: Sequence[int], 22 | backend = None): 23 | """Initialize a chsh bell experiment 24 | 25 | Args: 26 | physical_qubits: List of physical qubits for the experiment. 27 | backend: The backend to run the experiment on. 28 | 29 | Raises: 30 | QiskitError: If any invalid argument is supplied. 
31 | """ 32 | 33 | 34 | super().__init__(physical_qubits, 35 | analysis = CHSHAnalysis(), 36 | backend = backend) 37 | 38 | def circuits(self) -> List[QuantumCircuit]: 39 | """Generate the list of circuits to be run.""" 40 | 41 | #Four circuits for this experiment 42 | #Assume the ideal basis for this inequality 43 | circuits = [] 44 | for i in range(4): 45 | qc = QuantumCircuit(2) 46 | qc.h(0) 47 | qc.cx(0,1) 48 | 49 | #rotate the 2nd qubit by pi/4 (optimal for the inequality) 50 | qc.rx(np.pi/4,1) 51 | 52 | #measure in Z, ZY, YZ, YY 53 | if np.mod(i,2): 54 | qc.sx(0) 55 | if np.mod(int(i/2),2): 56 | qc.sx(0) 57 | qc.measure_all() 58 | circuits.append(qc) 59 | 60 | return circuits 61 | 62 | @classmethod 63 | def _default_experiment_options(cls) -> Options: 64 | """Set default experiment options here.""" 65 | options = super()._default_experiment_options() 66 | options.update_options( 67 | shots = 300, 68 | ) 69 | return options 70 | 71 | 72 | class CHSHAnalysis(BaseAnalysis): 73 | """Custom analysis class template.""" 74 | 75 | @classmethod 76 | def _default_options(cls) -> Options: 77 | """Set default analysis options. 
Plotting is on by default.""" 78 | 79 | options = super()._default_options() 80 | options.dummy_analysis_option = None 81 | options.plot = False 82 | options.ax = None 83 | return options 84 | 85 | def _estate(self, counts): 86 | #from a counts dictionary determine the correlation function E 87 | 88 | shots = np.sum([counts[i] for i in counts]) 89 | return (counts.get('11',0)+counts.get('00',0)-counts.get('10',0)-counts.get('01',0))/shots 90 | 91 | def _run_analysis( 92 | self, 93 | experiment_data: ExperimentData, 94 | ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]: 95 | """Run the analysis.""" 96 | 97 | # Process the data here 98 | 99 | res = experiment_data.data() 100 | 101 | aa = [1,-1,-1,-1] 102 | S = np.sum([aa[i]*self._estate(res[i]['counts']) for i in range(4)]) 103 | 104 | analysis_results = [ 105 | AnalysisResultData(name="S", value=S) 106 | ] 107 | 108 | return analysis_results, None 109 | 110 | 111 | class BellExperiment(BaseExperiment): 112 | """Custom experiment class template.""" 113 | 114 | def __init__(self, 115 | layered_coupling_map, 116 | cxnum=5, 117 | backend = None): 118 | """Initialize the experiment.""" 119 | 120 | 121 | physical_qubits = [] 122 | for layer in layered_coupling_map: 123 | for pair in layer: 124 | if pair[0] not in physical_qubits: 125 | physical_qubits.append(pair[0]) 126 | if pair[1] not in physical_qubits: 127 | physical_qubits.append(pair[1]) 128 | physical_qubits = range(backend.configuration().num_qubits) 129 | 130 | self.layered_coupling_map = layered_coupling_map 131 | self.cxnum = cxnum 132 | super().__init__(physical_qubits, 133 | analysis = BellAnalysis(), 134 | backend = backend) 135 | 136 | def circuits(self) -> List[QuantumCircuit]: 137 | """Generate the list of circuits to be run.""" 138 | conf = self.backend.configuration() 139 | circuits = make_bell_circs(self.layered_coupling_map, conf, cxnum=self.cxnum) 140 | 141 | return circuits 142 | 143 | @classmethod 144 | def 
_default_experiment_options(cls) -> Options: 145 | """Set default experiment options here.""" 146 | options = super()._default_experiment_options() 147 | options.update_options( 148 | shots = 2048, 149 | ) 150 | return options 151 | 152 | 153 | class BellAnalysis(BaseAnalysis): 154 | """Custom analysis class template.""" 155 | 156 | @classmethod 157 | def _default_options(cls) -> Options: 158 | """Set default analysis options. Plotting is on by default.""" 159 | 160 | options = super()._default_options() 161 | options.dummy_analysis_option = None 162 | options.plot = True 163 | options.ax = None 164 | return options 165 | 166 | def _run_analysis( 167 | self, 168 | experiment_data: ExperimentData, 169 | ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]: 170 | """Run the analysis.""" 171 | 172 | # Process the data here 173 | from qiskit.quantum_info import hellinger_fidelity 174 | from qiskit.result import marginal_counts 175 | import pandas as pd 176 | 177 | res = experiment_data.data() 178 | 179 | cxnum = res[0]['metadata']['cxnum'] 180 | if cxnum % 2 == 1: # usual case of making a Bell state 181 | target = {'00': 0.5, '11': 0.5} 182 | else: # even number of CX should be an identity 183 | target = {'00': 0.5, '01': 0.5} 184 | 185 | fid = []; cmap=[] 186 | for datum in res: 187 | coupling_map = datum['metadata']['coupling_map'] 188 | # cxnum 189 | counts = datum['counts'] 190 | tmp = extract_ind_counts(coupling_map, counts, measure_idle=False) 191 | for cr, val in tmp.items(): 192 | cmap.append([int(bit) for bit in cr.split('_')]) 193 | fid.append(hellinger_fidelity(val, target)) 194 | 195 | df = {'connection':cmap,'fidelity':fid} 196 | fidelity_data = pd.DataFrame(df).sort_values(by='connection') 197 | 198 | 199 | analysis_results = [ 200 | AnalysisResultData(name="hellinger_fidelities", value=fidelity_data) 201 | ] 202 | figures = [] 203 | if self.options.plot: 204 | figures.append(self._plot(fidelity_data)) 205 | 206 | return 
def flatten_bits(crs):
    """Flatten a list of qubit pairs into [all first qubits..., all second qubits...].

    It is important to follow bits in int format to match the arrangement
    of the classical register built in make_bell_circs.
    """
    if len(crs) == 0:
        return []
    bits = [int(cr[0]) for cr in crs]
    bits.extend([int(cr[1]) for cr in crs])
    return bits


def make_bell_circs(layered_coupling_map, conf, cxnum):
    """Build simultaneous Bell-test circuits.

    Simultaneous pairs are obtained from get_layered_coupling_map.
    We assume each cr ran only one time
    (e.g. [[1_2, 3_4], [5_6, 7_8]] is okay, but [[1_2, 3_4], [1_2, 5_6, 7_8]]
    is not okay).

    Args:
        layered_coupling_map: List of layers; each layer is a list of qubit
            pairs driven simultaneously.
        conf: Backend configuration (only ``n_qubits`` is used).
        cxnum: Number of CX gates applied to each pair.  An odd count
            prepares a Bell state; an even count composes to the identity.

    Returns:
        One QuantumCircuit per layer, tagged with its coupling map and
        ``cxnum`` in the circuit metadata.
    """
    n_reset = 2
    # BUG FIX: a stray ``cxnum = 5`` here used to clobber the caller's
    # argument, so BellExperiment(cxnum=...) was silently ignored.  (The
    # default value is unchanged, so default callers behave identically.)
    insert_barrier = False

    hadamard_idle = False
    y_basis = False
    measure_idle = False
    circs = []

    for coupling_map in layered_coupling_map:
        bits = flatten_bits(coupling_map)
        nbits = len(bits)

        qc = QuantumCircuit(conf.n_qubits, nbits)
        # prepare qubits in superposition and then reset (conditionally) if requested
        if n_reset > 0:
            for bit in bits:
                qc.h(bit)
            for rnum in range(n_reset):
                qc.barrier()
                for bit in bits:
                    qc.reset(bit)
            qc.barrier()
        elif insert_barrier:
            qc.barrier(bits)
        # now do the Bell state
        if hadamard_idle:  # Hadamard all qubits except CNOT targets
            for i in range(conf.n_qubits):
                if i not in [edge[1] for edge in coupling_map]:
                    qc.h(i)
        else:  # Hadamard only CNOT control qubits
            for edge in coupling_map:
                qc.h(edge[0])
        for i in range(cxnum):
            if insert_barrier:
                qc.barrier(bits)
            for edge in coupling_map:
                qc.cx(edge[0], edge[1])
                qc.barrier(edge[0], edge[1])
        if y_basis:
            # rotate both qubits into the Y basis before measurement
            if insert_barrier:
                qc.barrier(bits)
            for edge in coupling_map:
                qc.s(edge[0])
                qc.sdg(edge[1])
                qc.h(edge[0])
                qc.h(edge[1])
        if measure_idle:
            full_list = list(range(conf.n_qubits))
            qc.measure(full_list, full_list)
        else:
            qc.measure(bits, list(range(nbits)))

        qc.metadata['coupling_map'] = coupling_map
        qc.metadata['cxnum'] = cxnum
        circs.append(qc)
    return circs


def extract_ind_counts(crs, counts, measure_idle):
    """Split a joint counts dictionary into per-pair two-bit counts.

    It is important to follow bits in int format to match the arrangement
    of the classical register in the circuit composer in the run code.

    Args:
        crs: List of qubit pairs used in the layer.
        counts: Joint counts dictionary for the layer's circuit.
        measure_idle: If True, every qubit was measured, so classical-bit
            indices equal physical-qubit indices.

    Returns:
        Dict mapping ``"q0_q1"`` labels to marginalized two-bit counts.
    """
    if not measure_idle:
        bits = flatten_bits(crs)
        bit2idx = {}
        for i, bit in enumerate(bits):
            bit2idx.update({int(bit): i})
    # shuffle the data
    ind_counts = {}
    for i, cr in enumerate(crs):
        label = '{}_{}'.format(cr[0], cr[1])
        if measure_idle:
            idx1 = int(cr[0])
            idx2 = int(cr[1])
        else:
            idx1 = bit2idx[int(cr[0])]
            idx2 = bit2idx[int(cr[1])]
        ind_counts.update({label: marginal_counts(counts, [idx1, idx2])})

        if measure_idle and cr[0] > cr[1]:
            # keep bit order consistent when the pair was listed high-low
            ind_counts[label]['01'], ind_counts[label]['10'] = \
                ind_counts[label].get('10', 0), ind_counts[label].get('01', 0)

    return ind_counts
4 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/dynamic_circuits_rb/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | =============================================================== 3 | Dynamic Circuits Randomized Benchmarking (:mod:`dynamic_circuits_rb`) 4 | =============================================================== 5 | 6 | Classes 7 | ======= 8 | 9 | .. autosummary:: 10 | :toctree: ../stubs/ 11 | 12 | DynamicCircuitsRB 13 | DynamicCircuitsRBAnalysis 14 | """ 15 | 16 | from .dc_rb_experiment import DynamicCircuitsRB, DynamicCircuitsRBAnalysis 17 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/dynamic_circuits_rb/dc_rb_experiment.py: -------------------------------------------------------------------------------- 1 | # (C) Copyright IBM 2024. 2 | # 3 | # This code is licensed under the Apache License, Version 2.0. You may 4 | # obtain a copy of this license in the LICENSE.txt file in the root directory 5 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 6 | # 7 | # Any modifications or derivative works of this code must retain this 8 | # copyright notice, and modified files need to carry a notice indicating 9 | # that they have been altered from the originals. 10 | 11 | """ 12 | Dynamic circuits RB Experiment class. 
13 | """ 14 | 15 | from matplotlib.backends.backend_svg import FigureCanvasSVG 16 | from matplotlib.figure import Figure 17 | import cmath 18 | import math 19 | 20 | from qiskit_ibm_runtime.transpiler.passes.scheduling import ( 21 | DynamicCircuitInstructionDurations, 22 | ) 23 | from scipy.linalg import det 24 | from numpy.random import default_rng 25 | from qiskit.circuit import QuantumCircuit, Delay 26 | 27 | from qiskit.quantum_info import Clifford 28 | from qiskit.quantum_info.random import random_clifford 29 | from qiskit.circuit.instruction import Instruction 30 | from qiskit.circuit.library import UGate, SXGate, RZGate 31 | from qiskit.exceptions import QiskitError 32 | from numpy.random import Generator 33 | from typing import Sequence, List, Iterator 34 | import numpy as np 35 | from qiskit.providers.backend import Backend 36 | from qiskit_experiments.framework import ( 37 | BaseExperiment, 38 | BackendTiming, 39 | ) 40 | from qiskit_experiments.data_processing import ( 41 | DataProcessor, 42 | Probability, 43 | MarginalizeCounts, 44 | ) 45 | import qiskit_experiments.curve_analysis as curve 46 | from ..mcm_rb import SubDecayFit 47 | 48 | 49 | class DynamicCircuitsRB(BaseExperiment): 50 | """Dynamic circuits Randomized Benchmarking. 51 | 52 | # section: overview 53 | 54 | a series of dynamic circuit benchmarking routines based on interleaving dynamic circuit 55 | operation blocks in one-qubit randomized benchmarking sequences of data qubits. The blocks span 56 | between the set of data qubits and a measurement qubit and may include feedforward operations 57 | based on the measurement. 58 | 59 | # section: reference 60 | .. 
    def __init__(
        self,
        physical_qubits: Sequence[int],
        backend: Backend,
        n_blocks=(0, 1, 2, 3, 4, 5, 10, 15, 20),
        num_samples=3,
        seed=100,
        cliff_per_meas=5,
        ff_operations=("I_c0", "Z_c0", "I_c1", "Z_c1", "Delay"),
        ff_delay=2120,
        plot_measured_qubit=False,
        plot_summary=False,
    ):
        """Dynamic circuits RB.

        Args:
            physical_qubits: The qubits on which to run the experiment.
            backend: The backend to run the experiment on.
            n_blocks: Number of measurements/feedforward operations.
            num_samples: Number of different sequences to generate.
            seed: Seed for the random number generator.
            cliff_per_meas: Number of random Cliffords inserted between
                consecutive interleaved feedforward blocks.
            ff_operations: Sequence of the dynamic circuits blocks labels.
            ff_delay: Feedforward latency in dt units.
            plot_measured_qubit: Plot the decay curve of the measured qubit.
            plot_summary: Plot summary of the decay parameters.

        Raises:
            Exception: If an "H_CNOT" block is requested with a number of
                physical qubits other than 2.
        """
        super().__init__(physical_qubits=physical_qubits, backend=backend)
        self.analysis = DynamicCircuitsRBAnalysis(
            physical_qubits=physical_qubits,
            ff_operations=ff_operations,
            plot_measured_qubit=plot_measured_qubit,
            plot_summary=plot_summary,
        )
        self.n_blocks = n_blocks
        self.seed = seed
        self.num_samples = num_samples
        self.ff_operations = ff_operations
        self.ff_delay = ff_delay
        self.cliff_per_meas = cliff_per_meas
        if "H_CNOT" in self.ff_operations and len(physical_qubits) != 2:
            raise Exception("The CNOT blocks are supported only for 2 physical qubits")
110 | """ 111 | rng = default_rng(seed=self.seed) 112 | 113 | n_qubits = self.num_qubits 114 | 115 | # Construct interleaved parts 116 | ff_circs = [] 117 | for ff_type in self.ff_operations: 118 | ff_circs.append(self.ff_circ(ff_type)) 119 | 120 | circuits = [] 121 | 122 | for i in range(self.num_samples): 123 | for length in self.n_blocks: 124 | generators = ( 125 | self._generate_sequences(length * self.cliff_per_meas, rng) 126 | for _ in range(n_qubits - 1) 127 | ) 128 | 129 | # Generate MCM RB circuit 130 | circs = [] 131 | for ff_type in self.ff_operations: 132 | circ = QuantumCircuit(n_qubits, n_qubits) 133 | circ.metadata = { 134 | "xval": length, 135 | "physical_qubits": self.physical_qubits, 136 | "num_sample": i, 137 | "ff_type": ff_type, 138 | } 139 | circs.append(circ) 140 | 141 | n_elms = 0 142 | for elms in zip(*generators): 143 | n_elms += 1 144 | for q, elm in enumerate(elms): 145 | # Add a single random clifford 146 | for inst in self._sequence_to_instructions(elm): 147 | for circ in circs: 148 | circ._append(inst, [circ.qubits[q]], []) 149 | # Sync time 150 | for circ in circs: 151 | circ.barrier() 152 | if n_elms <= (length * self.cliff_per_meas) and ( 153 | np.mod(n_elms, self.cliff_per_meas) == 0 154 | ): 155 | # Interleave MCM 156 | for circ, ff_circ in zip(circs, ff_circs): 157 | circ.compose(ff_circ, inplace=True, qubits=circ.qubits) 158 | circ.barrier() 159 | for circ in circs: 160 | circ.barrier() 161 | circ.measure(circ.qubits, circ.clbits) 162 | 163 | circuits.extend(circs) 164 | 165 | return circuits 166 | 167 | def ff_circ(self, ff_type): 168 | 169 | circ = QuantumCircuit(self.num_qubits, self.num_qubits) 170 | timing = BackendTiming(self.backend) 171 | durations = DynamicCircuitInstructionDurations.from_backend(self.backend) 172 | clbits = circ.clbits 173 | qubits = circ.qubits 174 | if ff_type == "H_CNOT": 175 | circ.h(qubits[-1]) 176 | circ.barrier() 177 | circ.cx(qubits[-1], qubits[0]) 178 | circ.barrier() 179 | 
circ.measure(qubits[-1], clbits[-1]) 180 | with circ.if_test((clbits[-1], 1)): 181 | circ.x(qubits[0]) 182 | circ.x(qubits[-1]) 183 | elif ff_type == "H_CNOT_FFDD": 184 | meas_dt = durations.get("measure", 0, "dt") 185 | x_dt = durations.get("x", 0, "dt") 186 | ff_dt = self.ff_delay 187 | delay1 = timing.round_delay( 188 | time=((meas_dt - ff_dt - 2 * x_dt) / 2) * timing.dt 189 | ) 190 | delay2 = timing.round_delay(time=(ff_dt - x_dt) * timing.dt) 191 | circ.h(qubits[-1]) 192 | circ.barrier() 193 | circ.cx(qubits[-1], qubits[0]) 194 | circ.barrier() 195 | circ.x([qubits[0]]) 196 | circ.append(Delay(delay1, "dt"), [qubits[0]], []) 197 | circ.x([qubits[0]]) 198 | circ.append(Delay(delay1, "dt"), [qubits[0]], []) 199 | circ.x([qubits[0]]) 200 | circ.append(Delay(delay2, "dt"), [qubits[0]], []) 201 | circ.x([qubits[0]]) 202 | circ.measure(qubits[-1], clbits[-1]) 203 | circ.barrier() 204 | with circ.if_test((clbits[-1], 1)): 205 | circ.x(qubits[0]) 206 | circ.x(qubits[-1]) 207 | elif ff_type == "H_CNOT_MDD": 208 | meas_dt = durations.get("measure", 0, "dt") 209 | x_dt = durations.get("x", 0, "dt") 210 | delay_quarter = timing.round_delay( 211 | time=((meas_dt - 2 * x_dt) / 4) * timing.dt 212 | ) 213 | circ.h(qubits[-1]) 214 | circ.barrier() 215 | circ.cx(qubits[-1], qubits[0]) 216 | circ.barrier() 217 | circ.barrier() 218 | circ.x([qubits[0]]) 219 | circ.append(Delay(delay_quarter, "dt"), [qubits[0]], []) 220 | circ.x([qubits[0]]) 221 | circ.append(Delay(delay_quarter * 2, "dt"), [qubits[0]], []) 222 | circ.x([qubits[0]]) 223 | circ.append(Delay(delay_quarter, "dt"), [qubits[0]], []) 224 | circ.x([qubits[0]]) 225 | circ.measure(qubits[-1], clbits[-1]) 226 | circ.barrier() 227 | with circ.if_test((clbits[-1], 1)): 228 | circ.x(qubits[0]) 229 | circ.x(qubits[-1]) 230 | elif ff_type == "X_c1": 231 | circ.x(qubits) 232 | circ.barrier() 233 | circ.measure(qubits[-1], clbits[-1]) 234 | with circ.if_test((clbits[-1], 1)): 235 | circ.x(qubits) 236 | elif ff_type == "X_c0": 237 
    def _generate_sequences(self, length: int, rng: Generator) -> Iterator[Clifford]:
        """Yield ``length`` random 1-qubit Cliffords plus their combined inverse.

        The final element is the adjoint of the composition of all previous
        elements, so the full sequence composes to the identity (the standard
        RB construction).  For ``length == 0`` nothing is yielded.

        Args:
            length: Number of random Cliffords to generate.
            rng: Random number generator used for sampling.

        Yields:
            The random Cliffords, followed by the inverting Clifford when
            ``length > 0``.
        """
        # Track the running composition so it can be inverted at the end.
        composed = Clifford([[1, 0], [0, 1]])
        for _ in range(length):
            elm = random_clifford(1, rng)
            composed = composed.compose(elm)
            yield elm
        if length > 0:
            yield composed.adjoint()
"""Single qubit Clifford decomposition with fixed number of physical gates. 293 | 294 | This overrules standard Qiskit transpile protocol and immediately 295 | apply hard-coded decomposition with respect to the backend basis gates. 296 | Note that this decomposition ignores global phase. 297 | 298 | This decomposition guarantees constant gate duration per every Clifford. 299 | """ 300 | if not self.backend: 301 | return [elm.to_instruction()] 302 | else: 303 | basis_gates = self.backend.configuration().basis_gates 304 | # First decompose into Euler angle rotations. 305 | theta, phi, lam = self._zyz_decomposition(elm.to_matrix()) 306 | 307 | if all(op in basis_gates for op in ("sx", "rz")): 308 | return [ 309 | RZGate(lam), 310 | SXGate(), 311 | RZGate(theta + math.pi), 312 | SXGate(), 313 | RZGate(phi - math.pi), 314 | ] 315 | if "u" in basis_gates: 316 | return [UGate(theta, phi, lam)] 317 | raise QiskitError( 318 | f"Current decomposition mechanism doesn't support basis gates {basis_gates}." 
319 | ) 320 | 321 | def _zyz_decomposition(self, mat: np.ndarray): 322 | # This code is copied from 323 | # qiskit.quantum_info.synthesis.one_qubit_decompose.OneQubitEulerDecomposer 324 | su_mat = det(mat) ** (-0.5) * mat 325 | theta = 2 * math.atan2(abs(su_mat[1, 0]), abs(su_mat[0, 0])) 326 | phiplambda2 = cmath.phase(su_mat[1, 1]) 327 | phimlambda2 = cmath.phase(su_mat[1, 0]) 328 | phi = phiplambda2 + phimlambda2 329 | lam = phiplambda2 - phimlambda2 330 | 331 | return theta, phi, lam 332 | 333 | 334 | class DynamicCircuitsRBAnalysis(SubDecayFit): 335 | def __init__( 336 | self, 337 | physical_qubits, 338 | ff_operations, 339 | plot_measured_qubit=True, 340 | plot_summary=True, 341 | ): 342 | super().__init__() 343 | self.physical_qubits = physical_qubits 344 | self.ff_operations = ff_operations 345 | self.plot_summary = plot_summary 346 | self.plot_measured_qubit = plot_measured_qubit 347 | 348 | def _run_analysis( 349 | self, 350 | experiment_data, 351 | ): 352 | 353 | analysis_results, figs = [], [] 354 | q_m = self.physical_qubits[-1] 355 | for ff_type in self.ff_operations: 356 | for i, q in enumerate(self.physical_qubits): 357 | name = f"{ff_type}(Q{q}_M{q_m})" 358 | self.set_options( 359 | data_processor=DataProcessor( 360 | "counts", [MarginalizeCounts({i}), Probability("0")] 361 | ), 362 | result_parameters=[curve.ParameterRepr("alpha", name)], 363 | filter_data={"ff_type": ff_type}, 364 | ) 365 | self._name = name 366 | self.plotter.set_figure_options( 367 | xlabel="Number of FF operation", 368 | ylabel="P(0)", 369 | figure_title=f"Data qubit: {q}, Measured qubit: {q_m} Operation: {ff_type}", 370 | ) 371 | analysis_result, fig = super()._run_analysis(experiment_data) 372 | analysis_results += analysis_result 373 | if q == q_m and not self.plot_measured_qubit: 374 | continue 375 | figs += fig 376 | 377 | if self.plot_summary: 378 | results_fig = Figure(figsize=(6, 4)) 379 | results_separate_fig = Figure(figsize=(len(self.physical_qubits) * 1.4, 4)) 380 | 
_ = FigureCanvasSVG(results_fig) 381 | _ = FigureCanvasSVG(results_separate_fig) 382 | ax = results_fig.subplots(1, 1) 383 | axs = results_separate_fig.subplots(1, len(self.physical_qubits)) 384 | x = np.arange(len(self.physical_qubits)) 385 | x_ticks = [f"Q{q}" for q in self.physical_qubits] 386 | for ff_type in self.ff_operations: 387 | ys, y_errs = [], [] 388 | for i, q in enumerate(self.physical_qubits): 389 | alpha = next( 390 | filter( 391 | lambda res: res.name == f"{ff_type}(Q{q}_M{q_m})", 392 | analysis_results, 393 | ) 394 | ) 395 | y, y_err = alpha.value.n, alpha.value.s 396 | ys.append(y) 397 | y_errs.append(y_err) 398 | axs[i].errorbar( 399 | [1], 400 | y, 401 | yerr=y_err, 402 | fmt="o", 403 | alpha=0.5, 404 | capsize=4, 405 | markersize=5, 406 | label=ff_type, 407 | ) 408 | ax.errorbar( 409 | x, 410 | ys, 411 | yerr=y_errs, 412 | fmt="o", 413 | alpha=0.5, 414 | capsize=4, 415 | markersize=5, 416 | label=ff_type, 417 | ) 418 | ax.legend() 419 | ax.set_xticks(x, x_ticks) 420 | ax.set_title(f"Measured qubit: {q_m}") 421 | for i, q in enumerate(self.physical_qubits): 422 | axs[i].set_xticks([1], [f"Q{q}"]) 423 | axs[-1].set_xticks([1], [f"Q{q_m}:M"]) 424 | axs[-1].legend(bbox_to_anchor=(1.05, 1.0), loc="upper left") 425 | results_separate_fig.tight_layout() 426 | figs += [results_fig, results_separate_fig] 427 | 428 | return analysis_results, figs 429 | 430 | @classmethod 431 | def _default_options(cls): 432 | 433 | default_options = super()._default_options() 434 | default_options.plot_raw_data = True 435 | default_options.average_method = "sample" 436 | 437 | return default_options 438 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mcm_rb/Readme.md: -------------------------------------------------------------------------------- 1 | ## Mid-circuit measurement RB 2 | 3 | Code for running Mid-circuit measurement RB (notebook [here](../../../notebooks/mcm_rb.ipynb). 
Representative of the code used in [arXiv:2207.04836](https://arxiv.org/abs/2207.04836) and based on the qiskit-experiments framework. 4 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mcm_rb/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | =============================================================== 3 | Mid-circuit measurement Randomized Benchmarking (:mod:`mcm_rb`) 4 | =============================================================== 5 | 6 | Classes 7 | ======= 8 | 9 | .. autosummary:: 10 | :toctree: ../stubs/ 11 | 12 | McmRB 13 | McmRBAnalysis 14 | """ 15 | 16 | 17 | from .mcm_rb_experiment import McmRB, McmRBAnalysis, SubDecayFit 18 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mrb/Readme.md: -------------------------------------------------------------------------------- 1 | # Mirror QV and RB 2 | 3 | Code written for https://arxiv.org/abs/2303.02108 and based on the earlier works T. Proctor, S. Seritan, K. Rudinger, E. Nielsen, R. Blume-Kohout, and K. Young, 4 | [Physical Review Letters 129, 150502 (2022)](https://doi.org/10.48550/arXiv.2112.09853) and T. Proctor, K. Rudinger, K. Young, E. Nielsen, and R. Blume-Kohout, [Nature Physics 18, 75 (2022)](https://doi.org/10.48550/arXiv.2008.11294). 5 | 6 | Code is based on the qiskit-experiments framework. 7 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mrb/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2022. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. 
.. autosummary::
   :undoc-members:
14 | """ 15 | 16 | import warnings 17 | from typing import Union, Sequence, Optional, List 18 | from numpy.random import Generator, default_rng 19 | from numpy.random.bit_generator import BitGenerator, SeedSequence 20 | 21 | 22 | from qiskit.circuit import ( 23 | QuantumCircuit, 24 | ClassicalRegister, 25 | ) 26 | 27 | from qiskit.circuit.library import QuantumVolume as QuantumVolumeCircuit 28 | from qiskit.circuit.library import XGate 29 | from qiskit.providers.backend import Backend 30 | from qiskit_experiments.framework import BaseExperiment, Options 31 | from .mirror_qv_analysis import MirrorQuantumVolumeAnalysis 32 | 33 | from qiskit.converters import circuit_to_dag, dag_to_circuit 34 | from qiskit.quantum_info import random_pauli_list, random_unitary 35 | 36 | 37 | from qiskit.transpiler import PassManager, InstructionDurations 38 | from qiskit_ibm_runtime.transpiler.passes.scheduling import ALAPScheduleAnalysis 39 | from qiskit_ibm_runtime.transpiler.passes.scheduling import PadDynamicalDecoupling 40 | from qiskit_experiments.exceptions import QiskitError 41 | 42 | 43 | class MirrorQuantumVolume(BaseExperiment): 44 | """Mirror Quantum Volume Experiment class. 45 | 46 | # section: overview 47 | Quantum Volume (QV) is a single-number metric that can be measured using a concrete protocol 48 | on near-term quantum computers of modest size. The QV method quantifies the largest random 49 | circuit of equal width and depth that the computer successfully implements. 50 | Quantum computing systems with high-fidelity operations, high connectivity, 51 | large calibrated gate sets, and circuit rewriting toolchains are expected to 52 | have higher quantum volumes. 53 | 54 | The Quantum Volume is determined by the largest circuit depth :math:`d_{max}`, 55 | and equals to :math:`2^{d_{max}}`. 56 | See `Qiskit Textbook 57 | `_ 58 | for an explanation on the QV protocol. 
59 | 60 | In the QV experiment we generate :class:`~qiskit.circuit.library.QuantumVolume` circuits on 61 | :math:`d` qubits, which contain :math:`d` layers, where each layer consists of random 2-qubit 62 | unitary gates from :math:`SU(4)`, followed by a random permutation on the :math:`d` qubits. 63 | Then these circuits run on the quantum backend and on an ideal simulator (either 64 | :class:`~qiskit.providers.aer.AerSimulator` or :class:`~qiskit.quantum_info.Statevector`). 65 | 66 | A depth :math:`d` QV circuit is successful if it has 'mean heavy-output probability' > 2/3 with 67 | confidence level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been ran. 68 | 69 | See :class:`MirrorQuantumVolumeAnalysis` documentation for additional 70 | information on QV experiment analysis. 71 | 72 | # section: analysis_ref 73 | :py:class:`MirrorQuantumVolumeAnalysis` 74 | 75 | # section: reference 76 | .. ref_arxiv:: 1 1811.12926 77 | .. ref_arxiv:: 2 2008.08571 78 | 79 | """ 80 | 81 | def __init__( 82 | self, 83 | qubits: Sequence[int], 84 | backend: Optional[Backend] = None, 85 | trials: Optional[int] = 100, 86 | seed: Optional[Union[int, SeedSequence, BitGenerator, Generator]] = None, 87 | pauli_randomize: Optional[bool] = True, 88 | pauli_randomize_barriers: Optional[bool] = False, 89 | left_and_right: Optional[bool] = False, 90 | he: Optional[bool] = False 91 | ): 92 | """Initialize a quantum volume experiment. 93 | 94 | Args: 95 | qubits: list of physical qubits for the experiment. 96 | backend: Optional, the backend to run the experiment on. 97 | trials: The number of trials to run the quantum volume circuit. 98 | seed: Optional, seed used to initialize ``numpy.random.default_rng`` 99 | when generating circuits. The ``default_rng`` will be initialized 100 | with this seed value everytime :meth:`circuits` is called. 
101 | pauli_randomize: If True, add random Paulis to the beginning and end of 102 | a mirrored QV circuit 103 | pauli_randomize_barriers: If True, add barriers between the Paulis from 104 | pauli_randomize and the SU(4) elements 105 | left_and_right: If True, construct mirrored QV circuits from the left 106 | and right halves QV circuits. Circuits constructed from the right 107 | half have their inverses prepended, rather than appended. 108 | he: If true use a hardware efficient circuit (TwoLocal) 109 | 110 | Raises: 111 | Warning: if user attempts to split_inverse a QV experiment with odd depth 112 | """ 113 | super().__init__(qubits, analysis=MirrorQuantumVolumeAnalysis(), backend=backend) 114 | 115 | # Set configurable options 116 | self.set_experiment_options(trials=trials, seed=seed) 117 | 118 | self.split_inverse = True 119 | # always set pauli_randomize to False if split_inverse is False 120 | self.pauli_randomize = pauli_randomize and self.split_inverse 121 | # always set pauli_randomize_barriers to False if pauli_randomize is False 122 | self.pauli_randomize_barriers = pauli_randomize_barriers and self.pauli_randomize 123 | self.middle_pauli_randomize = False 124 | 125 | self.left_and_right = left_and_right and pauli_randomize 126 | self.he = he 127 | 128 | if he and left_and_right: 129 | raise QiskitError("Not supported for HE and left and right") 130 | 131 | warnings.simplefilter("always") 132 | if self.split_inverse and len(qubits) % 2 == 1: 133 | self.split_inverse = False 134 | self.pauli_randomize = False 135 | self.middle_pauli_randomize = False 136 | self.left_and_right = False 137 | warnings.warn( 138 | "Cannot split and invert QV circuits with odd depth. Circuits will not " 139 | + "undergo these modifications and target bitstrings will not be computed." 
    def dd_circuits(self) -> List[QuantumCircuit]:
        """Return the transpiled circuits padded with an X-X dynamical
        decoupling sequence.

        Transpiles the experiment circuits if that has not happened yet, then
        schedules them ALAP and pads idle periods on the experiment's physical
        qubits with an [X, X] sequence.  NOTE: the cached circuits in
        ``self._static_trans_circuits`` are replaced by the padded circuits.

        Returns:
            The DD-padded transpiled circuits.

        Raises:
            QiskitError: If no backend is set (gate durations from the backend
                are required for scheduling).
        """
        #run transpiler first (populates self._static_trans_circuits)
        self._transpiled_circuits()

        if self.backend is None:
            raise QiskitError("Can't run dd without backend specified")

        durations = InstructionDurations.from_backend(self.backend)

        dd_sequence = [XGate(), XGate()]
        pm = PassManager([ALAPScheduleAnalysis(durations),
                          PadDynamicalDecoupling(durations, dd_sequence, qubits=self._physical_qubits)])

        self._static_trans_circuits = pm.run(self._static_trans_circuits)

        return self._static_trans_circuits
198 | seed (None or int or SeedSequence or BitGenerator or Generator): A seed 199 | used to initialize ``numpy.random.default_rng`` when generating circuits. 200 | The ``default_rng`` will be initialized with this seed value everytime 201 | :meth:`circuits` is called. 202 | """ 203 | options = super()._default_experiment_options() 204 | 205 | options.trials = 100 206 | options.seed = None 207 | 208 | return options 209 | 210 | 211 | def circuits(self) -> List[QuantumCircuit]: 212 | """Return a list of Quantum Volume circuits. 213 | 214 | Returns: 215 | A list of :class:`QuantumCircuit`. 216 | """ 217 | rng = default_rng(seed=self.experiment_options.seed) 218 | circuits = [] 219 | depth = self._num_qubits 220 | 221 | # Note: the trials numbering in the metadata is starting from 1 for each new experiment run 222 | for trial in range(1, self.experiment_options.trials + 1): 223 | if self.he: 224 | 225 | #assume linear connectivity 226 | #but we could feed in a coupling map 227 | #copied from qiskit.circuit.library.QuantumVolume 228 | #and adopted for he 229 | name = "quantum_volume_he" + str([depth, depth]).replace(" ", "") 230 | qv_circ = QuantumCircuit(depth, name=name) 231 | unitary_seeds = rng.integers(low=1, high=1000, size=[depth, depth]) 232 | 233 | for i in range(depth): 234 | all_edges = [(i,i+1) for i in range(depth-1)] 235 | selected_edges = [] 236 | while all_edges: 237 | 238 | rand_edge = all_edges.pop(rng.integers(len(all_edges))) 239 | selected_edges.append(rand_edge) 240 | old_all_edges = all_edges[:] 241 | all_edges = [] 242 | # only keep edges in all_edges that do not share a vertex with rand_edge 243 | for edge in old_all_edges: 244 | if rand_edge[0] not in edge and rand_edge[1] not in edge: 245 | all_edges.append(edge) 246 | 247 | for edge_i, edge in enumerate(selected_edges): 248 | su4 = random_unitary(4, seed=unitary_seeds[i][edge_i]).to_instruction() 249 | su4.label = "su4_" + str(unitary_seeds[i][edge_i]) 250 | qv_circ.compose(su4, 
[edge[0],edge[1]], inplace=True) 251 | 252 | else: 253 | qv_circ = QuantumVolumeCircuit(depth, depth, seed=rng) 254 | qv_circ = qv_circ.decompose() 255 | cregs = ClassicalRegister(depth, name="c") 256 | if self.split_inverse and depth % 2 == 0: 257 | if self.left_and_right: 258 | qv_circ, target, right_qv_circ, right_target = self.mirror_qv_circuit( 259 | qv_circ, 260 | pauli_randomize=self.pauli_randomize, 261 | pauli_randomize_barriers=self.pauli_randomize_barriers, 262 | middle_pauli_randomize=self.middle_pauli_randomize, 263 | left_and_right=self.left_and_right, 264 | seed=rng, 265 | ) 266 | else: 267 | qv_circ, target = self.mirror_qv_circuit( 268 | qv_circ, 269 | pauli_randomize=self.pauli_randomize, 270 | pauli_randomize_barriers=self.pauli_randomize_barriers, 271 | middle_pauli_randomize=self.middle_pauli_randomize, 272 | left_and_right=self.left_and_right, 273 | seed=rng, 274 | ) 275 | #qv_circ.measure_active() 276 | #qv_circ.add_register(cregs) 277 | #qv_circ.barrier([i for i in range(depth)]) 278 | #for qi in range(depth): 279 | # qv_circ.measure(qi, qi) 280 | 281 | qv_circ.metadata = { 282 | "experiment_type": self._type, 283 | "depth": depth, 284 | "trial": trial, 285 | "qubits": self.physical_qubits, 286 | "is_mirror_circuit": self.split_inverse, 287 | "is_from_left_half": True, 288 | } 289 | if self.left_and_right: 290 | right_qv_circ.measure_active() 291 | right_qv_circ.metadata = { 292 | "experiment_type": self._type, 293 | "depth": depth, 294 | "trial": trial, 295 | "qubits": self.physical_qubits, 296 | "is_mirror_circuit": self.split_inverse, 297 | "is_from_left_half": False, 298 | } 299 | if self.split_inverse and depth % 2 == 0: 300 | qv_circ.metadata["target_bitstring"] = target 301 | if self.left_and_right: 302 | right_qv_circ.metadata["target_bitstring"] = right_target 303 | else: 304 | qv_circ.metadata["target_bitstring"] = "1" * depth 305 | circuits.append(qv_circ) 306 | if self.left_and_right: 307 | circuits.append(right_qv_circ) 308 | 
return circuits 309 | 310 | def mirror_qv_circuit( 311 | self, 312 | qv_circ: QuantumCircuit, 313 | pauli_randomize: Optional[bool] = True, 314 | pauli_randomize_barriers: Optional[bool] = False, 315 | middle_pauli_randomize: Optional[bool] = False, 316 | left_and_right: Optional[bool] = False, 317 | seed: Optional[Union[int, SeedSequence, BitGenerator, Generator]] = None, 318 | ) -> QuantumCircuit: 319 | """Modify QV circuits by splitting, inverting, and composing and/or Pauli 320 | twirling 321 | 322 | Args: 323 | qv_circ: the circuit to modify 324 | pauli_randomize: if True, add layers of random Paulis to the beginning 325 | and the end of the cicuit 326 | pauli_randomize_barriers: if True, add barriers before/after Pauli twirling 327 | middle_pauli_randomize: if True, add a layer of random Paulis between 328 | the mirrored halves of the circuit 329 | left_and_right: if True, generate circuits using both the left and righ halves of the 330 | original circuit 331 | seed: seed for RNG 332 | 333 | Returns: 334 | A modifed QV circuit 335 | """ 336 | depth = self._num_qubits 337 | 338 | 339 | dag = circuit_to_dag(qv_circ) 340 | subdags = [] # DAG circuits in the left half 341 | right_subdags = [] # DAG circuits in the right half 342 | for i, layer in enumerate(dag.layers()): 343 | if i < depth / 2: 344 | subdags.append(layer["graph"]) 345 | else: 346 | if left_and_right: 347 | right_subdags.append(layer["graph"]) 348 | 349 | new_dag = dag.copy_empty_like() 350 | right_new_dag = dag.copy_empty_like() 351 | for subdag in subdags: 352 | new_dag.compose(subdag) 353 | if left_and_right: 354 | for subdag in right_subdags: 355 | right_new_dag.compose(subdag) 356 | 357 | new_qv_circ = dag_to_circuit(new_dag) 358 | 359 | new_qv_circ_inv = new_qv_circ.inverse() # mirrored QV circuit from left half 360 | if left_and_right: 361 | right_new_qv_circ = dag_to_circuit(right_new_dag) 362 | right_new_qv_circ_inv = ( 363 | right_new_qv_circ.inverse() 364 | ) # mirrored QV circuit from 
right half 365 | 366 | paulis = random_pauli_list( 367 | depth, 368 | size=6, 369 | seed=seed, 370 | phase=False, 371 | ) 372 | 373 | if pauli_randomize: 374 | first_pauli_circ = QuantumCircuit(depth) 375 | first_pauli_circ.compose(paulis[0], front=True, inplace=True) 376 | if pauli_randomize_barriers: 377 | first_pauli_circ.barrier() 378 | new_qv_circ.compose(first_pauli_circ, front=True, inplace=True) 379 | new_qv_circ.barrier() 380 | if middle_pauli_randomize: 381 | new_qv_circ.compose(paulis[1], inplace=True) 382 | new_qv_circ.barrier() 383 | new_qv_circ.compose(new_qv_circ_inv, inplace=True) 384 | if pauli_randomize_barriers: 385 | new_qv_circ.barrier() 386 | if pauli_randomize: 387 | new_qv_circ.compose(paulis[2], inplace=True) 388 | 389 | if left_and_right: 390 | if pauli_randomize: 391 | right_new_qv_circ_inv.compose(paulis[3], front=True, inplace=True) 392 | right_new_qv_circ_inv.barrier() 393 | if middle_pauli_randomize: 394 | right_new_qv_circ_inv.compose(paulis[4], inplace=True) 395 | right_new_qv_circ_inv.barrier() 396 | right_new_qv_circ_inv.compose(right_new_qv_circ, inplace=True) 397 | if pauli_randomize: 398 | right_new_qv_circ_inv.compose(paulis[5], inplace=True) 399 | 400 | composed_pauli = paulis[0].compose(paulis[2]) 401 | if pauli_randomize: 402 | target = "".join(["1" if x else "0" for x in composed_pauli.x[::-1]]) 403 | if left_and_right: 404 | right_composed_pauli = paulis[3].compose(paulis[5]) 405 | right_target = "".join(["1" if x else "0" for x in right_composed_pauli.x[::-1]]) 406 | else: 407 | right_target = "0" * self._num_qubits 408 | else: 409 | target = "0" * depth 410 | right_target = "0" * self._num_qubits 411 | 412 | return_tuple = (new_qv_circ, target) 413 | if left_and_right: 414 | return_tuple += (right_new_qv_circ_inv, right_target) 415 | 416 | return return_tuple 417 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mrb/mirror_qv_analysis.py: 
-------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Quantum Volume analysis class. 14 | """ 15 | 16 | 17 | import numpy as np 18 | from uncertainties import unumpy as unp 19 | from uncertainties import ufloat 20 | 21 | from qiskit_experiments.exceptions import AnalysisError 22 | from qiskit_experiments.data_processing import DataProcessor 23 | from qiskit_experiments.framework import ( 24 | BaseAnalysis, 25 | AnalysisResultData, 26 | Options, 27 | ExperimentData 28 | ) 29 | from qiskit_experiments.framework.containers import ArtifactData 30 | 31 | #import this data processor from rb_analysis 32 | from qiskit_device_benchmarking.bench_code.mrb.mirror_rb_analysis import _ComputeQuantities 33 | 34 | class MirrorQuantumVolumeAnalysis(BaseAnalysis): 35 | r"""A class to analyze mirror quantum volume experiments. 36 | 37 | # section: overview 38 | Calculate the success (fraction of target measured) and polarization 39 | Optionally calcuate an effective HOP 40 | """ 41 | 42 | def _initialize(self, experiment_data: ExperimentData): 43 | """Initialize curve analysis by setting up the data processor for Mirror 44 | RB data. 45 | 46 | Args: 47 | experiment_data: Experiment data to analyze. 
48 | """ 49 | 50 | target_bs = [] 51 | self.depth = None 52 | self.ntrials = 0 53 | for circ_result in experiment_data.data(): 54 | target_bs.append(circ_result["metadata"]["target_bitstring"]) 55 | trial_depth = circ_result["metadata"]["depth"] 56 | self.ntrials += 1 57 | if self.depth is None: 58 | self.depth = trial_depth 59 | elif trial_depth != self.depth: 60 | raise AnalysisError("QuantumVolume circuits do not all have the same depth.") 61 | 62 | num_qubits = self.depth 63 | 64 | self.set_options( 65 | data_processor=DataProcessor( 66 | input_key="counts", 67 | data_actions=[ 68 | _ComputeQuantities( 69 | analyzed_quantity=self.options.analyzed_quantity, 70 | num_qubits=num_qubits, 71 | target_bs=target_bs, 72 | ) 73 | ], 74 | ) 75 | ) 76 | 77 | @classmethod 78 | def _default_options(cls) -> Options: 79 | """Return default analysis options. 80 | 81 | Analysis Options: 82 | plot (bool): Set ``True`` to create figure for fit result. 83 | ax(AxesSubplot): Optional. A matplotlib axis object to draw. 84 | """ 85 | options = super()._default_options() 86 | options.plot = False 87 | options.ax = None 88 | options.calc_hop = True 89 | 90 | # By default, effective polarization is plotted (see arXiv:2112.09853). We can 91 | # also plot success probability or adjusted success probability (see PyGSTi). 
92 | # Do this by setting options to "Success Probability" or "Adjusted Success Probability" 93 | options.analyzed_quantity = "Effective Polarization" 94 | 95 | options.set_validator( 96 | field="analyzed_quantity", 97 | validator_value=[ 98 | "Success Probability", 99 | "Adjusted Success Probability", 100 | "Effective Polarization", 101 | ], 102 | ) 103 | 104 | return options 105 | 106 | 107 | def _run_analysis( 108 | self, 109 | experiment_data: ExperimentData, 110 | ): 111 | 112 | results = [] 113 | artifacts = [] 114 | 115 | # Prepare for fitting 116 | self._initialize(experiment_data) 117 | 118 | processed = self.options.data_processor(experiment_data.data()) 119 | yvals = unp.nominal_values(processed).flatten() 120 | 121 | success_prob_result = AnalysisResultData( 122 | "mean_success_probability", 123 | value=ufloat( 124 | nominal_value=np.mean(yvals), std_dev=np.std(yvals) 125 | ), 126 | quality="good", 127 | extra={ 128 | "depth": self.depth, 129 | "trials": self.ntrials, 130 | }, 131 | ) 132 | 133 | artifacts.append( 134 | ArtifactData( 135 | name="data", 136 | data=yvals, 137 | ) 138 | ) 139 | 140 | if self.options.plot: 141 | #figure out what to do 142 | figures = None 143 | else: 144 | figures = None 145 | 146 | results.append(success_prob_result) 147 | 148 | return results+artifacts, figures 149 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mrb/mirror_rb_analysis.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2023. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Mirror RB analysis class. 14 | """ 15 | from typing import List, Union 16 | import numpy as np 17 | from uncertainties import unumpy as unp 18 | from scipy.spatial.distance import hamming 19 | 20 | import qiskit_experiments.curve_analysis as curve 21 | from qiskit_experiments.framework import AnalysisResultData, ExperimentData 22 | from qiskit_experiments.data_processing import DataProcessor 23 | from qiskit_experiments.data_processing.data_action import DataAction 24 | from qiskit_experiments.library.randomized_benchmarking.rb_analysis import RBAnalysis 25 | 26 | 27 | class MirrorRBAnalysis(RBAnalysis): 28 | r"""A class to analyze mirror randomized benchmarking experiment. 29 | 30 | # section: overview 31 | This analysis takes a series for Mirror RB curve fitting. 32 | From the fit :math:`\alpha` value this analysis estimates the mean entanglement infidelity (EI) 33 | and the error per Clifford (EPC), also known as the average gate infidelity (AGI). 34 | 35 | The EPC (AGI) estimate is obtained using the equation 36 | 37 | .. math:: 38 | 39 | EPC = \frac{2^n - 1}{2^n}\left(1 - \alpha\right) 40 | 41 | where :math:`n` is the number of qubits (width of the circuit). 42 | 43 | The EI is obtained using the equation 44 | 45 | .. math:: 46 | 47 | EI = \frac{4^n - 1}{4^n}\left(1 - \alpha\right) 48 | 49 | The fit :math:`\alpha` parameter can be fit using one of the following three quantities 50 | plotted on the y-axis: 51 | 52 | Success Probabilities (:math:`p`): The proportion of shots that return the correct bitstring 53 | 54 | Adjusted Success Probabilities (:math:`p_0`): 55 | 56 | .. 
math:: 57 | 58 | p_0 = \sum_{k = 0}^n \left(-\frac{1}{2}\right)^k h_k 59 | 60 | where :math:`h_k` is the probability of observing a bitstring of Hamming distance of k from the 61 | correct bitstring 62 | 63 | Effective Polarizations (:math:`S`): 64 | 65 | .. math:: 66 | 67 | S = \frac{4^n}{4^n-1}\left(\sum_{k=0}^n\left(-\frac{1}{2}\right)^k h_k\right)-\frac{1}{4^n-1} 68 | 69 | # section: fit_model 70 | The fit is based on the following decay functions: 71 | 72 | .. math:: 73 | 74 | F(x) = a \alpha^{x} + b 75 | 76 | # section: fit_parameters 77 | defpar a: 78 | desc: Height of decay curve. 79 | init_guess: Determined by :math:`1 - b`. 80 | bounds: [0, 1] 81 | defpar b: 82 | desc: Base line. 83 | init_guess: Determined by :math:`(1/2)^n` (for success probability) or :math:`(1/4)^n` 84 | (for adjusted success probability and effective polarization). 85 | bounds: [0, 1] 86 | defpar \alpha: 87 | desc: Depolarizing parameter. 88 | init_guess: Determined by :func:`~rb_decay` with standard RB curve. 89 | bounds: [0, 1] 90 | 91 | # section: reference 92 | .. ref_arxiv:: 1 2112.09853 93 | 94 | """ 95 | 96 | @classmethod 97 | def _default_options(cls): 98 | """Default analysis options. 99 | 100 | Analysis Options: 101 | analyzed_quantity (str): Set the metric to plot on the y-axis. Must be one of 102 | "Effective Polarization" (default), "Success Probability", or "Adjusted 103 | Success Probability". 104 | gate_error_ratio (Optional[Dict[str, float]]): A dictionary with gate name keys 105 | and error ratio values used when calculating EPG from the estimated EPC. 106 | The default value will use standard gate error ratios. 107 | If you don't know accurate error ratio between your basis gates, 108 | you can skip analysis of EPGs by setting this options to ``None``. 109 | epg_1_qubit (List[AnalysisResult]): Analysis results from previous RB experiments 110 | for individual single qubit gates. 
If this is provided, EPC of 111 | 2Q RB is corrected to exclude the depolarization of underlying 1Q channels. 112 | """ 113 | default_options = super()._default_options() 114 | 115 | # Set labels of axes 116 | default_options.plotter.set_figure_options( 117 | xlabel="Clifford Length", 118 | ylabel="Effective Polarization", 119 | ) 120 | 121 | # Plot all (adjusted) success probabilities 122 | default_options.plot_raw_data = True 123 | 124 | # Exponential decay parameter 125 | default_options.result_parameters = ["alpha"] 126 | 127 | # Default gate error ratio for calculating EPG 128 | default_options.gate_error_ratio = "default" 129 | 130 | # By default, EPG for single qubits aren't set 131 | default_options.epg_1_qubit = None 132 | 133 | # By default, effective polarization is plotted (see arXiv:2112.09853). We can 134 | # also plot success probability or adjusted success probability (see PyGSTi). 135 | # Do this by setting options to "Success Probability" or "Adjusted Success Probability" 136 | default_options.analyzed_quantity = "Effective Polarization" 137 | 138 | default_options.set_validator( 139 | field="analyzed_quantity", 140 | validator_value=[ 141 | "Success Probability", 142 | "Adjusted Success Probability", 143 | "Effective Polarization", 144 | ], 145 | ) 146 | 147 | return default_options 148 | 149 | def _generate_fit_guesses( 150 | self, 151 | user_opt: curve.FitOptions, 152 | curve_data: curve.ScatterTable, 153 | ) -> Union[curve.FitOptions, List[curve.FitOptions]]: 154 | """Create algorithmic guess with analysis options and curve data. 155 | 156 | Args: 157 | user_opt: Fit options filled with user provided guess and bounds. 158 | curve_data: Formatted data collection to fit. 159 | 160 | Returns: 161 | List of fit options that are passed to the fitter function. 
162 | """ 163 | 164 | user_opt.bounds.set_if_empty(a=(0, 1), alpha=(0, 1), b=(0, 1)) 165 | num_qubits = len(self._physical_qubits) 166 | 167 | # Initialize guess for baseline and amplitude based on infidelity type 168 | b_guess = 1 / 4**num_qubits 169 | if self.options.analyzed_quantity == "Success Probability": 170 | b_guess = 1 / 2**num_qubits 171 | 172 | mirror_curve = curve_data.get_subset_of("rb_decay") 173 | alpha_mirror = curve.guess.rb_decay(mirror_curve.x, mirror_curve.y, b=b_guess) 174 | a_guess = (curve_data.y[0] - b_guess) / (alpha_mirror ** curve_data.x[0]) 175 | 176 | user_opt.p0.set_if_empty(b=b_guess, a=a_guess, alpha=alpha_mirror) 177 | 178 | return user_opt 179 | 180 | def _create_analysis_results( 181 | self, 182 | fit_data: curve.CurveFitResult, 183 | quality: str, 184 | **metadata, 185 | ) -> List[AnalysisResultData]: 186 | """Create analysis results for important fit parameters. Besides the 187 | default standard RB parameters, Entanglement Infidelity (EI) is also calculated. 188 | 189 | Args: 190 | fit_data: Fit outcome. 191 | quality: Quality of fit outcome. 192 | 193 | Returns: 194 | List of analysis result data. 195 | """ 196 | 197 | outcomes = super()._create_analysis_results(fit_data, quality, **metadata) 198 | num_qubits = len(self._physical_qubits) 199 | 200 | # nrb is calculated for both EPC and EI per the equations in the docstring 201 | ei_nrb = 4**num_qubits 202 | ei_scale = (ei_nrb - 1) / ei_nrb 203 | ei = ei_scale * (1 - fit_data.ufloat_params["alpha"]) 204 | 205 | outcomes.append( 206 | AnalysisResultData( 207 | name="EI", value=ei, chisq=fit_data.reduced_chisq, quality=quality, extra=metadata 208 | ) 209 | ) 210 | 211 | return outcomes 212 | 213 | def _initialize(self, experiment_data: ExperimentData): 214 | """Initialize curve analysis by setting up the data processor for Mirror 215 | RB data. 216 | 217 | Args: 218 | experiment_data: Experiment data to analyze. 
219 | """ 220 | super()._initialize(experiment_data) 221 | 222 | num_qubits = len(self._physical_qubits) 223 | target_bs = [] 224 | for circ_result in experiment_data.data(): 225 | if circ_result["metadata"]["inverting_pauli_layer"] is True: 226 | target_bs.append("0" * num_qubits) 227 | else: 228 | target_bs.append(circ_result["metadata"]["target"]) 229 | 230 | self.set_options( 231 | data_processor=DataProcessor( 232 | input_key="counts", 233 | data_actions=[ 234 | _ComputeQuantities( 235 | analyzed_quantity=self.options.analyzed_quantity, 236 | num_qubits=num_qubits, 237 | target_bs=target_bs, 238 | ) 239 | ], 240 | ) 241 | ) 242 | 243 | 244 | class _ComputeQuantities(DataAction): 245 | """Data processing node for computing useful mirror RB quantities from raw results.""" 246 | 247 | def __init__( 248 | self, 249 | num_qubits, 250 | target_bs, 251 | analyzed_quantity: str = "Effective Polarization", 252 | validate: bool = True, 253 | ): 254 | """ 255 | Args: 256 | num_qubits: Number of qubits. 257 | quantity: The quantity to calculate. 258 | validate: If set to False the DataAction will not validate its input. 
259 | """ 260 | super().__init__(validate) 261 | self._num_qubits = num_qubits 262 | self._analyzed_quantity = analyzed_quantity 263 | self._target_bs = target_bs 264 | 265 | def _process(self, data: np.ndarray): 266 | # Arrays to store the y-axis data and uncertainties 267 | y_data = [] 268 | y_data_unc = [] 269 | 270 | for i, circ_result in enumerate(data): 271 | target_bs = self._target_bs[i] 272 | 273 | # h[k] = proportion of shots that are Hamming distance k away from target bitstring 274 | hamming_dists = np.zeros(self._num_qubits + 1) 275 | success_prob = 0.0 276 | success_prob_unc = 0.0 277 | for bitstring, count in circ_result.items(): 278 | # Compute success probability 279 | if self._analyzed_quantity == "Success Probability": 280 | if bitstring == target_bs: 281 | success_prob = count / sum(circ_result.values()) 282 | success_prob_unc = np.sqrt(success_prob * (1 - success_prob)) 283 | break 284 | else: 285 | # Compute hamming distance proportions 286 | target_bs_to_list = [int(char) for char in target_bs] 287 | actual_bs_to_list = [int(char) for char in bitstring] 288 | k = int(round(hamming(target_bs_to_list, actual_bs_to_list) * self._num_qubits)) 289 | hamming_dists[k] += count / sum(circ_result.values()) 290 | 291 | if self._analyzed_quantity == "Success Probability": 292 | y_data.append(success_prob) 293 | y_data_unc.append(success_prob_unc) 294 | continue 295 | 296 | # Compute hamming distance uncertainties 297 | hamming_dist_unc = np.sqrt(hamming_dists * (1 - hamming_dists)) 298 | 299 | # Compute adjusted success probability and standard deviation 300 | adjusted_success_prob = 0.0 301 | adjusted_success_prob_unc = 0.0 302 | for k in range(self._num_qubits + 1): 303 | adjusted_success_prob += (-0.5) ** k * hamming_dists[k] 304 | adjusted_success_prob_unc += (0.5) ** k * hamming_dist_unc[k] ** 2 305 | adjusted_success_prob_unc = np.sqrt(adjusted_success_prob_unc) 306 | if self._analyzed_quantity == "Adjusted Success Probability": 307 | 
y_data.append(adjusted_success_prob) 308 | y_data_unc.append(adjusted_success_prob_unc) 309 | 310 | # Compute effective polarization and standard deviation (arXiv:2112.09853v1) 311 | pol_factor = 4**self._num_qubits 312 | pol = pol_factor / (pol_factor - 1) * adjusted_success_prob - 1 / (pol_factor - 1) 313 | pol_unc = np.sqrt(pol_factor / (pol_factor - 1)) * adjusted_success_prob_unc 314 | if self._analyzed_quantity == "Effective Polarization": 315 | y_data.append(pol) 316 | y_data_unc.append(pol_unc) 317 | 318 | return unp.uarray(y_data, y_data_unc) 319 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/prb/Readme.md: -------------------------------------------------------------------------------- 1 | ## Purity RB 2 | 3 | Code for running purity RB (notebook [here](https://github.com/qiskit-community/qiskit-device-benchmarking/blob/main/notebooks/device_rb.ipynb)). Purity RB appends post rotations 4 | to the RB sequences to calculate Tr(rho^2) as described in the supplement of [arXiv:2302.10881](https://arxiv.org/abs/2302.10881) and previously in the Ignis code base. 5 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/prb/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Purity RB 14 | 15 | .. 
currentmodule:: qiskit_experiments_internal.library.quantum_volume 16 | 17 | Classes 18 | ======= 19 | .. autosummary:: 20 | ::undoc-members: 21 | 22 | PurityRB 23 | PurityRBAnalysis 24 | 25 | """ 26 | 27 | from .pur_rb import PurityRB 28 | from .purrb_analysis import PurityRBAnalysis 29 | 30 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/prb/pur_rb.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Purity RB Experiment class. 14 | """ 15 | 16 | import numpy as np 17 | from numpy.random import Generator 18 | from numpy.random.bit_generator import BitGenerator, SeedSequence 19 | from numbers import Integral 20 | from typing import Union, Iterable, Optional, List, Sequence 21 | 22 | from qiskit import QuantumCircuit 23 | from qiskit.quantum_info import Clifford 24 | from qiskit.providers.backend import Backend 25 | from qiskit.circuit import CircuitInstruction, Barrier 26 | from qiskit_experiments.library.randomized_benchmarking import StandardRB 27 | 28 | SequenceElementType = Union[Clifford, Integral, QuantumCircuit] 29 | 30 | from .purrb_analysis import PurityRBAnalysis 31 | 32 | class PurityRB(StandardRB): 33 | """An experiment to characterize the error rate of a gate set on a device. 
34 | using purity RB 35 | 36 | # section: overview 37 | 38 | Randomized Benchmarking (RB) is an efficient and robust method 39 | for estimating the average error rate of a set of quantum gate operations. 40 | See `Qiskit Textbook 41 | `_ 42 | for an explanation on the RB method. 43 | 44 | A standard RB experiment generates sequences of random Cliffords 45 | such that the unitary computed by the sequences is the identity. 46 | After running the sequences on a backend, it calculates the probabilities to get back to 47 | the ground state, fits an exponentially decaying curve, and estimates 48 | the Error Per Clifford (EPC), as described in Refs. [1, 2]. 49 | 50 | .. note:: 51 | In 0.5.0, the default value of ``optimization_level`` in ``transpile_options`` changed 52 | from ``0`` to ``1`` for RB experiments. That may result in shorter RB circuits 53 | hence slower decay curves than before. 54 | 55 | # section: analysis_ref 56 | :class:`RBAnalysis` 57 | 58 | # section: manual 59 | :doc:`/manuals/verification/randomized_benchmarking` 60 | 61 | # section: reference 62 | .. ref_arxiv:: 1 1009.3639 63 | .. ref_arxiv:: 2 1109.6887 64 | """ 65 | 66 | def __init__( 67 | self, 68 | physical_qubits: Sequence[int], 69 | lengths: Iterable[int], 70 | backend: Optional[Backend] = None, 71 | num_samples: int = 3, 72 | seed: Optional[Union[int, SeedSequence, BitGenerator, Generator]] = None, 73 | full_sampling: Optional[bool] = False, 74 | ): 75 | """Initialize a standard randomized benchmarking experiment. 76 | 77 | Args: 78 | physical_qubits: List of physical qubits for the experiment. 79 | lengths: A list of RB sequences lengths. 80 | backend: The backend to run the experiment on. 81 | num_samples: Number of samples to generate for each sequence length. 82 | seed: Optional, seed used to initialize ``numpy.random.default_rng``. 83 | when generating circuits. The ``default_rng`` will be initialized 84 | with this seed value every time :meth:`circuits` is called. 
85 | full_sampling: If True all Cliffords are independently sampled for all lengths. 86 | If False for sample of lengths longer sequences are constructed 87 | by appending additional samples to shorter sequences. 88 | The default is False. 89 | 90 | Raises: 91 | QiskitError: If any invalid argument is supplied. 92 | """ 93 | # Initialize base experiment (RB) 94 | super().__init__(physical_qubits, lengths, backend, num_samples, seed, full_sampling) 95 | 96 | #override the analysis 97 | self.analysis = PurityRBAnalysis() 98 | self.analysis.set_options(outcome="0" * self.num_qubits) 99 | self.analysis.plotter.set_figure_options( 100 | xlabel="Clifford Length", 101 | ylabel="Purity", 102 | ) 103 | 104 | 105 | def circuits(self) -> List[QuantumCircuit]: 106 | """Return a list of RB circuits. 107 | 108 | Returns: 109 | A list of :class:`QuantumCircuit`. 110 | """ 111 | # Sample random Clifford sequences 112 | sequences = self._sample_sequences() 113 | # Convert each sequence into circuit and append the inverse to the end. 114 | # and the post-rotations 115 | circuits = self._sequences_to_circuits(sequences) 116 | # Add metadata for each circuit 117 | # trial links all from the same trial 118 | # needed for post processing the purity RB 119 | for circ_i, circ in enumerate(circuits): 120 | circ.metadata = { 121 | "xval": len(sequences[int(circ_i/3**self.num_qubits)]), 122 | "trial": int(circ_i/3**self.num_qubits), 123 | "group": "Clifford", 124 | } 125 | return circuits 126 | 127 | 128 | def _sequences_to_circuits( 129 | self, sequences: List[Sequence[SequenceElementType]] 130 | ) -> List[QuantumCircuit]: 131 | """Convert an RB sequence into circuit and append the inverse to the end and 132 | then the post rotations for purity RB 133 | 134 | Returns: 135 | A list of purity RB circuits. 
136 | """ 137 | synthesis_opts = self._get_synthesis_options() 138 | 139 | #post rotations as cliffords 140 | post_rot = [] 141 | for i in range(3**self.num_qubits): 142 | ##find clifford 143 | qc = QuantumCircuit(self.num_qubits) 144 | for j in range(self.num_qubits): 145 | qg_ind = np.mod(int(i/3**j),3) 146 | if qg_ind==1: 147 | qc.sx(j) 148 | elif qg_ind==2: 149 | qc.sdg(j) 150 | qc.sx(j) 151 | qc.s(j) 152 | 153 | post_rot.append(self._to_instruction(Clifford(qc), synthesis_opts)) 154 | 155 | # Circuit generation 156 | circuits = [] 157 | for i, seq in enumerate(sequences): 158 | if ( 159 | self.experiment_options.full_sampling 160 | or i % len(self.experiment_options.lengths) == 0 161 | ): 162 | prev_elem, prev_seq = self._StandardRB__identity_clifford(), [] 163 | 164 | circ = QuantumCircuit(self.num_qubits) 165 | for elem in seq: 166 | circ.append(self._to_instruction(elem, synthesis_opts), circ.qubits) 167 | circ._append(CircuitInstruction(Barrier(self.num_qubits), circ.qubits)) 168 | 169 | # Compute inverse, compute only the difference from the previous shorter sequence 170 | prev_elem = self._StandardRB__compose_clifford_seq(prev_elem, seq[len(prev_seq) :]) 171 | prev_seq = seq 172 | inv = self._StandardRB__adjoint_clifford(prev_elem) 173 | 174 | circ.append(self._to_instruction(inv, synthesis_opts), circ.qubits) 175 | 176 | #copy the circuit and apply post rotations 177 | for j in range(3**self.num_qubits): 178 | circ2 = circ.copy() 179 | circ2.append(post_rot[j], circ.qubits) 180 | circ2.measure_all() # includes insertion of the barrier before measurement 181 | circuits.append(circ2) 182 | 183 | return circuits -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/prb/purrb_analysis.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 
4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Purity RB analysis class. 14 | """ 15 | 16 | from typing import List, Dict, Union 17 | 18 | from qiskit.result import sampled_expectation_value 19 | 20 | from qiskit_experiments.curve_analysis import ScatterTable 21 | import qiskit_experiments.curve_analysis as curve 22 | from qiskit_experiments.framework import AnalysisResultData 23 | from qiskit_experiments.library.randomized_benchmarking import RBAnalysis 24 | from qiskit_experiments.library.randomized_benchmarking.rb_analysis import (_calculate_epg, 25 | _exclude_1q_error) 26 | 27 | 28 | class PurityRBAnalysis(RBAnalysis): 29 | r"""A class to analyze purity randomized benchmarking experiments. 30 | 31 | # section: overview 32 | This analysis takes only single series. 33 | This series is fit by the exponential decay function. 34 | From the fit :math:`\alpha` value this analysis estimates the error per Clifford (EPC). 35 | 36 | When analysis option ``gate_error_ratio`` is provided, this analysis also estimates 37 | errors of individual gates assembling a Clifford gate. 38 | In computation of two-qubit EPC, this analysis can also decompose 39 | the contribution from the underlying single qubit depolarizing channels when 40 | ``epg_1_qubit`` analysis option is provided [1]. 41 | 42 | # section: fit_model 43 | .. math:: 44 | 45 | F(x) = a \alpha^x + b 46 | 47 | # section: fit_parameters 48 | defpar a: 49 | desc: Height of decay curve. 50 | init_guess: Determined by :math:`1 - b`. 51 | bounds: [0, 1] 52 | defpar b: 53 | desc: Base line. 
54 | init_guess: Determined by :math:`(1/2)^n` where :math:`n` is number of qubit. 55 | bounds: [0, 1] 56 | defpar \alpha: 57 | desc: Depolarizing parameter. 58 | init_guess: Determined by :func:`~.guess.rb_decay`. 59 | bounds: [0, 1] 60 | 61 | # section: reference 62 | .. ref_arxiv:: 1 1712.06550 63 | 64 | """ 65 | 66 | def __init__(self): 67 | super().__init__() 68 | 69 | 70 | def _run_data_processing( 71 | self, 72 | raw_data: List[Dict], 73 | category: str = "raw", 74 | ) -> ScatterTable: 75 | """Perform data processing from the experiment result payload. 76 | 77 | For purity this converts the counts into Trace(rho^2) and then runs the 78 | rest of the standard RB fitters 79 | 80 | For now this does it by spoofing a new counts dictionary and then 81 | calling the super _run_data_processing 82 | 83 | Args: 84 | raw_data: Payload in the experiment data. 85 | category: Category string of the output dataset. 86 | 87 | Returns: 88 | Processed data that will be sent to the formatter method. 89 | 90 | Raises: 91 | DataProcessorError: When key for x values is not found in the metadata. 92 | ValueError: When data processor is not provided. 93 | """ 94 | 95 | #figure out the number of qubits... 
has to be 1 or 2 for now 96 | if self.options.outcome=='0': 97 | nq=1 98 | elif self.options.outcome=='00': 99 | nq=2 100 | else: 101 | raise ValueError("Only supporting 1 or 2Q purity") 102 | 103 | ntrials = int(len(raw_data)/3**nq) 104 | raw_data2 = [] 105 | nshots = int(sum(raw_data[0]['counts'].values())) 106 | 107 | for i in range(ntrials): 108 | trial_raw = [d for d in raw_data if d["metadata"]["trial"]==i] 109 | 110 | raw_data2.append(trial_raw[0]) 111 | 112 | purity = 1/2**nq 113 | if nq==1: 114 | for ii in range(3): 115 | purity += sampled_expectation_value(trial_raw[ii]['counts'],'Z')**2/2**nq 116 | else: 117 | for ii in range(9): 118 | purity += sampled_expectation_value(trial_raw[ii]['counts'],'ZZ')**2/2**nq 119 | purity += sampled_expectation_value(trial_raw[ii]['counts'],'IZ')**2/2**nq/3**(nq-1) 120 | purity += sampled_expectation_value(trial_raw[ii]['counts'],'ZI')**2/2**nq/3**(nq-1) 121 | 122 | raw_data2[-1]['counts'] = {'0'*nq: int(purity*nshots*10),'1'*nq: int((1-purity)*nshots*10)} 123 | 124 | return super()._run_data_processing(raw_data2,category) 125 | 126 | 127 | def _create_analysis_results( 128 | self, 129 | fit_data: curve.CurveFitResult, 130 | quality: str, 131 | **metadata, 132 | ) -> List[AnalysisResultData]: 133 | """Create analysis results for important fit parameters. 134 | 135 | Args: 136 | fit_data: Fit outcome. 137 | quality: Quality of fit outcome. 138 | 139 | Returns: 140 | List of analysis result data. 
141 | """ 142 | outcomes = curve.CurveAnalysis._create_analysis_results(self, fit_data, quality, **metadata) 143 | num_qubits = len(self._physical_qubits) 144 | 145 | # Calculate EPC 146 | # For purity we need to correct by 147 | alpha = fit_data.ufloat_params["alpha"]**0.5 148 | scale = (2**num_qubits - 1) / (2**num_qubits) 149 | epc = scale * (1 - alpha) 150 | 151 | outcomes.append( 152 | AnalysisResultData( 153 | name="EPC", 154 | value=epc, 155 | chisq=fit_data.reduced_chisq, 156 | quality=quality, 157 | extra=metadata, 158 | ) 159 | ) 160 | 161 | # Correction for 1Q depolarizing channel if EPGs are provided 162 | if self.options.epg_1_qubit and num_qubits == 2: 163 | epc = _exclude_1q_error( 164 | epc=epc, 165 | qubits=self._physical_qubits, 166 | gate_counts_per_clifford=self._gate_counts_per_clifford, 167 | extra_analyses=self.options.epg_1_qubit, 168 | ) 169 | outcomes.append( 170 | AnalysisResultData( 171 | name="EPC_corrected", 172 | value=epc, 173 | chisq=fit_data.reduced_chisq, 174 | quality=quality, 175 | extra=metadata, 176 | ) 177 | ) 178 | 179 | # Calculate EPG 180 | if self._gate_counts_per_clifford is not None and self.options.gate_error_ratio: 181 | epg_dict = _calculate_epg( 182 | epc=epc, 183 | qubits=self._physical_qubits, 184 | gate_error_ratio=self.options.gate_error_ratio, 185 | gate_counts_per_clifford=self._gate_counts_per_clifford, 186 | ) 187 | if epg_dict: 188 | for gate, epg_val in epg_dict.items(): 189 | outcomes.append( 190 | AnalysisResultData( 191 | name=f"EPG_{gate}", 192 | value=epg_val, 193 | chisq=fit_data.reduced_chisq, 194 | quality=quality, 195 | extra=metadata, 196 | ) 197 | ) 198 | 199 | return outcomes 200 | 201 | def _generate_fit_guesses( 202 | self, 203 | user_opt: curve.FitOptions, 204 | curve_data: curve.ScatterTable, 205 | ) -> Union[curve.FitOptions, List[curve.FitOptions]]: 206 | """Create algorithmic initial fit guess from analysis options and curve data. 
207 | 208 | Args: 209 | user_opt: Fit options filled with user provided guess and bounds. 210 | curve_data: Formatted data collection to fit. 211 | 212 | Returns: 213 | List of fit options that are passed to the fitter function. 214 | """ 215 | user_opt.bounds.set_if_empty( 216 | a=(0, 1), 217 | alpha=(0, 1), 218 | b=(0, 1), 219 | ) 220 | 221 | b_guess = 1 / 2 ** len(self._physical_qubits) 222 | if len(curve_data.x)>3: 223 | alpha_guess = curve.guess.rb_decay(curve_data.x[0:3], curve_data.y[0:3], b=b_guess) 224 | else: 225 | alpha_guess = curve.guess.rb_decay(curve_data.x, curve_data.y, b=b_guess) 226 | 227 | alpha_guess = alpha_guess**2 228 | 229 | if alpha_guess < 0.6: 230 | a_guess = (curve_data.y[0] - b_guess) 231 | else: 232 | a_guess = (curve_data.y[0] - b_guess) / (alpha_guess ** curve_data.x[0]) 233 | 234 | user_opt.p0.set_if_empty( 235 | b=b_guess, 236 | a=a_guess, 237 | alpha=alpha_guess, 238 | ) 239 | 240 | return user_opt 241 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/clops/README.md: -------------------------------------------------------------------------------- 1 | # CLOPS Benchmark 2 | 3 | This benchmark measures Circuit Layer Operations Per Seconds (CLOPS) of 4 | parameterized utility scale hardware efficient circuits. 5 | CLOPS measures the steady state throughput of a large quantity of 6 | these parameterized circuits that are 7 | of width 100 qubits with 100 layers of gates. 8 | Each layer consists of two qubit gates across as many qubits 9 | as possible that can be done in parallel, followed by a single qubit 10 | gate(s) on every qubit to allow any arbitrary rotation, with those 11 | rotations being parameterized. 12 | Parameters are applied to the circuit to generate a large number of 13 | instantiated circuits to be executed on the quantum computer. It is 14 | up to the vendor on how to optimally execute these circuits for 15 | maximal throughput. 
16 | 
17 | CLOPS now supports the new `gen3-turbo` flag for execution path available 
18 | on some of our devices. 
19 | 
20 | ## Example 
21 | 
22 | ```python 
23 | from qiskit_ibm_runtime import QiskitRuntimeService 
24 | from qiskit_device_benchmarking.clops.clops_benchmark import clops_benchmark 
25 | 
26 | service = QiskitRuntimeService(channel="ibm_quantum", 
27 | instance="your-hub/group/project") 
28 | 
29 | # Run clops with default settings (twirled circuits, 1000 circuits in run, 
30 | # 100 wide by 100 layers, etc) Note this is done in a session and currently 
31 | # takes about 10 minutes to run 
32 | my_clops_run = clops_benchmark(service, "your-favorite-ibm-quantum-computer") 
33 | 
34 | # To run clops with the new `gen3-turbo` path, you can specify the 
35 | # execution path. For the new faster path you should increase the number 
36 | # of circuits to 5,000 
37 | my_clops_run = clops_benchmark(service, "machine supporting gen3-turbo", execution_path='gen3-turbo', num_circuits = 5000) 
38 | 
39 | # We can check the attributes of the benchmark run 
40 | print(my_clops_run.job_attributes) 
41 | {'backend_name': 'ibm_brisbane', 'width': 100, 'layers': 100, 'shots': 100, 'rep_delay': 0.00025, 'num_circuits': 1000, 'circuit_type': 'twirled', 'batch_size': None, 'pipelines': 1} 
42 | 
43 | # There is a standard qiskit job and we can check its status, job_id, etc 
44 | print(my_clops_run.job.status()) 
45 | QUEUED 
46 | 
47 | # The clops method will calculate the clops value for the run 
48 | # Note this call will block until the result is ready 
49 | print("Measured clops of", my_clops_run.job_attributes['backend_name'], "is", my_clops_run.clops()) 
50 | Measured clops of ibm_brisbane is 30256 
51 | ``` 
52 | 
53 | 
54 | 
55 | ## Variations 
56 | 
57 | 
58 | The benchmark code provides several 
59 | ways to measure CLOPS depending on the capability of the quantum computer.
60 | 61 | The "twirling" method uses the native parameterization of the Sampler 62 | primitive to parameterize the circuit, and optimal batching of the 63 | circuits is assumed to be done by the Sampler, freeing the user 64 | from having to optimize the batch size. The only requirement is 65 | that the total number of circuits executed needs to be chosen to 66 | get the system into a steady state to measure CLOPS. 67 | 68 | The "parameterized" method is similar, but instead sends an already 69 | parameterized circuit to the Sampler primitive, along with enough 70 | parameters to execute the specified number of circuits. Batching 71 | again is handled by the Sampler. This method requires larger bandwidth 72 | to send in all of the necessary parameters. Currently on IBM systems 73 | you will need to limit the number of circuits to approximately 160 to 74 | fit within API job input limits. 75 | 76 | The "instantiated" method (not yet implemented) is for systems that cannot natively 77 | handle parameterized circuits. In this case the circuit parameters 78 | are bound locally and then sent to the quantum computer for execution. 79 | This method requires the user to specify the desired size of each 80 | batch of circuits (so that they can be sent together the quantum computer) 81 | as well as the number of local parallel pipelines to bind parameters and 82 | create payloads in parallel. The user will need to tune both of these 83 | parameters to try and optimize performance of the system. 
This will 84 | tend to be much slower than on systems that natively support parameterized 85 | circuits 86 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/clops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qiskit-community/qiskit-device-benchmarking/367c1268defd5a0130f7ffafb00b788f3891e443/qiskit_device_benchmarking/clops/__init__.py -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/README.md: -------------------------------------------------------------------------------- 1 | # Mirror Circuit Benchmark 2 | 3 | This is meant to offer a straightforward test of Estimator primitives and the ability to 4 | deliver accurate expectation values for utility-scale circuits. It uses Trotterized time- 5 | evolution of a 1D Ising chain as the test circuit, but with the circuit mirrored so that 6 | its effective action is equivalent to the identity. This makes it trivial to detect 7 | whether the returned expectation values are accurate. 8 | 9 | Note that with its default settings -- at the time of this writing -- executing this 10 | benchmark requires 8-9 hours of wall clock time, and about 2.5 hours of QPU time. 11 | 12 | ## Usage example 13 | 14 | ```python 15 | from qiskit_ibm_runtime import QiskitRuntimeService 16 | from qiskit_device_benchmarking.mirror_test.mirror_test import submit_mirror_test, analyze_mirror_result 17 | 18 | service = QiskitRuntimeService(channel="ibm_quantum", 19 | instance="your-hub/group/project") 20 | 21 | backend = service.backend("your-favorite-ibm-quantum-computer") 22 | 23 | job = submit_mirror_test(backend, num_qubits=100, num_gates=2500) 24 | 25 | # wait for job to complete execution, then... 
26 | result = job.result() 27 | analyze_mirror_result(result, accuracy_threshold=0.1, make_plots=True) 28 | ``` 29 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qiskit-community/qiskit-device-benchmarking/367c1268defd5a0130f7ffafb00b788f3891e443/qiskit_device_benchmarking/mirror_test/__init__.py -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/get_optimal_path.py: -------------------------------------------------------------------------------- 1 | # (C) Copyright IBM 2024. 2 | # 3 | # This code is licensed under the Apache License, Version 2.0. You may 4 | # obtain a copy of this license in the LICENSE.txt file in the root directory 5 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 6 | # 7 | # Any modifications or derivative works of this code must retain this 8 | # copyright notice, and modified files need to carry a notice indicating 9 | # that they have been altered from the originals. 
10 | 11 | import numpy as np 12 | import pandas as pd 13 | from qiskit import QuantumCircuit 14 | from qiskit_ibm_runtime.models.exceptions import BackendPropertyError 15 | from qiskit.transpiler import AnalysisPass, CouplingMap, PassManager 16 | from qiskit.transpiler.passes import VF2Layout 17 | from qiskit.transpiler.passes.layout.vf2_utils import ErrorMap 18 | from qiskit_ibm_runtime import IBMBackend 19 | 20 | 21 | def build_error_dataframe(backend: IBMBackend) -> pd.DataFrame: 22 | data = [] 23 | props = backend.properties() 24 | gate_name_2q = list({"ecr", "cx", "cz"}.intersection(backend.basis_gates))[0] 25 | 26 | for q in range(backend.num_qubits): 27 | data.append( 28 | { 29 | "metric": "faulty", 30 | "value": np.nan if not props.is_qubit_operational(q) else 0, 31 | "qubits": (q, q), 32 | "sign": +1, 33 | } 34 | ) 35 | 36 | for q in range(backend.num_qubits): 37 | try: 38 | t1 = props.t1(q) 39 | except BackendPropertyError: 40 | t1 = np.nan 41 | data.append( 42 | { 43 | "metric": "t1", 44 | "value": t1, 45 | "qubits": (q, q), 46 | "sign": -1, 47 | } 48 | ) 49 | 50 | for q in range(backend.num_qubits): 51 | try: 52 | t2 = props.t2(q) 53 | except BackendPropertyError: 54 | t2 = np.nan 55 | data.append( 56 | { 57 | "metric": "t2", 58 | "value": t2, 59 | "qubits": (q, q), 60 | "sign": -1, 61 | } 62 | ) 63 | 64 | for q in range(backend.num_qubits): 65 | try: 66 | ro_err = props.readout_error(q) 67 | except BackendPropertyError: 68 | ro_err = np.nan 69 | data.append( 70 | { 71 | "metric": "readout_error", 72 | "value": ro_err, 73 | "qubits": (q, q), 74 | "sign": +1, 75 | } 76 | ) 77 | 78 | for edge in list(backend.coupling_map): 79 | try: 80 | gate_err_2q = props.gate_error(gate_name_2q, edge) 81 | if gate_err_2q == 1.0: 82 | gate_err_2q = np.nan 83 | except BackendPropertyError: 84 | gate_err_2q = np.nan 85 | data.append( 86 | { 87 | "metric": "gate_err_2q", 88 | "value": gate_err_2q, 89 | "qubits": edge, 90 | "sign": +1, 91 | } 92 | ) 93 | 94 | return 
pd.DataFrame(data) 95 | 96 | 97 | def compute_error_dataframe( 98 | df: pd.DataFrame, weights: dict[str, float] 99 | ) -> pd.DataFrame: 100 | df = df.copy() 101 | 102 | if set(df["metric"]) != set(weights.keys()): 103 | missing_keys = set(df["metric"]) - set(weights.keys()) 104 | raise ValueError(f"Missing weights for: {missing_keys}") 105 | 106 | # Drop any qubits which have missing properties 107 | bad_edges = list(df[df["value"].isna()]["qubits"]) 108 | bad_qubits = set([item for sublist in bad_edges for item in sublist]) 109 | df = df[ 110 | df["qubits"].map( 111 | lambda q: (q[0] not in bad_qubits) and (q[1] not in bad_qubits) 112 | ) 113 | ] 114 | 115 | # Sign indicates whether a metric should be small or large 116 | df["value"] *= df["sign"] 117 | df.drop(columns=["sign"], inplace=True) 118 | 119 | # Normalize values about the means 120 | df_mean = df.groupby(["metric"]).agg({"value": "mean"}) 121 | df = df.set_index(["metric", "qubits"]) - df_mean 122 | df_std = df.groupby(["metric"]).agg({"value": "std"}) 123 | df_std[df_std["value"] == 0.0] = 1.0 124 | df = df / df_std 125 | 126 | # Apply weights for properties 127 | df_weights = pd.DataFrame([{"metric": m, "value": v} for m, v in weights.items()]) 128 | df_weights = df_weights.set_index("metric") 129 | df = df * df_weights 130 | df.reset_index(inplace=True) 131 | 132 | ## Aggregate over metrics 133 | df = df.groupby(["qubits"]).agg({"value": "mean"}).reset_index() 134 | df["value"] -= df["value"].min() 135 | 136 | return df 137 | 138 | 139 | def build_error_map(backend, weights_dict, symmetrize: bool = True): 140 | df_props = build_error_dataframe(backend) 141 | df_err = compute_error_dataframe(df_props, weights_dict) 142 | 143 | HI_ERR_CONST = 1e3 144 | 145 | err_dict = {} 146 | for edge in list(backend.coupling_map): 147 | err_dict[edge] = HI_ERR_CONST 148 | err_dict[tuple(reversed(edge))] = HI_ERR_CONST 149 | for q in range(backend.num_qubits): 150 | err_dict[(q, q)] = HI_ERR_CONST 151 | 152 | for _, 
(qubits, err_rate) in df_err.iterrows(): 153 | err_dict[qubits] = err_rate 154 | err_dict[tuple(reversed(qubits))] = err_rate 155 | 156 | error_map = ErrorMap(len(err_dict)) 157 | for edge, err_rate in err_dict.items(): 158 | error_map.add_error(edge, err_rate) 159 | 160 | return df_err, error_map 161 | 162 | 163 | class VF2WeightedLayout(AnalysisPass): 164 | def __init__(self, weights_dict: dict[str, float], backend: IBMBackend): 165 | super().__init__() 166 | self._backend = backend 167 | self._weights_dict = weights_dict 168 | 169 | def run(self, dag): 170 | df, error_map = build_error_map(self._backend, self._weights_dict) 171 | self.property_set["vf2_avg_error_map"] = error_map 172 | self.property_set["vf2_dataframe"] = df 173 | return dag 174 | 175 | 176 | def dummy_path_circuit(num_qubits: int) -> QuantumCircuit: 177 | qc = QuantumCircuit(num_qubits) 178 | 179 | for i in range(0, num_qubits - 1, 2): 180 | qc.cz(i, i + 1) 181 | for i in range(1, num_qubits - 1, 2): 182 | qc.cz(i, i + 1) 183 | 184 | return qc 185 | 186 | 187 | def symmetrize_coupling_map(cm: CouplingMap) -> CouplingMap: 188 | edge_list = set() 189 | for edge in list(cm): 190 | edge_list |= {tuple(edge)} 191 | edge_list |= {tuple(reversed(edge))} 192 | return CouplingMap(edge_list) 193 | 194 | 195 | def get_optimal_path( 196 | weights_dict: dict[str, float], 197 | backend: IBMBackend, 198 | num_qubits: int, 199 | seed: int = 42, 200 | time_limit: float = 30.0, 201 | max_trials: int = -1, 202 | call_limit=None, 203 | ): 204 | pm = PassManager( 205 | [ 206 | VF2WeightedLayout(weights_dict=weights_dict, backend=backend), 207 | VF2Layout( 208 | strict_direction=False, 209 | seed=seed, 210 | coupling_map=symmetrize_coupling_map(backend.coupling_map), 211 | time_limit=time_limit, 212 | max_trials=max_trials, 213 | call_limit=call_limit, 214 | ), 215 | ] 216 | ) 217 | pm.run(dummy_path_circuit(num_qubits)) 218 | qubit_mapping = { 219 | k._index: v for k, v in 
pm.property_set["layout"].get_virtual_bits().items() 220 | } 221 | mapped_path = [qubit_mapping[i] for i in range(num_qubits)] 222 | return tuple(mapped_path) 223 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/mirror_circuits.py: -------------------------------------------------------------------------------- 1 | # (C) Copyright IBM 2023, 2024. 2 | # 3 | # This code is licensed under the Apache License, Version 2.0. You may 4 | # obtain a copy of this license in the LICENSE.txt file in the root directory 5 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 6 | # 7 | # Any modifications or derivative works of this code must retain this 8 | # copyright notice, and modified files need to carry a notice indicating 9 | # that they have been altered from the originals. 10 | 11 | """Trotter circuit generation""" 12 | 13 | from collections import defaultdict 14 | from typing import Sequence 15 | from math import inf 16 | import numpy as np 17 | import networkx as nx 18 | from qiskit.circuit import QuantumCircuit, Parameter 19 | from qiskit.circuit.library import CXGate, CZGate, ECRGate 20 | from qiskit.transpiler import CouplingMap, generate_preset_pass_manager as generate_pm 21 | from qiskit.transpiler.exceptions import TranspilerError 22 | from qiskit.quantum_info import PauliList 23 | from qiskit_ibm_runtime import IBMBackend 24 | 25 | 26 | def remove_qubit_couplings( 27 | couplings: Sequence[tuple[int, int]], qubits: Sequence[int] | None = None 28 | ) -> list[tuple[int, int]]: 29 | """Remove qubits from a coupling list. 30 | 31 | Args: 32 | couplings: A sequence of qubit couplings. 33 | qubits: Optional, the qubits to remove. 34 | 35 | Returns: 36 | The input couplings with the specified qubits removed. 
37 | """ 38 | if qubits is None: 39 | return couplings 40 | qubits = set(qubits) 41 | return [edge for edge in couplings if not qubits.intersection(edge)] 42 | 43 | 44 | def coupling_qubits( 45 | *couplings: Sequence[tuple[int, int]], allowed_qubits: Sequence[int] | None = None 46 | ) -> list[int]: 47 | """Return a sorted list of all qubits involved in 1 or more couplings lists. 48 | 49 | Args: 50 | couplings: 1 or more coupling lists. 51 | allowed_qubits: Optional, the allowed qubits to include. If None all 52 | qubits are allowed. 53 | 54 | Returns: 55 | The intersection of all qubits in the couplings and the allowed qubits. 56 | """ 57 | qubits = set() 58 | for edges in couplings: 59 | for edge in edges: 60 | qubits.update(edge) 61 | if allowed_qubits is not None: 62 | qubits = qubits.intersection(allowed_qubits) 63 | return list(qubits) 64 | 65 | 66 | def chain_coupling_map( 67 | coupling_map: list[tuple[int, int]], 68 | path: list[int], 69 | ) -> list[list[tuple[int, int]]]: 70 | """Construct the sub-CouplingMap for a 1D path through a 2D coupling map. 71 | 72 | Args: 73 | coupling_map: The input coupling map that is connected along the specified path. 74 | path: The ordered list of nodes to constructed a path for. 75 | 76 | Returns: 77 | The sub set of edges in the coupling map that are on the specified path. 78 | """ 79 | coupling_edges = nx.DiGraph(list(coupling_map)).edges() 80 | path_edges = [] 81 | for pos in range(1, len(path)): 82 | node_a = path[pos - 1] 83 | node_b = path[pos] 84 | for edge in ((node_a, node_b), (node_b, node_a)): 85 | if edge in coupling_edges: 86 | path_edges.append(edge) 87 | return path_edges 88 | 89 | 90 | def directed_coupling_map(backend: IBMBackend) -> CouplingMap: 91 | """Construct a single-directional coupling map of shortest gates. 92 | 93 | Args: 94 | backend: A backend to extract coupling map and gate durations from. 95 | 96 | Returns: 97 | The directed coupling map of the shortest gate for each coupling pair. 
98 | """ 99 | directional_coupling = {} 100 | target = backend.target 101 | durations = target.durations() 102 | for inst, qubits in target.instructions: 103 | if inst.num_qubits == 2 and qubits is not None: 104 | key = tuple(sorted(qubits)) 105 | if key in directional_coupling: 106 | continue 107 | q0, q1 = key 108 | try: 109 | length1 = durations.get(inst, (q0, q1)) 110 | except TranspilerError: 111 | length1 = inf 112 | try: 113 | length2 = durations.get(inst, (q1, q0)) 114 | except TranspilerError: 115 | length2 = inf 116 | 117 | shortest_pair = [q0, q1] if length1 <= length2 else [q1, q0] 118 | directional_coupling[key] = shortest_pair 119 | return CouplingMap(sorted(directional_coupling.values())) 120 | 121 | 122 | def construct_layer_couplings( 123 | backend: IBMBackend, path: Sequence[int] = None 124 | ) -> list[list[tuple[int, int]]]: 125 | """Separate a coupling map into disjoint 2-qubit gate layers. 126 | 127 | Args: 128 | backend: A backend to construct layer couplings for. 129 | path: Optional, the ordered list of nodes to constructed a 1D path for couplings. 130 | 131 | Returns: 132 | A list of disjoint layers of directed couplings for the input coupling map. 
133 | """ 134 | coupling_map = directed_coupling_map(backend) 135 | if path is not None: 136 | coupling_map = chain_coupling_map(coupling_map, path) 137 | 138 | # Convert coupling map to a networkx graph 139 | coupling_graph = nx.Graph(list(coupling_map)) 140 | 141 | # Edge coloring is vertex coloring on the dual graph 142 | dual_graph = nx.line_graph(coupling_graph) 143 | edge_coloring = nx.greedy_color(dual_graph, interchange=True) 144 | 145 | # Sort layers 146 | layers = defaultdict(list) 147 | for edge, color in edge_coloring.items(): 148 | if edge not in coupling_map: 149 | edge = tuple(reversed(edge)) 150 | layers[color].append(edge) 151 | layers = [sorted(layers[i]) for i in sorted(layers.keys())] 152 | 153 | return layers 154 | 155 | 156 | def entangling_layer( 157 | gate_2q: str, 158 | couplings: Sequence[tuple[int, int]], 159 | qubits: Sequence[int] | None = None, 160 | ) -> QuantumCircuit: 161 | """Generating a entangling layer for the specified couplings. 162 | 163 | This corresonds to a Trotter layer for a ZZ Ising term with angle Pi/2. 164 | 165 | Args: 166 | gate_2q: The 2-qubit basis gate for the layer, should be "cx", "cz", or "ecr". 167 | couplings: A sequence of qubit couplings to add CX gates to. 168 | qubits: Optional, the physical qubits for the layer. Any couplings involving 169 | qubits not in this list will be removed. If None the range up to the largest 170 | qubit in the couplings will be used. 171 | 172 | Returns: 173 | The QuantumCircuit for the entangling layer. 
174 | """ 175 | # Get qubits and convert to set to order 176 | if qubits is None: 177 | qubits = range(1 + max(coupling_qubits(*couplings))) 178 | qubits = set(qubits) 179 | 180 | # Mapping of physical qubit to virtual qubit 181 | qubit_mapping = {q: i for i, q in enumerate(qubits)} 182 | 183 | # Convert couplings to indices for virtual qubits 184 | indices = [ 185 | [qubit_mapping[i] for i in edge] 186 | for edge in couplings 187 | if qubits.issuperset(edge) 188 | ] 189 | 190 | # Layer circuit on virtual qubits 191 | circuit = QuantumCircuit(len(qubits)) 192 | 193 | # Get 2-qubit basis gate and pre and post rotation circuits 194 | gate2q = None 195 | pre = QuantumCircuit(2) 196 | post = QuantumCircuit(2) 197 | 198 | if gate_2q == "cx": 199 | gate2q = CXGate() 200 | # Pre-rotation 201 | pre.sdg(0) 202 | pre.z(1) 203 | pre.sx(1) 204 | pre.s(1) 205 | # Post-rotation 206 | post.sdg(1) 207 | post.sxdg(1) 208 | post.s(1) 209 | elif gate_2q == "ecr": 210 | gate2q = ECRGate() 211 | # Pre-rotation 212 | pre.z(0) 213 | pre.s(1) 214 | pre.sx(1) 215 | pre.s(1) 216 | # Post-rotation 217 | post.x(0) 218 | post.sdg(1) 219 | post.sxdg(1) 220 | post.s(1) 221 | elif gate_2q == "cz": 222 | gate2q = CZGate() 223 | # Identity pre-rotation 224 | # Post-rotation 225 | post.sdg([0, 1]) 226 | else: 227 | raise ValueError( 228 | f"Invalid 2-qubit basis gate {gate_2q}, should be 'cx', 'cz', or 'ecr'" 229 | ) 230 | 231 | # Add 1Q pre-rotations 232 | for inds in indices: 233 | circuit.compose(pre, qubits=inds, inplace=True) 234 | 235 | # Use barriers around 2-qubit basis gate to specify a layer for PEA noise learning 236 | circuit.barrier() 237 | for inds in indices: 238 | circuit.append(gate2q, (inds[0], inds[1])) 239 | circuit.barrier() 240 | 241 | # Add 1Q post-rotations after barrier 242 | for inds in indices: 243 | circuit.compose(post, qubits=inds, inplace=True) 244 | 245 | # Add physical qubits as metadata 246 | circuit.metadata["physical_qubits"] = tuple(qubits) 247 | 248 | return 
circuit 249 | 250 | 251 | def trotter_circuit( 252 | theta: Parameter | float, 253 | layer_couplings: Sequence[Sequence[tuple[int, int]]], 254 | num_steps: int, 255 | gate_2q: str | None = "cx", 256 | backend: IBMBackend | None = None, 257 | qubits: Sequence[int] | None = None, 258 | ) -> QuantumCircuit: 259 | """Generate a Trotter circuit for the 2D Ising 260 | 261 | Args: 262 | theta: The angle parameter for X. 263 | layer_couplings: A list of couplings for each entangling layer. 264 | num_steps: the number of Trotter steps. 265 | gate_2q: The 2-qubit basis gate to use in entangling layers. 266 | Can be "cx", "cz", "ecr", or None if a backend is provided. 267 | backend: A backend to get the 2-qubit basis gate from, if provided 268 | will override the basis_gate field. 269 | qubits: Optional, the allowed physical qubits to truncate the 270 | couplings to. If None the range up to the largest 271 | qubit in the couplings will be used. 272 | 273 | Returns: 274 | The Trotter circuit. 275 | """ 276 | if backend is not None: 277 | try: 278 | basis_gates = backend.configuration().basis_gates 279 | except AttributeError: 280 | basis_gates = backend.basis_gates 281 | for gate in ["cx", "cz", "ecr"]: 282 | if gate in basis_gates: 283 | gate_2q = gate 284 | break 285 | 286 | # If no qubits, get the largest qubit from all layers and 287 | # specify the range so the same one is used for all layers. 
288 | if qubits is None: 289 | qubits = range(1 + max(coupling_qubits(*layer_couplings))) 290 | 291 | coup_q_list = coupling_qubits(*layer_couplings) 292 | 293 | # Generate the entangling layers 294 | layers = [ 295 | entangling_layer(gate_2q, couplings, qubits=qubits) 296 | for couplings in layer_couplings 297 | ] 298 | 299 | # Construct the circuit for a single Trotter step 300 | num_qubits = len(qubits) 301 | trotter_step = QuantumCircuit(num_qubits) 302 | trotter_step.rx(theta, coup_q_list) 303 | for layer in layers: 304 | trotter_step.compose(layer, range(num_qubits), inplace=True) 305 | 306 | # Construct the circuit for the specified number of Trotter steps 307 | circuit = QuantumCircuit(num_qubits) 308 | for _ in range(num_steps): 309 | circuit.rx(theta, coup_q_list) 310 | for layer in layers: 311 | circuit.compose(layer, range(num_qubits), inplace=True) 312 | 313 | circuit.metadata["physical_qubits"] = tuple(qubits) 314 | return circuit 315 | 316 | 317 | def mirror_trotter_circuit_1d( 318 | theta: Parameter | float, 319 | delta: Parameter | float, 320 | num_steps: int, 321 | path: Sequence[int], 322 | backend: IBMBackend, 323 | ) -> QuantumCircuit: 324 | """Generate a mirrored Trotter circuit for 1D Ising on specified path. 325 | 326 | Args: 327 | theta: The angle parameter for X in simulated Hamiltonian. 328 | delta: The angle parameter for Rx rotation before final measurement 329 | num_steps: the number of Trotter steps. The returned circuit will 330 | have a 2-qubit layer depth of ``4 * num_steps``. 331 | path: The ordered list of nodes to constructed a 1D path for couplings. 332 | backend: A backend to get the 2-qubit basis gate from, if provided 333 | will override the basis_gate field. 334 | 335 | Returns: 336 | The Trotter circuit. 
337 | """ 338 | layer_couplings = construct_layer_couplings(backend, path=path) 339 | circuit = trotter_circuit(theta, layer_couplings, num_steps, backend=backend) 340 | 341 | # Construct mirror circuit 342 | mirror_circuit = circuit.compose(circuit.inverse()) 343 | mirror_circuit.metadata["physical_qubits"] = path 344 | 345 | # Add contrast rotation 346 | mirror_circuit.rx(delta, path) 347 | 348 | return mirror_circuit 349 | 350 | 351 | def mirror_trotter_pub_1d( 352 | num_steps: int, 353 | path: Sequence[int], 354 | backend: IBMBackend, 355 | theta_values: Sequence[float] = (0,), 356 | magnetization_values: Sequence[float] = (1,), 357 | ): 358 | """Generate a mirrored Trotter circuit EstimatorPub for 1D Ising on specified path. 359 | 360 | Args: 361 | num_steps: the number of Trotter steps. The returned circuit will 362 | have a 2-qubit layer depth of ``4 * num_steps``. 363 | path: The ordered list of nodes to construct a 1D path for couplings. 364 | backend: A backend to get the 2-qubit basis gate from, if provided 365 | will override the basis_gate field. 366 | theta_values: The angle parameter values for X in simulated Hamiltonian. 367 | magnetization_values: The ideal magnetization values for the circuit after 368 | mirroring. Implement by single-qubit rotations of each qubit to reduce 369 | contrast of final measurements. 370 | 371 | Returns: 372 | The mirrored Trotter circuit EstimatorPub.
373 | """ 374 | # Shape is theta first, magnetization second, observable last 375 | shape = (len(theta_values), len(magnetization_values), 1) 376 | theta_bc = np.broadcast_to(np.array(theta_values).reshape((-1, 1, 1)), shape) 377 | delta_vals = 2 * np.arccos(np.sqrt((1 + np.array(magnetization_values)) / 2)) 378 | delta_bc = np.broadcast_to(delta_vals.reshape(1, -1, 1), shape) 379 | param_vals = np.stack([theta_bc, delta_bc], axis=-1) 380 | 381 | # Important that theta < delta in circuit order 382 | theta = Parameter("A") 383 | delta = Parameter("C") 384 | circuit = mirror_trotter_circuit_1d(theta, delta, num_steps, path, backend=backend) 385 | pm = generate_pm(optimization_level=1, target=backend.target, layout_method="trivial") 386 | circuit = pm.run(circuit) 387 | obs = magnetization_observables(path, circuit.num_qubits) 388 | pub = (circuit, obs, param_vals) 389 | return pub 390 | 391 | 392 | def magnetization_observables( 393 | physical_qubits: Sequence[int], num_qubits: int | None = None 394 | ) -> PauliList: 395 | """Return the PauliList for magnetization measurement observables for ISA circuits.""" 396 | max_qubit = max(physical_qubits) 397 | if num_qubits is None: 398 | num_qubits = 1 + max_qubit 399 | elif num_qubits <= max_qubit: 400 | raise ValueError( 401 | f"num_qubits must be >= {max_qubit} for specified physical qubits" 402 | ) 403 | zs = np.zeros((len(physical_qubits), num_qubits), dtype=bool) 404 | for idx, qubit in enumerate(physical_qubits): 405 | zs[idx, qubit] = True 406 | xs = np.zeros_like(zs) 407 | return PauliList.from_symplectic(zs, xs) 408 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/mirror_pub.py: -------------------------------------------------------------------------------- 1 | # (C) Copyright IBM 2024. 2 | # 3 | # This code is licensed under the Apache License, Version 2.0. 
You may 4 | # obtain a copy of this license in the LICENSE.txt file in the root directory 5 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 6 | # 7 | # Any modifications or derivative works of this code must retain this 8 | # copyright notice, and modified files need to carry a notice indicating 9 | # that they have been altered from the originals. 10 | 11 | from functools import lru_cache 12 | from typing import Literal, Optional 13 | 14 | import networkx as nx 15 | import numpy as np 16 | from qiskit.primitives.containers.bindings_array import BindingsArray 17 | from qiskit.primitives.containers.estimator_pub import EstimatorPub 18 | from qiskit.primitives.containers.observables_array import ObservablesArray 19 | 20 | from .mirror_circuits import mirror_trotter_pub_1d 21 | from .get_optimal_path import get_optimal_path 22 | 23 | 24 | def mirror_pub( 25 | num_theta: int, 26 | backend, 27 | num_qubits: Optional[int] = None, 28 | num_steps: int = None, 29 | target_num_2q_gates: int = None, 30 | num_magnetization: int = 1, 31 | repeat_magnetization: bool = False, 32 | repeat_theta: bool = False, 33 | path: tuple[int] = None, 34 | theta: float = np.pi / 4, 35 | ) -> EstimatorPub: 36 | if num_steps is None and target_num_2q_gates is None: 37 | raise ValueError("Must specify either num steps or target 2q gates") 38 | if num_steps is not None and target_num_2q_gates is not None: 39 | raise ValueError("Can only specify either num steps or target 2q gates") 40 | 41 | if path is None: 42 | coupling_map = tuple(tuple(edge) for edge in backend.coupling_map) 43 | maximal_path = get_longest_path(coupling_map) 44 | 45 | if num_qubits is None: 46 | num_qubits = len(maximal_path) 47 | path = tuple(maximal_path[:num_qubits]) 48 | else: 49 | if len(path) < num_qubits: 50 | raise ValueError( 51 | f"Not enough qubits specified in path, {len(path)} < {num_qubits}" 52 | ) 53 | path = path[:num_qubits] 54 | 55 | if target_num_2q_gates is not None: 56 | num_steps 
= int(np.round(0.5 * target_num_2q_gates / num_qubits)) 57 | if num_steps is not None: 58 | target_num_2q_gates = int(np.round(2 * num_steps * num_qubits)) 59 | 60 | if num_steps == 0: 61 | num_steps = 1 62 | if target_num_2q_gates == 0: 63 | target_num_2q_gates = int(np.round(2 * num_steps * num_qubits)) 64 | 65 | if num_steps < 1 or target_num_2q_gates < 1: 66 | raise ValueError( 67 | f"Must have at least one step and 2q gate, got: {num_steps} steps and {target_num_2q_gates} 2q gates" 68 | ) 69 | 70 | if num_theta == 1: 71 | theta = (theta,) 72 | else: 73 | theta = tuple(np.linspace(0, theta, num=num_theta)) 74 | 75 | if num_magnetization == 1: 76 | magnetization = (1.0,) 77 | else: 78 | magnetization = tuple(np.linspace(0, 1, num=num_magnetization)) 79 | 80 | if repeat_magnetization: 81 | magnetization = (1.0,) * num_magnetization 82 | if repeat_theta: 83 | theta = (1.0,) * num_theta 84 | 85 | pub = mirror_trotter_pub_1d( 86 | num_steps=num_steps, 87 | path=path, 88 | backend=backend, 89 | theta_values=theta, 90 | magnetization_values=magnetization, 91 | ) 92 | 93 | if not (_gates := list(set(backend.basis_gates).intersection(["cx", "ecr", "cz"]))): 94 | raise ValueError("2q gate not recognized") 95 | else: 96 | gate_name = _gates[0] 97 | num_2q_gates = pub[0].count_ops().get(gate_name, 0) 98 | 99 | pub[0].metadata["circuit_depth"] = pub[0].depth(lambda instr: len(instr.qubits) > 1) 100 | pub[0].metadata["theta"] = theta 101 | pub[0].metadata["path"] = path 102 | pub[0].metadata["magnetization"] = magnetization 103 | pub[0].metadata["num_steps"] = num_steps 104 | pub[0].metadata["num_qubits"] = num_qubits 105 | pub[0].metadata["num_2q_gates"] = num_2q_gates 106 | pub[0].metadata["num_2q_gates_per_step_per_qubit"] = ( 107 | num_2q_gates / num_steps 108 | ) / num_qubits 109 | 110 | pars_from_circ = tuple(pub[0].parameters) 111 | if str(pars_from_circ[0]) != "A": 112 | raise ValueError("Assumed parameter order violated") 113 | 114 | pub = list(pub) 115 | pub[1] = 
ObservablesArray(pub[1]) 116 | pub[2] = BindingsArray({pars_from_circ: pub[2]}) 117 | pub = tuple(pub) 118 | pub = EstimatorPub(*pub) 119 | 120 | pub.circuit.metadata["bindings_array_shape"] = pub.parameter_values.shape 121 | pub.circuit.metadata["observables_array_shape"] = pub.observables.shape 122 | 123 | return pub 124 | 125 | 126 | @lru_cache() 127 | def get_longest_path(coupling_map): 128 | graph = nx.Graph(list(coupling_map)) 129 | maximal_path = max(nx.all_simple_paths(graph, 13, 113), key=len) 130 | return maximal_path 131 | 132 | 133 | class MirrorPubOptions: 134 | num_qubits: Optional[int] = None 135 | target_num_2q_gates: Optional[int] = 1000 136 | num_steps: Optional[int] = None 137 | num_magnetization: int = 1 138 | num_theta: int = 1 139 | theta: float = np.pi / 4 140 | repeat_theta: bool = False 141 | repeat_magnetization: bool = False 142 | num_pubs: int = 1 143 | path: Optional[tuple[int, ...]] = None 144 | path_strategy: Literal[None, "vf2_optimal", "eplg_chain"] = None 145 | 146 | def get_pubs(self, backend) -> list[EstimatorPub]: 147 | pub = mirror_pub( 148 | backend=backend, 149 | num_qubits=self.num_qubits, 150 | target_num_2q_gates=self.target_num_2q_gates, 151 | num_steps=self.num_steps, 152 | num_theta=self.num_theta, 153 | num_magnetization=self.num_magnetization, 154 | repeat_magnetization=self.repeat_magnetization, 155 | repeat_theta=self.repeat_theta, 156 | path=self.get_path(backend), 157 | theta=self.theta, 158 | ) 159 | 160 | return [pub] * self.num_pubs 161 | 162 | def get_path(self, backend): 163 | 164 | if self.path_strategy is None: 165 | if self.path: 166 | return self.path 167 | else: 168 | coupling_map = tuple(tuple(edge) for edge in backend.coupling_map) 169 | return get_longest_path(coupling_map)[:self.num_qubits] 170 | 171 | elif self.path_strategy == "eplg_chain": 172 | if self.num_qubits > 100: 173 | raise ValueError("ELPG chain only defined up to 100 qubits") 174 | eplg_chain = next(q_list["qubits"] for q_list in 
backend.properties().general_qlists 175 | if q_list["name"] == "lf_100") 176 | return eplg_chain[:self.num_qubits] 177 | 178 | elif self.path_strategy == "vf2_optimal": 179 | weights_dict = { 180 | "t1": 1.0, 181 | "t2": 0, 182 | "readout_error": 1.0, 183 | "faulty": 0, 184 | "gate_err_2q": 1.0, 185 | } 186 | 187 | path = get_optimal_path( 188 | weights_dict=weights_dict, 189 | backend=backend, 190 | num_qubits=self.num_qubits, 191 | time_limit=1e2, 192 | seed=42, 193 | max_trials=1000, 194 | ) 195 | 196 | return path 197 | else: 198 | raise ValueError(f"Unreconized path_strategy value {self.path_strategy}") -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/mirror_test.py: -------------------------------------------------------------------------------- 1 | # (C) Copyright IBM 2024. 2 | # 3 | # This code is licensed under the Apache License, Version 2.0. You may 4 | # obtain a copy of this license in the LICENSE.txt file in the root directory 5 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 6 | # 7 | # Any modifications or derivative works of this code must retain this 8 | # copyright notice, and modified files need to carry a notice indicating 9 | # that they have been altered from the originals. 
10 | 11 | from typing import Optional, List 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | from scipy import stats 15 | from qiskit_ibm_runtime import (EstimatorV2 as Estimator, EstimatorOptions, IBMBackend, 16 | RuntimeJobV2 as RuntimeJob) 17 | from qiskit_ibm_runtime.utils.noise_learner_result import LayerError 18 | from qiskit.primitives import PrimitiveResult 19 | 20 | from .mirror_pub import MirrorPubOptions 21 | 22 | def submit_mirror_test(backend: IBMBackend, 23 | num_gates: int=4986, 24 | num_qubits: int=100, 25 | theta: float=0, 26 | path: Optional[tuple[int, ...]] = None, 27 | path_strategy: str = "eplg_chain", 28 | noise_model: Optional[List[LayerError]] = None, 29 | execution_path: Optional[str] = None) -> RuntimeJob: 30 | """ 31 | Constructs a mirror circuit test based upon a 1D Ising model simulation. The 1D model 32 | is executed on a line of qubits. The particular line can be selected automatically by 33 | passing `num_qubits` along with a `path_strategy`, or manually by specifying a `path` 34 | as a list of edges (q_i, q_j) in the connectivity graph of the backend. `num_gates` 35 | will control the number of distinct time-steps in the Trotter evolution of the model. 36 | The `theta` parameter controls the rotation angle of the layer of 1Q gates inserted 37 | between the 2Q gate layers. Non-zero values of `theta` will ensure entanglement growth 38 | in successive time steps. 39 | 40 | You can avoid re-learning noise models by passing in an already learned `noise_model` 41 | from a prior `NoiseLearner` execution. 42 | 43 | Args: 44 | backend: the IBM backend to submit the benchmark to. 45 | num_gates: proxy for number of Trotter time steps in the 1D Ising model circuit. 46 | num_qubits: determines the width of the benchmark circuit. 47 | theta: Controls rotation angle of 1Q gates in Trotter step. Non-zero values will 48 | spread entanglement. 
49 | path: a list of edges (q_i, q_j) in the connectivity graph of the backend that 50 | defines the 1D chain of the Ising model 51 | path_strategy: one of "eplg_chain", "vf2_optimal", or None. "eplg_chain" will use 52 | the same chain as found by the EPLG benchmark. "vf2_optimal" will choose a 53 | chain using the same heuristics as the vf2 layout pass in Qiskit (also known 54 | as "mapomatic"). A value of None will simply select an appropriate length chain 55 | from the longest possible chain on the device. 56 | noise_model: A noise model from a prior NoiseLearner or Estimator job on the same 57 | layers as used in the benchmark circuit. 58 | execution_path: A value to pass to the experimental "execution_path" option of the 59 | Estimator. 60 | 61 | Returns: 62 | A RuntimeJob corresopnding to the Estimator query of the benchmark. 63 | """ 64 | pub_options = MirrorPubOptions() 65 | pub_options.num_qubits = num_qubits 66 | pub_options.target_num_2q_gates = num_gates 67 | pub_options.theta = theta 68 | if path is not None: 69 | pub_options.path = path 70 | pub_options.path_strategy = None 71 | else: 72 | pub_options.path_strategy = path_strategy 73 | 74 | pubs = pub_options.get_pubs(backend) 75 | 76 | options = EstimatorOptions() 77 | # turn on T-REX and ZNE 78 | options.resilience_level = 2 79 | 80 | # dynamical decoupling 81 | options.dynamical_decoupling.enable = True 82 | options.dynamical_decoupling.sequence_type = "XpXm" 83 | 84 | # twirling 85 | options.twirling.enable_gates = True 86 | options.twirling.num_randomizations = 1000 87 | options.twirling.shots_per_randomization = 64 88 | 89 | # PEA 90 | options.resilience.zne.amplifier = "pea" 91 | options.resilience.zne.noise_factors = [1, 1.6, 1.9, 2.8] 92 | 93 | if noise_model is not None: 94 | options.resilience.layer_noise_model = noise_model 95 | else: 96 | options.resilience.layer_noise_learning.shots_per_randomization = 64 97 | options.resilience.layer_noise_learning.num_randomizations = 50 98 | 
options.resilience.layer_noise_learning.layer_pair_depths = [0,6,16,32,64] 99 | 100 | # experimental options 101 | options.experimental = { 102 | "execution": {"fast_parametric_update": True} 103 | } 104 | if execution_path: 105 | options.experimental["execution_path"] = execution_path 106 | 107 | estimator = Estimator(backend, options=options) 108 | return estimator.run(pubs) 109 | 110 | 111 | def analyze_mirror_result(result: PrimitiveResult, 112 | accuracy_threshold: float=0.1, 113 | make_plots: bool=False): 114 | """ 115 | Analyze the outcome of a mirror test job. Pass the `PrimitiveResult` object produced 116 | by `job.result()` from the job generated by `submit_mirror_test`. This method will 117 | calculate statistics of the distribution of errors from the ideal expectation values. 118 | When `make_plots=True`, will plot the CDF of the errors. 119 | """ 120 | assert len(result) == 1, "Expected a length 1 PrimitiveResult" 121 | evs = result[0].data.evs 122 | evs_shape = evs.shape 123 | # we expect a shape of the form (1, 1, N) 124 | assert len(evs_shape) == 3, "Failed data shape check" 125 | assert evs_shape[0] == 1, "Failed data shape check" 126 | assert evs_shape[1] == 1, "Failed data shape check" 127 | 128 | evs = evs.flatten() 129 | N = len(evs) 130 | ev_errors = np.abs(1 - evs) 131 | ev_errors.sort() 132 | 133 | median_error = np.median(ev_errors) 134 | mean_error = np.average(ev_errors) 135 | print(f"Median error: {median_error}") 136 | print(f"Mean error: {mean_error}") 137 | 138 | # find fraction within the accuracy threshold of the ideal value 139 | fraction = np.argmax(ev_errors > accuracy_threshold) / N 140 | print(f"Fraction within {int(accuracy_threshold * 100)}% of ideal value: {fraction}") 141 | 142 | if make_plots: 143 | ev_cdf = stats.ecdf( ev_errors ) 144 | xpts = np.logspace(-3, 0, 101) 145 | plt.semilogx(xpts, ev_cdf.cdf.evaluate(xpts)) 146 | plt.xlabel(r"$|\left< Z_i \right> - Exact|$") 147 | plt.xlim((xpts[0],xpts[-1])) 148 | 
plt.ylabel("Fraction of observables") 149 | plt.grid() 150 | 151 | return median_error, mean_error, fraction 152 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/utilities/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | """ 14 | ============================================== 15 | Qiskit Device Benchmarking (:mod:`qiskit_device_benchmarking`) 16 | ============================================== 17 | 18 | .. currentmodule:: qiskit_device_benchmarking 19 | 20 | Qiskit Device Benchmarking is a collection of code files to help 21 | users run benchmarking experiments.. 22 | """ 23 | 24 | # Modules 25 | # from . import framework 26 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/utilities/clifford_utils.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2023. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | """Clifford utilities supplementing the ones in qiskit-experiments.""" 14 | 15 | from qiskit.quantum_info import Clifford 16 | from qiskit.circuit import QuantumCircuit 17 | 18 | 19 | def compute_target_bitstring(circuit: QuantumCircuit) -> str: 20 | """For a Pauli circuit C, which consists only of Clifford gates, compute C|0>. 21 | Args: 22 | circuit: A Pauli QuantumCircuit. 23 | Returns: 24 | Target bitstring. 25 | """ 26 | # target string has a 1 for each True in the stabilizer half of the phase vector 27 | target = "".join(["1" if phase else "0" for phase in Clifford(circuit).stab_phase[::-1]]) 28 | return target 29 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/utilities/file_utils.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2023. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 
12 | 13 | """File utilities for the device benchmarking.""" 14 | 15 | import yaml 16 | import datetime 17 | 18 | def import_yaml(fstr): 19 | with open(fstr, 'r') as stream: 20 | data_imp = yaml.safe_load(stream) 21 | 22 | return data_imp 23 | 24 | def timestamp_name(): 25 | return datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%S') 26 | 27 | def export_yaml(fstr, exp_data): 28 | with open(fstr, 'w') as fout: 29 | yaml.dump(exp_data, fout, default_flow_style=None) 30 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/utilities/graph_utils.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 
12 | 13 | """Graph utilities for the device benchmarking.""" 14 | 15 | import rustworkx as rx 16 | import numpy as np 17 | import copy 18 | 19 | def paths_flatten(paths): 20 | """Flatten a list of paths from retworkx 21 | 22 | Args: 23 | paths: all_pairs_all_simple_paths 24 | 25 | Returns: 26 | flat list of lists of qubit chains 27 | """ 28 | return [list(val) for ps in paths.values() for vals in ps.values() for val in vals] 29 | 30 | def remove_permutations(paths): 31 | """remove permutations from the paths 32 | 33 | Args: 34 | paths: list of qubit chains 35 | 36 | Returns: 37 | list of qubit chains without permutations 38 | """ 39 | 40 | new_path = [] 41 | for path_i in paths: 42 | 43 | #check already in the new_path 44 | if path_i in new_path: 45 | continue 46 | 47 | #reverse and check 48 | path_i.reverse() 49 | if path_i in new_path: 50 | continue 51 | path_i.reverse() 52 | 53 | new_path.append(path_i) 54 | 55 | 56 | return new_path 57 | 58 | def path_to_edges(paths, coupling_map=None): 59 | """Converse a list of paths into a list of edges that are in the 60 | coupling_map if defined 61 | 62 | If already edges (length 2 path) then convert into the edge that's in the 63 | coupling map 64 | 65 | Args: 66 | paths: list of qubit chains 67 | 68 | Returns: 69 | list of qubit paths in terms of the edges to traverse. 
70 | """ 71 | 72 | new_path = [] 73 | for path_i in paths: 74 | 75 | if len(path_i)>2: 76 | new_path.append([]) 77 | 78 | for i in range(len(path_i)-1): 79 | 80 | tmp_set = path_i[i:(i+2)] 81 | if coupling_map is not None: 82 | if tuple(tmp_set) not in coupling_map and tmp_set not in coupling_map: 83 | tmp_set.reverse() 84 | if tuple(tmp_set) not in coupling_map and tmp_set not in coupling_map: 85 | raise ValueError('Path not found in coupling map') 86 | 87 | if len(path_i)>2: 88 | new_path[-1].append(tmp_set) 89 | else: 90 | new_path.append(tmp_set) 91 | 92 | return new_path 93 | 94 | 95 | def build_sys_graph(nq, coupling_map, faulty_qubits=None): 96 | 97 | """Build a system graph 98 | 99 | Args: 100 | nq: number of qubits 101 | coupling_map: coupling map in list form 102 | faulty_qubits: list of faulty qubits (will remove from graph) 103 | 104 | Returns: 105 | undirected graph with no duplicate edges 106 | """ 107 | 108 | if faulty_qubits is not None: 109 | 110 | coupling_map2 = [] 111 | 112 | for i in coupling_map: 113 | if (i[0] not in faulty_qubits) and (i[1] not in faulty_qubits): 114 | coupling_map2.append(i) 115 | 116 | coupling_map = coupling_map2 117 | 118 | G = rx.PyDiGraph() 119 | G.add_nodes_from(range(nq)) 120 | G.add_edges_from_no_data([tuple(x) for x in coupling_map]); 121 | 122 | return G.to_undirected(multigraph=False) 123 | 124 | def get_iso_qubit_list(G): 125 | 126 | """Return a set of lists of isolated (separated by at least one idle qubit) 127 | qubits using graph coloring 128 | 129 | Args: 130 | G: system graph (assume G.to_undirected(multigraph=False) has been run) 131 | 132 | Returns: 133 | list of qubit lists 134 | """ 135 | 136 | qlists = {} 137 | node_dict = rx.graph_greedy_color(G) 138 | for i in node_dict: 139 | if node_dict[i] in qlists: 140 | qlists[node_dict[i]].append(i) 141 | else: 142 | qlists[node_dict[i]] = [i] 143 | 144 | qlists = list(qlists.values()) 145 | for i in range(len(qlists)): 146 | qlists[i] = 
list(np.sort(qlists[i])) 147 | 148 | return qlists 149 | 150 | def get_disjoint_edge_list(G): 151 | 152 | """Return a set of disjoint edges using graph coloring 153 | 154 | Args: 155 | G: system graph (assume G.to_undirected(multigraph=False) has been run) 156 | 157 | Returns: 158 | list of list of edges 159 | """ 160 | 161 | edge_lists = {} 162 | edge_dict = rx.graph_greedy_edge_color(G) 163 | for i in edge_dict: 164 | if edge_dict[i] in edge_lists: 165 | edge_lists[edge_dict[i]].append(G.edge_list()[i]) 166 | else: 167 | edge_lists[edge_dict[i]] = [G.edge_list()[i]] 168 | 169 | return list(edge_lists.values()) 170 | 171 | def get_separated_sets(G, node_sets, min_sep=1, nsets=-1): 172 | 173 | """Given a list node sets separate out into lists where 174 | the sets in each list are separated by min_sep 175 | 176 | This could be quite slow! 177 | 178 | Args: 179 | G: system graph 180 | node_sets: list of list of nodes 181 | min_sep: minimum separation between node sets 182 | nsets: number of sets to truncate at, if -1 then make all sets 183 | 184 | Returns: 185 | list of list of list of nodes each separated by min_sep 186 | """ 187 | 188 | node_sets_sep = [[]] 189 | cur_ind1 = 0 190 | cur_ind2 = 0 191 | 192 | node_sets_tmp = copy.deepcopy(node_sets) 193 | 194 | #get all node to node distances in a dictionary 195 | all_dists = rx.all_pairs_dijkstra_path_lengths(G, lambda a: 1) 196 | 197 | while (len(node_sets_tmp)>0): 198 | if cur_ind2>=len(node_sets_tmp): 199 | 200 | if nsets>0 and (cur_ind1+2)>nsets: 201 | break 202 | 203 | node_sets_sep.append([]) 204 | cur_ind1 += 1 205 | cur_ind2 = 0 206 | 207 | add_set = True 208 | for node_set in node_sets_sep[cur_ind1]: 209 | 210 | if not sets_min_dist(all_dists, node_set, node_sets_tmp[cur_ind2], min_sep): 211 | add_set = False 212 | cur_ind2 += 1 213 | break 214 | 215 | if add_set: 216 | node_sets_sep[cur_ind1].append(node_sets_tmp[cur_ind2]) 217 | node_sets_tmp.pop(cur_ind2) 218 | 219 | 220 | return node_sets_sep 221 | 222 | 
def sets_min_dist(dist_dict, set1, set2, min_sep): 223 | """Calculate if two sets are min_sep apart 224 | 225 | Args: 226 | dist_dict: dictionary of distances between nodes 227 | set1,2: the two sets 228 | min_sep: minimum separation 229 | 230 | Returns: 231 | True/False 232 | """ 233 | 234 | #dummy check 235 | if set(set1) & set(set2): 236 | return False 237 | 238 | for i in set1: 239 | for j in set2: 240 | if dist_dict[i][j] < min_sep: 241 | return False 242 | 243 | return True 244 | 245 | def create_graph_dict( 246 | coupling_map: list, 247 | nq: int 248 | ) -> dict: 249 | 250 | graph_dict = {i: [] for i in range(nq)} 251 | 252 | for edge in coupling_map: 253 | if edge[1] not in graph_dict[edge[0]]: 254 | graph_dict[edge[0]].append(edge[1]) 255 | 256 | if edge[0] not in graph_dict[edge[1]]: 257 | graph_dict[edge[1]].append(edge[0]) 258 | 259 | 260 | return graph_dict 261 | 262 | def iter_neighbors( 263 | graph_dict: dict, 264 | cur_node: int, 265 | err_map: dict, 266 | best_fid: list, 267 | fid_cutoff: float, 268 | cur_list: list, 269 | chain_fid: float, 270 | pathlen: int 271 | ) -> list: 272 | 273 | """ 274 | takes a list of paths through a graph and adds to 275 | it all the neighbor qubits of the last point as long 276 | as the graph does fold on itself. 
This version is different than the above 277 | in that it tracks a best fidelity and will skip paths 278 | that don't seem viable 279 | 280 | if the lists get long enough return the lists 281 | 282 | Args: 283 | graph_dict: dictionary of nodes and their neighbors 284 | cur_node: current node on the graph 285 | err_map: map of edge errors (AVERAGE gate error) 286 | best_fid: list of length 1 (so mutable) of the best fidelity 287 | fid_cutoff: the percentage (0->1) of the best fidelity at that chain length 288 | to cutoff the search 289 | cur_list: current path through graph 290 | chain_fid: fidelit of the current path 291 | pathlen: length of the path we are trying to find 292 | 293 | Returns: 294 | new_list: a list of all the paths appended to cur_list 295 | """ 296 | 297 | new_list = [] 298 | for i in graph_dict[cur_node]: 299 | #no backtracking 300 | if (len(cur_list) > 1 and i in cur_list): 301 | continue 302 | 303 | if '%d_%d'%(cur_node,i) in err_map: 304 | edge_err = err_map['%d_%d'%(cur_node,i)] 305 | else: 306 | edge_err = err_map['%d_%d'%(i,cur_node)] 307 | 308 | #if the edge does not seem viable skip 309 | new_fid = chain_fid*(1-5/4*edge_err) 310 | if new_fid < (fid_cutoff*best_fid[0])**((len(cur_list)+1)/pathlen): 311 | continue 312 | #add the current node to the list 313 | cur_list_tmp = cur_list.copy() 314 | cur_list_tmp.append(i) 315 | 316 | #check if the list is long enough 317 | if len(cur_list_tmp) < pathlen: 318 | #if not then continue to add to it 319 | tmp_new_list = iter_neighbors(graph_dict, 320 | i, 321 | err_map, 322 | best_fid, 323 | fid_cutoff, 324 | cur_list_tmp, 325 | new_fid, 326 | pathlen) 327 | for tmp_node in tmp_new_list: 328 | if len(tmp_node)!=0: 329 | new_list.append(tmp_node) 330 | else: 331 | #append the path to the list 332 | if new_fid > best_fid[0]: 333 | best_fid[0]=new_fid 334 | new_list.append(cur_list_tmp) 335 | return new_list -------------------------------------------------------------------------------- 
/qiskit_device_benchmarking/verification/Readme.md: -------------------------------------------------------------------------------- 1 | # Fast Benchmarking 2 | 3 | The file fast_bench.py is a command line invocation to run a mirror qv suite on device(s) `python fast_bench.py`. Requires a config file (default is `config.yaml`) of the form 4 | ``` 5 | hgp: X/X/X 6 | backends: [ibm_sherbrooke, ibm_brisbane, ibm_torino] 7 | nrand: 10 8 | depths: [4,6,8,10,12] 9 | he: True 10 | dd: True 11 | opt_level: 1 12 | trials: 10 13 | shots: 200 14 | ``` 15 | Generates an output yaml (timestamped) with the results. There are two types of circuits for the benchmarking. 16 | Mirror QV circuits which are all-to-all and HE (hardware-efficient) Mirror QV circuits which are layers of random SU(4) assuming nearest neighbor on a chain. 17 | 18 | The output can be turned into plots with `bench_analyze.py`, e.g. `python bench_analyze.py -f MQV_2024-04-27_06_19_32.yaml -v max --plot` will produce a plot of all the maximum results over the sets from the listed file. Plots are generated as pdf. 19 | 20 | Similarly, the file `fast_layer_fidelity.py` is a command line invocation to run a single layer fidelity experiment on specified device(s) `python fast_layer_fidelity.py`. The qubit chain selected for this is the reported 100Q on qiskit for each device. This file also requires a config file (default is `config.yaml`) of the form (unless overwritten by command line arguments) 21 | ``` 22 | hgp: X/X/X 23 | backends: [ibm_fez] 24 | channel: 'ibm_quantum' or 'ibm_cloud' 25 | ``` 26 | 27 | Circuits are based on the code written for https://arxiv.org/abs/2303.02108 which was based on the earlier work by Proctor et al [Phys. Rev. Lett. 129, 150502 (2022)](https://doi.org/10.48550/arXiv.2112.09853).
28 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | """ 14 | ============================================== 15 | Qiskit Device Benchmarking (:mod:`qiskit_device_benchmarking`) 16 | ============================================== 17 | 18 | .. currentmodule:: qiskit_device_benchmarking 19 | 20 | Qiskit Device Benchmarking is a collection of code files to help 21 | users run benchmarking experiments. 22 | """ 23 | 24 | # Modules 25 | # from . import framework 26 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/bench_analyze.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals.
12 | """ 13 | Analyze the benchmarking results 14 | """ 15 | 16 | import argparse 17 | import numpy as np 18 | import qiskit_device_benchmarking.utilities.file_utils as fu 19 | import matplotlib.pyplot as plt 20 | 21 | 22 | def generate_plot(out_data, config_data, args): 23 | 24 | """Generate a plot from the fast_bench data 25 | 26 | Generates a plot of the name result_plot_.pdf where XXX is the 27 | current date and time 28 | 29 | Args: 30 | out_data: data from the run 31 | config_data: configuration data from the run 32 | args: arguments passed to the parser 33 | 34 | Returns: 35 | None 36 | """ 37 | 38 | markers = ['o','x','.','s','^','v','*'] 39 | 40 | for i, backend in enumerate(out_data): 41 | plt.semilogy(out_data[backend][0],out_data[backend][1], label=backend, marker=markers[np.mod(i,len(markers))]) 42 | 43 | plt.legend() 44 | plt.xlabel('Depth') 45 | plt.ylabel('Success Probability (%s over sets)'%args.value) 46 | plt.ylim(top=1.0) 47 | plt.title('Running Mirror - HE: %s, DD: %s, Trials: %d'%(config_data['he'], 48 | config_data['dd'], 49 | config_data['trials'])) 50 | plt.grid(True) 51 | plt.savefig('result_plot_%s.pdf'%fu.timestamp_name()) 52 | plt.close() 53 | 54 | return 55 | 56 | 57 | if __name__ == '__main__': 58 | 59 | """Analyze a benchmarking run from `fast_bench.py` 60 | 61 | Args: 62 | Call -h for arguments 63 | 64 | """ 65 | 66 | parser = argparse.ArgumentParser(description = 'Analyze the results of a ' 67 | + 'benchmarking run.') 68 | parser.add_argument('-f', '--files', help='Comma separated list of files') 69 | parser.add_argument('-b', '--backends', help='Comma separated list of ' 70 | + 'backends to plot. 
If empty plot all.') 71 | parser.add_argument('-v', '--value', help='Statistical value to compute', 72 | choices=['mean','median', 'max', 'min'], default='mean') 73 | parser.add_argument('--plot', help='Generate a plot', action='store_true') 74 | args = parser.parse_args() 75 | 76 | #import from results files and concatenate into a larger results 77 | results_dict = {} 78 | for file in args.files.split(','): 79 | results_dict_new = fu.import_yaml(file) 80 | 81 | for backend in results_dict_new: 82 | 83 | if backend not in results_dict: 84 | results_dict[backend] = results_dict_new[backend] 85 | elif backend!='config': 86 | #backend in the results dict but maybe not that depth 87 | for depth in results_dict_new[backend]: 88 | if depth in results_dict[backend]: 89 | err_str = 'Depth %s already exists for backend %s, duplicate results'%(depth,backend) 90 | raise ValueError(err_str) 91 | else: 92 | 93 | #check the metadata is the same 94 | #TO DO 95 | 96 | results_dict[backend][depth] = results_dict_new[backend][depth] 97 | 98 | 99 | if args.backends is not None: 100 | backends_filt = args.backends.split(',') 101 | else: 102 | backends_filt = [] 103 | 104 | out_data = {} 105 | 106 | 107 | for backend in results_dict: 108 | 109 | if len(backends_filt)>0: 110 | if backend not in backends_filt: 111 | continue 112 | 113 | if backend=='config': 114 | continue 115 | print(backend) 116 | depth_list = [] 117 | depth_list_i = [] 118 | 119 | out_data[backend] = [] 120 | 121 | for depth in results_dict[backend]: 122 | if depth=='job_ids': 123 | continue 124 | depth_list_i.append(depth) 125 | if args.value=='mean': 126 | depth_list.append(np.mean(results_dict[backend][depth]['mean'])) 127 | elif args.value=='max': 128 | depth_list.append(np.max(results_dict[backend][depth]['mean'])) 129 | elif args.value=='min': 130 | depth_list.append(np.min(results_dict[backend][depth]['mean'])) 131 | else: 132 | depth_list.append(np.median(results_dict[backend][depth]['mean'])) 133 | 134 | 
print('Backend %s'%backend) 135 | print('Depths: %s'%depth_list_i) 136 | 137 | if args.value=='mean': 138 | print('Means: %s'%depth_list) 139 | elif args.value=='max': 140 | print('Max: %s'%depth_list) 141 | elif args.value=='min': 142 | print('Min: %s'%depth_list) 143 | else: 144 | print('Median: %s'%depth_list) 145 | 146 | out_data[backend].append(depth_list_i) 147 | out_data[backend].append(depth_list) 148 | 149 | 150 | if args.plot: 151 | 152 | generate_plot(out_data, results_dict['config'], args) 153 | 154 | elif args.plot: 155 | print('Need to run mean/max also') 156 | 157 | 158 | 159 | 160 | 161 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/count_analyze.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 
12 | """ 13 | Analyze the fast_count results 14 | """ 15 | 16 | import argparse 17 | import numpy as np 18 | import qiskit_device_benchmarking.utilities.file_utils as fu 19 | import matplotlib.pyplot as plt 20 | 21 | 22 | def generate_plot(out_data, degree_data, args): 23 | 24 | """Generate a bar plot of the qubit numbers of each backend 25 | 26 | Generates a plot of the name count_plot_.pdf where XXX is the 27 | current date and time 28 | 29 | Args: 30 | out_data: data from the run (count data) 31 | degree_data: average degree 32 | args: arguments passed to the parser 33 | 34 | Returns: 35 | None 36 | """ 37 | 38 | count_data = np.array([out_data[i] for i in out_data]) 39 | degree_data = np.array([degree_data[i] for i in out_data]) 40 | backend_lbls = np.array([i for i in out_data]) 41 | sortinds = np.argsort(count_data) 42 | 43 | plt.bar(backend_lbls[sortinds], count_data[sortinds]) 44 | plt.xticks(rotation=45, ha='right') 45 | 46 | ax1 = plt.gca() 47 | 48 | if args.degree: 49 | ax2 = ax1.twinx() 50 | ax2.plot(range(len(sortinds)),degree_data[sortinds],marker='x', color='black') 51 | ax2.set_ylabel('Average Degree') 52 | 53 | 54 | 55 | plt.xlabel('Backend') 56 | plt.grid(axis='y') 57 | ax1.set_ylabel('Largest Connected Region') 58 | plt.title('CHSH Test on Each Edge to Determine Qubit Count') 59 | plt.savefig('count_plot_%s.pdf'%fu.timestamp_name(),bbox_inches='tight') 60 | plt.close() 61 | 62 | return 63 | 64 | 65 | if __name__ == '__main__': 66 | 67 | """Analyze a benchmarking run from `fast_bench.py` 68 | 69 | Args: 70 | Call -h for arguments 71 | 72 | """ 73 | 74 | parser = argparse.ArgumentParser(description = 'Analyze the results of a ' 75 | + 'benchmarking run.') 76 | parser.add_argument('-f', '--files', help='Comma separated list of files') 77 | parser.add_argument('-b', '--backends', help='Comma separated list of ' 78 | + 'backends to plot. 
If empty plot all.') 79 | parser.add_argument('--plot', help='Generate a plot', action='store_true') 80 | parser.add_argument('--degree', help='Add degree to the plot', action='store_true') 81 | args = parser.parse_args() 82 | 83 | #import from results files and concatenate into a larger results 84 | results_dict = {} 85 | for file in args.files.split(','): 86 | results_dict_new = fu.import_yaml(file) 87 | 88 | for backend in results_dict_new: 89 | 90 | if backend not in results_dict: 91 | results_dict[backend] = results_dict_new[backend] 92 | elif backend!='config': 93 | #backend in the results dict but maybe not that depth 94 | 95 | err_str = 'Backend %s already exists, duplicate results'%(backend) 96 | raise ValueError(err_str) 97 | 98 | 99 | 100 | if args.backends is not None: 101 | backends_filt = args.backends.split(',') 102 | else: 103 | backends_filt = [] 104 | 105 | count_data = {} 106 | degree_data = {} 107 | 108 | 109 | for backend in results_dict: 110 | 111 | if len(backends_filt)>0: 112 | if backend not in backends_filt: 113 | continue 114 | 115 | if backend=='config': 116 | continue 117 | 118 | 119 | 120 | count_data[backend] = results_dict[backend]['largest_region'] 121 | degree_data[backend] = results_dict[backend]['average_degree'] 122 | print('Backend %s, Largest Connected Region: %d'%(backend,count_data[backend])) 123 | print('Backend %s, Average Degree: %f'%(backend,degree_data[backend])) 124 | 125 | 126 | 127 | if args.plot: 128 | 129 | generate_plot(count_data, degree_data, args) 130 | 131 | elif args.plot: 132 | print('Need to run mean/max also') 133 | 134 | 135 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/fast_bench.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. 
You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Fast benchmark via mirror circuits 14 | """ 15 | 16 | import argparse 17 | import numpy as np 18 | import rustworkx as rx 19 | from qiskit_ibm_runtime import QiskitRuntimeService 20 | from qiskit.transpiler import Target, CouplingMap 21 | from qiskit_experiments.framework import (ParallelExperiment, BatchExperiment) 22 | 23 | 24 | import qiskit_device_benchmarking.utilities.file_utils as fu 25 | import qiskit_device_benchmarking.utilities.graph_utils as gu 26 | from qiskit_device_benchmarking.bench_code.mrb import MirrorQuantumVolume 27 | 28 | import warnings 29 | 30 | from qiskit.circuit import Gate 31 | xslow = Gate(name='xslow', num_qubits=1, params=[]) 32 | 33 | def run_bench(hgp, backends, depths=[8], trials=10, 34 | nshots=100, he=True, dd=True, opt_level=3, act_name=''): 35 | 36 | """Run a benchmarking test (mirror QV) on a set of devices 37 | 38 | Args: 39 | hgp: hub/group/project 40 | backends: list of backends 41 | depths: list of mirror depths (square circuits) 42 | trials: number of randomizations 43 | nshots: number of shots 44 | he: hardware efficient True/False (False is original QV circ all to all, 45 | True assumes a line) 46 | dd: add dynamic decoupling 47 | opt_level: optimization level of the transpiler 48 | act_name: account name to be passed to the runtime service 49 | 50 | Returns: 51 | flat list of lists of qubit chains 52 | """ 53 | 54 | warnings.filterwarnings("error", message=".*run.*", category=DeprecationWarning, append=False) 55 | 56 | #load the service 57 | service = QiskitRuntimeService(name=act_name) 58 | job_list = [] 59 | result_dict = {} 60 | 
result_dict['config'] = {'hgp': hgp, 'depths': depths, 61 | 'trials': trials, 62 | 'nshots': nshots, 63 | 'dd': dd, 64 | 'he': he, 65 | 'pregenerated': False, 66 | 'opt_level': opt_level, 67 | 'act_name': act_name} 68 | 69 | 70 | print('Running Fast Bench with options %s'%result_dict['config']) 71 | 72 | #run all the circuits 73 | for backend in backends: 74 | print('Loading backend %s'%backend) 75 | result_dict[backend] = {} 76 | backend_real=service.backend(backend,instance=hgp) 77 | mqv_exp_list_d = [] 78 | for depth in depths: 79 | 80 | print('Generating Depth %d Circuits for Backend %s'%(depth, backend)) 81 | 82 | result_dict[backend][depth] = {} 83 | 84 | 85 | #compute the sets for this 86 | #NOTE: I want to replace this with fixed sets from 87 | #a config file!!! 88 | nq = backend_real.configuration().n_qubits 89 | coupling_map = backend_real.configuration().coupling_map 90 | G = gu.build_sys_graph(nq, coupling_map) 91 | paths = rx.all_pairs_all_simple_paths(G,depth,depth) 92 | paths = gu.paths_flatten(paths) 93 | new_sets = gu.get_separated_sets(G,paths,min_sep=2,nsets=1) 94 | 95 | mqv_exp_list = [] 96 | 97 | result_dict[backend][depth]['sets'] = new_sets[0] 98 | 99 | 100 | #Construct mirror QV circuits on each parallel set 101 | for qset in new_sets[0]: 102 | 103 | #generate the circuits 104 | mqv_exp = MirrorQuantumVolume(qubits=qset,backend=backend_real,trials=trials, 105 | pauli_randomize=True, he = he) 106 | 107 | 108 | mqv_exp.analysis.set_options(plot=False, 109 | calc_hop=False, 110 | analyzed_quantity='Success Probability') 111 | 112 | #Do this so it won't compile outside the qubit sets 113 | cust_map = [] 114 | for i in coupling_map: 115 | if i[0] in qset and i[1] in qset: 116 | cust_map.append(i) 117 | 118 | basis_gates = backend_real.configuration().basis_gates 119 | if 'xslow' in basis_gates: 120 | basis_gates.remove('xslow') 121 | if 'rx' in basis_gates: 122 | basis_gates.remove('rx') 123 | if 'rzz' in basis_gates: 124 | 
basis_gates.remove('rzz') 125 | cust_target = Target.from_configuration(basis_gates = basis_gates, 126 | num_qubits=nq, 127 | coupling_map=CouplingMap(cust_map)) 128 | 129 | mqv_exp.set_transpile_options(target=cust_target, optimization_level=opt_level) 130 | mqv_exp_list.append(mqv_exp) 131 | 132 | 133 | new_exp_mqv = ParallelExperiment(mqv_exp_list, backend=backend_real, flatten_results=False) 134 | if dd: 135 | #this forces the circuits to have DD on them 136 | print('Transpiling and DD') 137 | for i in mqv_exp_list: 138 | i.dd_circuits() 139 | 140 | 141 | mqv_exp_list_d.append(new_exp_mqv) 142 | 143 | new_exp_mqv = BatchExperiment(mqv_exp_list_d, backend=backend_real, flatten_results=False) 144 | new_exp_mqv.set_run_options(shots=nshots) 145 | job_list.append(new_exp_mqv.run()) 146 | result_dict[backend]['job_ids'] = job_list[-1].job_ids 147 | 148 | 149 | #get the jobs back 150 | for i, backend in enumerate(backends): 151 | 152 | print('Loading results for backend: %s'%backend) 153 | 154 | expdata = job_list[i] 155 | try: 156 | expdata.block_for_results() 157 | except: 158 | #remove backend from results 159 | print('Error loading backend %s results'%backend) 160 | result_dict.pop(backend) 161 | continue 162 | 163 | for j, depth in enumerate(depths): 164 | 165 | result_dict[backend][depth]['data'] = [] 166 | result_dict[backend][depth]['mean'] = [] 167 | result_dict[backend][depth]['std'] = [] 168 | 169 | for k in range(len(result_dict[backend][depth]['sets'])): 170 | 171 | result_dict[backend][depth]['data'].append([float(probi) for probi in list(expdata.child_data()[j].child_data()[k].artifacts()[0].data)]) 172 | result_dict[backend][depth]['mean'].append(float(np.mean(result_dict[backend][depth]['data'][-1]))) 173 | result_dict[backend][depth]['std'].append(float(np.std(result_dict[backend][depth]['data'][-1]))) 174 | 175 | fu.export_yaml('MQV_' + fu.timestamp_name() + '.yaml', result_dict) 176 | 177 | 178 | if __name__ == '__main__': 179 | parser = 
argparse.ArgumentParser(description = 'Run fast benchmark of ' 180 | + 'devices using mirror. Specify a config ' 181 | +' yaml and override settings on the command line') 182 | parser.add_argument('-c', '--config', help='config file name', 183 | default='config.yaml') 184 | parser.add_argument('-b', '--backend', help='Specify backend and override ' 185 | + 'backend_group') 186 | parser.add_argument('-bg', '--backend_group', 187 | help='specify backend group in config file', 188 | default='backends') 189 | parser.add_argument('--hgp', help='specify hgp') 190 | parser.add_argument('--he', help='Hardware efficient', action='store_true') 191 | parser.add_argument('--name', help='Account name', default='') 192 | args = parser.parse_args() 193 | 194 | #import from config 195 | config_dict = fu.import_yaml(args.config) 196 | print('Config File Found') 197 | print(config_dict) 198 | 199 | #override from the command line 200 | if args.backend is not None: 201 | backends = [args.backend] 202 | else: 203 | backends = config_dict[args.backend_group] 204 | 205 | if args.hgp is not None: 206 | hgp = args.hgp 207 | else: 208 | hgp = config_dict['hgp'] 209 | 210 | if args.he is True: 211 | he = True 212 | else: 213 | he = config_dict['he'] 214 | 215 | opt_level = config_dict['opt_level'] 216 | dd = config_dict['dd'] 217 | depths = config_dict['depths'] 218 | trials = config_dict['trials'] 219 | nshots = config_dict['shots'] 220 | 221 | #print(hgp, backends, he, opt_level, dd, depths, trials, nshots) 222 | 223 | run_bench(hgp, backends, depths=depths, trials=trials, 224 | nshots=nshots, he=he, dd=dd, opt_level=opt_level, act_name=args.name) 225 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/fast_count.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 
4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Fast benchmark of qubit count using the CHSH inequality 14 | """ 15 | 16 | import argparse 17 | import rustworkx as rx 18 | import networkx as nx 19 | from qiskit_ibm_runtime import QiskitRuntimeService 20 | from qiskit_experiments.framework import (ParallelExperiment, BatchExperiment) 21 | 22 | import qiskit_device_benchmarking.utilities.file_utils as fu 23 | import qiskit_device_benchmarking.utilities.graph_utils as gu 24 | from qiskit_device_benchmarking.bench_code.bell import CHSHExperiment 25 | 26 | def run_count(hgp, backends, nshots=100, act_name=''): 27 | 28 | """Run a chsh inequality on a number of devices 29 | 30 | Args: 31 | hgp: hub/group/project 32 | backends: list of backends 33 | nshots: number of shots 34 | act_name: account name to be passed to the runtime service 35 | 36 | Returns: 37 | flat list of all the edges 38 | """ 39 | 40 | #load the service 41 | service = QiskitRuntimeService(name=act_name) 42 | job_list = [] 43 | result_dict = {} 44 | result_dict['config'] = {'hgp': hgp, 45 | 'nshots': nshots, 46 | 'act_name': act_name} 47 | 48 | 49 | print('Running Fast Count with options %s'%result_dict['config']) 50 | 51 | #run all the circuits 52 | for backend in backends: 53 | print('Loading backend %s'%backend) 54 | result_dict[backend] = {} 55 | backend_real=service.backend(backend,instance=hgp) 56 | chsh_exp_list_b = [] 57 | 58 | 59 | #compute the sets for this 60 | #NOTE: I want to replace this with fixed sets from 61 | #a config file!!! 
62 | nq = backend_real.configuration().n_qubits 63 | coupling_map = backend_real.configuration().coupling_map 64 | #build a set of gates 65 | G = gu.build_sys_graph(nq, coupling_map) 66 | #get all length 2 paths in the device 67 | paths = rx.all_pairs_all_simple_paths(G,2,2) 68 | #flatten those paths into a list from the rustwork x iterator 69 | paths = gu.paths_flatten(paths) 70 | #remove permutations 71 | paths = gu.remove_permutations(paths) 72 | #convert to the coupling map of the device 73 | paths = gu.path_to_edges(paths,coupling_map) 74 | #make into separate sets 75 | sep_sets = gu.get_separated_sets(G, paths, min_sep=2) 76 | 77 | 78 | 79 | result_dict[backend]['sets'] = sep_sets 80 | 81 | 82 | #Construct mirror QV circuits on each parallel set 83 | for qsets in sep_sets: 84 | 85 | chsh_exp_list = [] 86 | 87 | for qset in qsets: 88 | 89 | #generate the circuits 90 | chsh_exp = CHSHExperiment(physical_qubits=qset,backend=backend_real) 91 | 92 | 93 | chsh_exp.set_transpile_options(optimization_level=1) 94 | chsh_exp_list.append(chsh_exp) 95 | 96 | 97 | new_exp_chsh = ParallelExperiment(chsh_exp_list, 98 | backend=backend_real, 99 | flatten_results=False) 100 | 101 | chsh_exp_list_b.append(new_exp_chsh) 102 | 103 | new_exp_chsh = BatchExperiment(chsh_exp_list_b, backend=backend_real, 104 | flatten_results=False) 105 | 106 | new_exp_chsh.set_run_options(shots=nshots) 107 | job_list.append(new_exp_chsh.run()) 108 | result_dict[backend]['job_ids'] = job_list[-1].job_ids 109 | 110 | 111 | #get the jobs back 112 | for i, backend in enumerate(backends): 113 | 114 | print('Loading results for backend: %s'%backend) 115 | 116 | expdata = job_list[i] 117 | try: 118 | expdata.block_for_results() 119 | except: 120 | #remove backend from results 121 | print('Error loading backend %s results'%backend) 122 | result_dict.pop(backend) 123 | continue 124 | 125 | result_dict[backend]['chsh_values'] = {} 126 | 127 | for qsets_i, qsets in enumerate(result_dict[backend]['sets']): 
128 | 129 | for qset_i, qset in enumerate(qsets): 130 | 131 | 132 | anal_res = expdata.child_data()[qsets_i].child_data()[qset_i].analysis_results()[0] 133 | qedge = '%d_%d'%(anal_res.device_components[0].index,anal_res.device_components[1].index) 134 | result_dict[backend]['chsh_values'][qedge] = anal_res.value 135 | 136 | 137 | #calculate number of connected qubits 138 | G = nx.Graph() 139 | 140 | #add all possible edges 141 | for i in result_dict[backend]['chsh_values']: 142 | if result_dict[backend]['chsh_values'][i]>=2: 143 | G.add_edge(int(i.split('_')[0]),int(i.split('_')[1])) 144 | 145 | 146 | #catch error if the graph is empty 147 | try: 148 | largest_cc = max(nx.connected_components(G), key=len) 149 | 150 | #look at the average degree of the largest region 151 | avg_degree = 0 152 | for i in largest_cc: 153 | avg_degree += nx.degree(G,i) 154 | 155 | avg_degree = avg_degree/len(largest_cc) 156 | 157 | except: 158 | 159 | largest_cc = {} 160 | avg_degree = 1 161 | 162 | 163 | 164 | result_dict[backend]['largest_region'] = len(largest_cc) 165 | result_dict[backend]['average_degree'] = avg_degree 166 | 167 | 168 | 169 | 170 | 171 | fu.export_yaml('CHSH_' + fu.timestamp_name() + '.yaml', result_dict) 172 | 173 | 174 | if __name__ == '__main__': 175 | parser = argparse.ArgumentParser(description = 'Run fast benchmark of ' 176 | + 'qubit count using chsh. 
Specify a config ' 177 | +' yaml and override settings on the command line') 178 | parser.add_argument('-c', '--config', help='config file name', 179 | default='config.yaml') 180 | parser.add_argument('-b', '--backend', help='Specify backend and override ' 181 | + 'backend_group') 182 | parser.add_argument('-bg', '--backend_group', 183 | help='specify backend group in config file', 184 | default='backends') 185 | parser.add_argument('--hgp', help='specify hgp') 186 | parser.add_argument('--shots', help='specify number of shots') 187 | parser.add_argument('--name', help='Account name', default='') 188 | args = parser.parse_args() 189 | 190 | #import from config 191 | config_dict = fu.import_yaml(args.config) 192 | print('Config File Found') 193 | print(config_dict) 194 | 195 | #override from the command line 196 | if args.backend is not None: 197 | backends = [args.backend] 198 | else: 199 | backends = config_dict[args.backend_group] 200 | 201 | if args.hgp is not None: 202 | hgp = args.hgp 203 | else: 204 | hgp = config_dict['hgp'] 205 | 206 | if args.shots is not None: 207 | nshots = int(args.shots) 208 | else: 209 | nshots = config_dict['shots'] 210 | 211 | 212 | 213 | #print(hgp, backends, he, opt_level, dd, depths, trials, nshots) 214 | 215 | run_count(hgp, backends, nshots=nshots, act_name=args.name) 216 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/fast_layer_fidelity.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # This code is part of Qiskit. 3 | # 4 | # (C) Copyright IBM 2024. 5 | # 6 | # This code is licensed under the Apache License, Version 2.0. You may 7 | # obtain a copy of this license in the LICENSE.txt file in the root directory 8 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
9 | # 10 | # Any modifications or derivative works of this code must retain this 11 | # copyright notice, and modified files need to carry a notice indicating 12 | # that they have been altered from the originals. 13 | 14 | """ 15 | Fast Layer Fidelity on the reported 100Q qiskit chain 16 | """ 17 | 18 | import argparse 19 | import numpy as np 20 | import matplotlib.pyplot as plt 21 | import pandas as pd 22 | from typing import Dict, List, Tuple 23 | import datetime 24 | import os 25 | 26 | from qiskit_ibm_runtime import QiskitRuntimeService 27 | from qiskit_ibm_runtime.ibm_backend import IBMBackend 28 | from qiskit_experiments.framework.experiment_data import ExperimentData 29 | from qiskit_experiments.library.randomized_benchmarking import LayerFidelity 30 | from qiskit.visualization import plot_gate_map 31 | from qiskit.transpiler import CouplingMap 32 | 33 | import qiskit_device_benchmarking.utilities.layer_fidelity_utils as lfu 34 | import qiskit_device_benchmarking.utilities.graph_utils as gu 35 | import qiskit_device_benchmarking.utilities.file_utils as fu 36 | 37 | 38 | def run_fast_lf(backends: List[str], 39 | nseeds: int, 40 | seed: int, 41 | cliff_lengths: List[int], 42 | nshots: int, 43 | act_name: str, 44 | hgp: str): 45 | 46 | # Make a general experiment folder 47 | parent_path = os.path.join(os.getcwd(), 'layer_fidelity') 48 | try: 49 | print(f'Creating folder {parent_path}') 50 | os.mkdir(parent_path) 51 | except: 52 | pass 53 | print(f'Changing directory to {parent_path}') 54 | os.chdir(parent_path) 55 | 56 | # Load the service 57 | print('Loading service') 58 | service = QiskitRuntimeService(name=act_name) 59 | 60 | for backend_name in backends: 61 | # Make an experiment folder each backend 62 | time = datetime.datetime.now().strftime('%Y-%m-%d-%H.%M.%S') 63 | directory = f"{time}_{backend_name}_layer_fidelity" 64 | path = os.path.join(parent_path, directory) 65 | print(f'Creating folder {path}') 66 | os.mkdir(path) 67 | print(f'Changing directory 
to {path}') 68 | os.chdir(path) 69 | 70 | # Get the real backend 71 | print(f'Getting backend {backend_name}') 72 | backend = service.backend(backend_name, instance=hgp) 73 | 74 | # Get 100Q chain from qiskit 75 | qchain = lfu.get_lf_chain(backend, 100) 76 | print(f'100Q chain for {backend_name} is: ', qchain) 77 | 78 | # Run LF 79 | print(f'Running LF on {backend_name}') 80 | exp_data = lfu.run_lf_chain( 81 | chain=qchain, 82 | backend=backend, 83 | nseeds=nseeds, 84 | seed=seed, 85 | cliff_lengths=cliff_lengths, 86 | nshots=nshots) 87 | 88 | # Fit 2Q experiment data 89 | print(f'Retrieving experiment results from {backend_name}') 90 | exp_data.block_for_results() 91 | 92 | # Get LF and EPLG data per length 93 | results_per_length = lfu.reconstruct_lf_per_length(exp_data, qchain, backend) 94 | results_per_length.to_csv(f'{backend_name}_lf_eplg_data.csv', float_format='%.15f') 95 | 96 | # Retrieve raw and fitted RB data 97 | rb_data_df = lfu.get_rb_data(exp_data) 98 | print(f'Saving 2Q data from {backend_name}') 99 | rb_data_df.to_csv(f'{backend_name}_full_rb_data.csv', float_format='%.15f') 100 | 101 | # Plot LF and EPLG data 102 | print(f'Making plots for {backend_name}') 103 | lfu.make_lf_eplg_plots( 104 | backend=backend, 105 | exp_data=exp_data, 106 | chain=qchain, 107 | machine=backend_name 108 | ) 109 | 110 | 111 | if __name__ == '__main__': 112 | parser = argparse.ArgumentParser(description = 'Run fast layer fidelity ' 113 | + 'on reported qikist chain. 
Specify a config ' 114 | +' yaml and override settings on the command line') 115 | parser.add_argument('-c', '--config', help='config file name', 116 | default='config.yaml') 117 | parser.add_argument('-b', '--backend', help='Specify backend and override ' 118 | + 'backend_group') 119 | parser.add_argument('-bg', '--backend_group', 120 | help='specify backend group in config file', 121 | default='backends') 122 | parser.add_argument('--hgp', help='specify hgp / qiskit instance') 123 | parser.add_argument('--name', help='Account name', default='') 124 | parser.add_argument('--nseeds', help='number of seeds', default=6) 125 | parser.add_argument('--seed', help='seed to use', default=42) 126 | parser.add_argument('--nshots', help='number of shots', default=200) 127 | parser.add_argument( 128 | '--cliff_lengths', 129 | help='list of clifford lenghts [...]', 130 | default=[1, 10, 20, 30, 40, 60, 80, 100, 150, 200, 400], 131 | ) 132 | args = parser.parse_args() 133 | 134 | #import from config 135 | config_dict = fu.import_yaml(args.config) 136 | print('Config File Found') 137 | print(config_dict) 138 | 139 | #override from the command line 140 | if args.backend is not None: 141 | backends = [args.backend] 142 | else: 143 | backends = config_dict[args.backend_group] 144 | if args.hgp is not None: 145 | hgp = args.hgp 146 | else: 147 | hgp = config_dict['hgp'] 148 | # set default values unless otherwise instructed on config_dict 149 | if 'nseeds' in config_dict.keys(): 150 | nseeds = config_dict['nseeds'] 151 | else: 152 | nseeds = args.nseeds 153 | if 'seed' in config_dict.keys(): 154 | seed = config_dict['seed'] 155 | else: 156 | seed = args.seed 157 | if 'cliff_lengths' in config_dict.keys(): 158 | cliff_lengths = config_dict['cliff_lengths'] 159 | else: 160 | cliff_lengths = args.cliff_lengths 161 | if 'nshots' in config_dict.keys(): 162 | nshots = config_dict['nshots'] 163 | else: 164 | nshots = args.nshots 165 | if 'act_name' in config_dict.keys(): 166 | act_name = 
# This code is part of Qiskit.
#
# (C) Copyright IBM 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Generate circuits for fast benchmark
"""

import argparse
from qiskit.transpiler import Target, CouplingMap
from qiskit import qpy

from qiskit_device_benchmarking.bench_code.mrb import MirrorQuantumVolume


def gen_bench_circuits(depths, he, output, opt_level, ntrials, twoqgate):
    """Pregenerate and transpile circuits for fast_bench.

    Circuits are generated as identity mirrors; the Pauli layers at the
    front and back are added later, when the benchmark is actually run.

    Args:
        depths: list of depths to generate (depth == number of qubits;
            the circuits are transpiled onto a line of `depth` qubits)
        he: hardware efficient circuits if True
        output: root file name; each depth is written to '<output>_<depth>.qpy'
        opt_level: optimization level of the transpilation
        ntrials: number of circuits to generate per depth
        twoqgate: name of the two-qubit basis gate (e.g. 'cz')

    Returns:
        None
    """

    # Echo the configuration so the run is self-describing in logs
    print(depths)
    print(he)
    print(output)
    print(opt_level)
    print(ntrials)
    print(twoqgate)

    for depth in depths:

        print('Generating Depth %d Circuits' % (depth))

        # Construct mirror QV circuits. Probabilities are not needed
        # because the circuits are identity mirrors, so skip computing them.
        mqv_exp = MirrorQuantumVolume(qubits=list(range(depth)), trials=ntrials,
                                      split_inverse=True, pauli_randomize=False,
                                      middle_pauli_randomize=False,
                                      calc_probabilities=False,
                                      he=he)

        # Build a custom line-connectivity target so the transpiler
        # cannot route outside the chosen qubit set
        cust_map = [[i, i + 1] for i in range(depth - 1)]

        cust_target = Target.from_configuration(
            basis_gates=['rz', 'sx', 'x', 'id', twoqgate],
            num_qubits=depth,
            coupling_map=CouplingMap(cust_map))

        mqv_exp.set_transpile_options(target=cust_target,
                                      optimization_level=opt_level)
        circs = mqv_exp._transpiled_circuits()

        # Tally average gate counts for reporting. Use .get() for the
        # two-qubit gate: a transpiled circuit may contain none at all
        # (e.g. depth 1), which would otherwise raise a KeyError.
        ngates = 0
        ngates_sing = 0
        for circ in circs:
            gate_count = circ.count_ops()
            ngates += gate_count.get(twoqgate, 0)
            for i in gate_count:
                if i != twoqgate and i != 'rz':
                    ngates_sing += gate_count[i]

        print('Total number of 2Q gates per circuit average: %f'
              % (ngates / len(circs)))
        print('Total number of 1Q gates (no RZ) per circuit average: %f'
              % (ngates_sing / len(circs)))

        # Serialize the transpiled circuits for later replay by fast_bench
        with open('%s_%d.qpy' % (output, depth), 'wb') as fd:
            qpy.dump(circs, fd)


if __name__ == '__main__':
    # NOTE: trailing space after 'circuits' keeps the concatenated
    # description readable ("...circuits for fast benchmark...")
    parser = argparse.ArgumentParser(description='Generate circuits '
                                     + 'for fast benchmark and save to qpy'
                                     + ' optimized on a line')
    # required=True gives a proper usage error instead of an
    # AttributeError on args.depths.split(',') below
    parser.add_argument('-d', '--depths', required=True,
                        help='depths to generate as a comma-separated list')
    parser.add_argument('--he', help='Hardware efficient', action='store_true')
    parser.add_argument('-o', '--output', help='Output filename')
    parser.add_argument('-ol', '--opt_level', help='Optimization Level', default=3)
    parser.add_argument('-n', '--ntrials', help='Number of circuits', default=10)
    parser.add_argument('-g', '--twoqgate', help='Two qubit gate', default='cz')
    args = parser.parse_args()

    gen_bench_circuits([int(i) for i in args.depths.split(',')], args.he,
                       args.output, int(args.opt_level),
                       int(args.ntrials), args.twoqgate)
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"The Qiskit Device Benchmarking setup file."

import os
from setuptools import setup, find_packages

# Absolute path of the repository root (the directory holding this file).
_ROOT = os.path.abspath(os.path.dirname(__file__))

# Runtime dependencies are single-sourced from requirements.txt,
# one requirement specifier per line.
with open("requirements.txt", encoding="utf-8") as req_file:
    REQUIREMENTS = req_file.read().splitlines()

# The package version lives in VERSION.txt inside the package directory.
version_path = os.path.join(_ROOT, "qiskit_device_benchmarking", "VERSION.txt")
with open(version_path, "r", encoding="utf-8") as version_file:
    version = version_file.read().rstrip()

# The repository README doubles as the PyPI long description.
README_PATH = os.path.join(_ROOT, "README.md")
with open(README_PATH, encoding="utf-8") as readme_file:
    README = readme_file.read()

setup(
    name="qiskit-device-benchmarking",
    version=version,
    description="Software for benchmarking devices through qiskit",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/Qiskit-Community/qiskit-device-benchmarking",
    author="Qiskit Development Team",
    author_email="qiskit@us.ibm.com",
    license="Apache 2.0",
    classifiers=[
        "Environment :: Console",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: MacOS",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Topic :: Scientific/Engineering",
    ],
    keywords="qiskit sdk quantum",
    packages=find_packages(exclude=["test*"]),
    install_requires=REQUIREMENTS,
    include_package_data=True,
    python_requires=">=3.8",
    project_urls={
        "Bug Tracker": "https://github.com/Qiskit-Community/qiskit-device-benchmarking/issues",
        "Source Code": "https://github.com/Qiskit-Community/qiskit-device-benchmarking",
    },
    zip_safe=False,
)