├── qiskit_device_benchmarking ├── VERSION.txt ├── clops │ ├── __init__.py │ └── README.md ├── mirror_test │ ├── __init__.py │ ├── README.md │ ├── mirror_test.py │ ├── mirror_pub.py │ ├── get_optimal_path.py │ └── mirror_circuits.py ├── bench_code │ ├── mcm_rb │ │ ├── Readme.md │ │ └── __init__.py │ ├── dynamic_circuits_rb │ │ ├── Readme.md │ │ ├── __init__.py │ │ └── dc_rb_experiment.py │ ├── prb │ │ ├── Readme.md │ │ ├── __init__.py │ │ ├── pur_rb.py │ │ └── purrb_analysis.py │ ├── mrb │ │ ├── Readme.md │ │ ├── __init__.py │ │ ├── mirror_qv_analysis.py │ │ └── mirror_rb_analysis.py │ ├── __init__.py │ └── bell │ │ ├── __init__.py │ │ └── bell_experiment.py ├── utilities │ ├── __init__.py │ ├── file_utils.py │ ├── clifford_utils.py │ ├── run_grid.py │ └── graph_utils.py ├── verification │ ├── __init__.py │ ├── Readme.md │ ├── gen_circuits.py │ ├── count_analyze.py │ ├── bench_analyze.py │ ├── fast_layer_fidelity.py │ ├── fast_count.py │ └── fast_bench.py └── __init__.py ├── pyproject.toml ├── requirements-dev.txt ├── requirements.txt ├── .github └── workflows │ └── lint.yaml ├── setup.py ├── notebooks └── README.md ├── .gitignore ├── README.md ├── tests └── test_benchmarks.py └── LICENSE /qiskit_device_benchmarking/VERSION.txt: -------------------------------------------------------------------------------- 1 | 0.1.0 2 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/clops/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = 
["setuptools"] 3 | build-backend = "setuptools.build_meta" 4 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | # Linters 2 | black~=22.0 3 | pylint~=3.0.2 4 | astroid~=3.0.1 # Must be kept aligned to what pylint wants 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.17 2 | scipy>=1.4 3 | qiskit>=1.0 4 | qiskit-experiments>=0.6 5 | qiskit-ibm-runtime>=0.28 6 | matplotlib>=3.4 7 | rustworkx 8 | networkx 9 | pandas 10 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mcm_rb/Readme.md: -------------------------------------------------------------------------------- 1 | ## Mid-circuit measurement RB 2 | 3 | Code for running Mid-circuit measurement RB (notebook [here](../../../notebooks/mcm_rb.ipynb). Representative of the code used in [arXiv:2207.04836](https://arxiv.org/abs/2207.04836) and based on the qiskit-experiments framework. 4 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/dynamic_circuits_rb/Readme.md: -------------------------------------------------------------------------------- 1 | ## Dynamic circuits RB 2 | 3 | Code for running Dynamic circuits RB (notebook [here](../../../notebooks/dynamic_circuits_rb.ipynb). Representative of the code used in [arXiv:2408.07677](https://arxiv.org/abs/2408.07677) and based on the qiskit-experiments framework. 
4 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/prb/Readme.md: -------------------------------------------------------------------------------- 1 | ## Purity RB 2 | 3 | Code for running purity RB (notebook [here](https://github.com/qiskit-community/qiskit-device-benchmarking/blob/main/notebooks/device_rb.ipynb)). Purity RB appends post rotations 4 | to the RB sequences to calculate Tr(rho^2) as described in the supplement of [arXiv:2302.10881](https://arxiv.org/abs/2302.10881) and previously in the Ignis code base. 5 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mrb/Readme.md: -------------------------------------------------------------------------------- 1 | # Mirror QV and RB 2 | 3 | Code written for https://arxiv.org/abs/2303.02108 and based on the earlier works T. Proctor, S. Seritan, K. Rudinger, E. Nielsen, R. Blume-Kohout, and K. Young, 4 | [Physical Review Letters 129, 150502 (2022)](https://doi.org/10.48550/arXiv.2112.09853) and T. Proctor, K. Rudinger, K. Young, E. Nielsen, and R. Blume-Kohout, [Nature Physics 18, 75 (2022)](https://doi.org/10.48550/arXiv.2008.11294). 5 | 6 | Code is based on the qiskit-experiments framework. 7 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mcm_rb/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | =============================================================== 3 | Mid-circuit measurement Randomized Benchmarking (:mod:`mcm_rb`) 4 | =============================================================== 5 | 6 | Classes 7 | ======= 8 | 9 | .. 
autosummary:: 10 | :toctree: ../stubs/ 11 | 12 | McmRB 13 | McmRBAnalysis 14 | """ 15 | 16 | from .mcm_rb_experiment import McmRB, McmRBAnalysis, SubDecayFit 17 | 18 | __all__ = [ 19 | McmRB, 20 | McmRBAnalysis, 21 | SubDecayFit, 22 | ] 23 | -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: [pull_request] 3 | jobs: 4 | build: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v4 8 | - name: Install Python 9 | uses: actions/setup-python@v5 10 | with: 11 | python-version: "3.13" 12 | - name: Install dependencies 13 | run: | 14 | python -m pip install --upgrade pip 15 | pip install ruff 16 | # Update output format to enable automatic inline annotations. 17 | - name: Run Ruff 18 | run: ruff check --output-format=github . 19 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/dynamic_circuits_rb/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | =============================================================== 3 | Dynamic Circuits Randomized Benchmarking (:mod:`dynamic_circuits_rb`) 4 | =============================================================== 5 | 6 | Classes 7 | ======= 8 | 9 | .. autosummary:: 10 | :toctree: ../stubs/ 11 | 12 | DynamicCircuitsRB 13 | DynamicCircuitsRBAnalysis 14 | """ 15 | 16 | from .dc_rb_experiment import DynamicCircuitsRB, DynamicCircuitsRBAnalysis 17 | 18 | __all__ = [ 19 | DynamicCircuitsRB, 20 | DynamicCircuitsRBAnalysis, 21 | ] 22 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/prb/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 
4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Purity RB 14 | 15 | .. currentmodule:: qiskit_experiments_internal.library.quantum_volume 16 | 17 | Classes 18 | ======= 19 | .. autosummary:: 20 | ::undoc-members: 21 | 22 | PurityRB 23 | PurityRBAnalysis 24 | 25 | """ 26 | 27 | from .pur_rb import PurityRB 28 | from .purrb_analysis import PurityRBAnalysis 29 | 30 | __all__ = [ 31 | PurityRB, 32 | PurityRBAnalysis, 33 | ] 34 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/utilities/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | """ 14 | ============================================== 15 | Qiskit Device Benchmarking (:mod:`qiskit_device_benchmarking`) 16 | ============================================== 17 | 18 | .. currentmodule:: qiskit_device_benchmarking 19 | 20 | Qiskit Device Benchmarking is a collection of code files to help 21 | users run benchmarking experiments.. 22 | """ 23 | 24 | # Modules 25 | # from . 
import framework 26 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | """ 14 | ============================================== 15 | Qiskit Device Benchmarking (:mod:`qiskit_device_benchmarking`) 16 | ============================================== 17 | 18 | .. currentmodule:: qiskit_device_benchmarking 19 | 20 | Qiskit Device Benchmarking is a collection of code files to help 21 | users run benchmarking experiments.. 22 | """ 23 | 24 | # Modules 25 | # from . import framework 26 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 
12 | 13 | """ 14 | ============================================== 15 | Qiskit Device Benchmarking (:mod:`qiskit_device_benchmarking`) 16 | ============================================== 17 | 18 | .. currentmodule:: qiskit_device_benchmarking 19 | 20 | Qiskit Device Benchmarking is a collection of code files to help 21 | users run benchmarking experiments.. 22 | """ 23 | 24 | # Modules 25 | # from . import framework 26 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | """ 14 | ============================================== 15 | Qiskit Device Benchmarking (:mod:`qiskit_device_benchmarking`) 16 | ============================================== 17 | 18 | .. currentmodule:: qiskit_device_benchmarking 19 | 20 | Qiskit Device Benchmarking is a collection of code files to help 21 | users run benchmarking experiments.. 22 | """ 23 | 24 | # Modules 25 | # from . import utilities 26 | # from . import bench_code 27 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/utilities/file_utils.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2023. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. 
You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | """File utilities for the device benchmarking.""" 14 | 15 | import yaml 16 | import datetime 17 | 18 | 19 | def import_yaml(fstr): 20 | with open(fstr, "r") as stream: 21 | data_imp = yaml.safe_load(stream) 22 | 23 | return data_imp 24 | 25 | 26 | def timestamp_name(): 27 | return datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S") 28 | 29 | 30 | def export_yaml(fstr, exp_data): 31 | with open(fstr, "w") as fout: 32 | yaml.dump(exp_data, fout, default_flow_style=None) 33 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/bell/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Variants of the Bell experiments 14 | 15 | .. currentmodule:: qiskit_experiments_internal.library.quantum_volume 16 | 17 | Classes 18 | ======= 19 | .. 
autosummary:: 20 | ::undoc-members: 21 | 22 | Bell 23 | 24 | """ 25 | 26 | from .bell_experiment import BellExperiment 27 | from .bell_experiment import BellAnalysis 28 | from .bell_experiment import CHSHAnalysis 29 | from .bell_experiment import CHSHExperiment 30 | 31 | __all__ = [ 32 | BellExperiment, 33 | BellAnalysis, 34 | CHSHAnalysis, 35 | CHSHExperiment, 36 | ] 37 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/README.md: -------------------------------------------------------------------------------- 1 | # Mirror Circuit Benchmark 2 | 3 | This is meant to offer a straightforward test of Estimator primitives and the ability to 4 | deliver accurate expectation values for utility-scale circuits. It uses Trotterized time- 5 | evolution of a 1D Ising chain as the test circuit, but with the circuit mirrored so that 6 | its effective action is equivalent to the identity. This makes it trivial to detect 7 | whether the returned expectation values are accurate. 8 | 9 | ## Usage example 10 | 11 | ```python 12 | from qiskit_ibm_runtime import QiskitRuntimeService 13 | from qiskit_device_benchmarking.mirror_test.mirror_test import submit_mirror_test, analyze_mirror_result 14 | 15 | service = QiskitRuntimeService(channel="ibm_quantum", 16 | instance="your-hub/group/project") 17 | 18 | backend = service.backend("your-favorite-ibm-quantum-computer") 19 | 20 | job = submit_mirror_test(backend, num_qubits=100, num_gates=5000) 21 | 22 | # wait for job to complete execution, then... 23 | result = job.result() 24 | analyze_mirror_result(result, accuracy_threshold=0.1, make_plots=True) 25 | ``` 26 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mrb/__init__.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2022. 
4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Variants on the quantum volume experiment 14 | 15 | .. currentmodule:: qiskit_experiments_internal.library.quantum_volume 16 | 17 | Classes 18 | ======= 19 | .. autosummary:: 20 | ::undoc-members: 21 | 22 | MirrorQuantumVolume 23 | MirrorQuantumVolumeAnalysis 24 | 25 | """ 26 | 27 | from .mirror_qv import MirrorQuantumVolume 28 | from .mirror_qv_analysis import MirrorQuantumVolumeAnalysis 29 | from .mirror_rb_experiment import MirrorRB 30 | from .mirror_rb_analysis import MirrorRBAnalysis 31 | 32 | __all__ = [ 33 | MirrorQuantumVolume, 34 | MirrorQuantumVolumeAnalysis, 35 | MirrorRB, 36 | MirrorRBAnalysis, 37 | ] 38 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/utilities/clifford_utils.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2023. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 
12 | 13 | """Clifford utilities supplementing the ones in qiskit-experiments.""" 14 | 15 | from qiskit.quantum_info import Clifford 16 | from qiskit.circuit import QuantumCircuit 17 | 18 | 19 | def compute_target_bitstring(circuit: QuantumCircuit) -> str: 20 | """For a Pauli circuit C, which consists only of Clifford gates, compute C|0>. 21 | Args: 22 | circuit: A Pauli QuantumCircuit. 23 | Returns: 24 | Target bitstring. 25 | """ 26 | # target string has a 1 for each True in the stabilizer half of the phase vector 27 | target = "".join( 28 | ["1" if phase else "0" for phase in Clifford(circuit).stab_phase[::-1]] 29 | ) 30 | return target 31 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/Readme.md: -------------------------------------------------------------------------------- 1 | # Fast Benchmarking 2 | 3 | The file fast_bench.py is a command line invocation to run a mirror qv suite on device(s) `python fast_bench.py`. Requires a config file (default is `config.yaml`) of the form 4 | ``` 5 | hgp: X/X/X 6 | backends: [ibm_sherbrooke, ibm_brisbane, ibm_torino] 7 | nrand: 10 8 | depths: [4,6,8,10,12] 9 | he: True 10 | dd: True 11 | opt_level: 1 12 | trials: 10 13 | shots: 200 14 | ``` 15 | Generates an output yaml (timestamped) with the results. There are two types of circuits for the benchmarking. 16 | Mirror QV circuits which are all-to-all and HE (hardware-efficient) Mirror QV circuits which are layers of random SU(4) assuming nearest neighbor on a chain. 17 | 18 | The output can be turned into plots with `bench_analyze.py`, e.g. `python bench_analyze.py -f MQV_2024-04-27_06_19_32.yaml -v max --plot` will product a plot of all the maximum results over the sets from the listed file. Plots are generated as pdf. 19 | 20 | Similarly, the file `fast_layer_fidelity.py` is a command line invocation to run a single layer fidelity experiment on specified device(s) `python fast_layer_fidelity.py`. 
The qubit chain selected for this is the reported 100Q on qiskit for each device(s). This file also requires a config file (default is `config.yaml`) of the form (unless overwritten by command line arguments) 21 | ``` 22 | hgp: X/X/X 23 | backends: [ibm_fez] 24 | channel: 'ibm_quantum' or 'ibm_cloud' 25 | ``` 26 | 27 | Circuits are based on the code written for https://arxiv.org/abs/2303.02108 which was based on the earlier work by Proctor et al [Phys. Rev. Lett. 129, 150502 (2022)](https://doi.org/10.48550/arXiv.2112.09853). 28 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | 13 | "The Qiskit Device Benchmarking setup file." 14 | 15 | import os 16 | from setuptools import setup, find_packages 17 | 18 | with open("requirements.txt", encoding="utf-8") as f: 19 | REQUIREMENTS = f.read().splitlines() 20 | 21 | version_path = os.path.abspath( 22 | os.path.join( 23 | os.path.join(os.path.dirname(__file__), "qiskit_device_benchmarking"), 24 | "VERSION.txt", 25 | ) 26 | ) 27 | with open(version_path, "r", encoding="utf-8") as fd: 28 | version = fd.read().rstrip() 29 | 30 | # Read long description from README.
31 | README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), "README.md") 32 | with open(README_PATH, encoding="utf-8") as readme_file: 33 | README = readme_file.read() 34 | 35 | setup( 36 | name="qiskit-device-benchmarking", 37 | version=version, 38 | description="Software for benchmarking devices through qiskit", 39 | long_description=README, 40 | long_description_content_type="text/markdown", 41 | url="https://github.com/Qiskit-Community/qiskit-device-benchmarking", 42 | author="Qiskit Development Team", 43 | author_email="qiskit@us.ibm.com", 44 | license="Apache 2.0", 45 | classifiers=[ 46 | "Environment :: Console", 47 | "License :: OSI Approved :: Apache Software License", 48 | "Intended Audience :: Developers", 49 | "Intended Audience :: Science/Research", 50 | "Operating System :: Microsoft :: Windows", 51 | "Operating System :: MacOS", 52 | "Operating System :: POSIX :: Linux", 53 | "Programming Language :: Python :: 3 :: Only", 54 | "Programming Language :: Python :: 3.8", 55 | "Programming Language :: Python :: 3.9", 56 | "Programming Language :: Python :: 3.10", 57 | "Programming Language :: Python :: 3.11", 58 | "Programming Language :: Python :: 3.12", 59 | "Topic :: Scientific/Engineering", 60 | ], 61 | keywords="qiskit sdk quantum", 62 | packages=find_packages(exclude=["test*"]), 63 | install_requires=REQUIREMENTS, 64 | include_package_data=True, 65 | python_requires=">=3.8", 66 | project_urls={ 67 | "Bug Tracker": "https://github.com/Qiskit-Community/qiskit-device-benchmarking/issues", 68 | "Source Code": "https://github.com/Qiskit-Community/qiskit-device-benchmarking", 69 | }, 70 | zip_safe=False, 71 | ) 72 | -------------------------------------------------------------------------------- /notebooks/README.md: -------------------------------------------------------------------------------- 1 | # Qiskit Device Benchmarking Notebooks 2 | 3 | This folder contains example notebooks for running benchmarks. 
4 | 5 | - [Layer Fidelity](layer_fidelity.ipynb): Example notebook for running the layer fidelity using the generation code in qiskit-experiments. In this particular example the code uses the qiskit reported errors to guess the best chain of qubits for running the layer fidelity. However, it can be easily adjusted to run on an arbitrary chain. 6 | 7 | - [Layer Fidelity Single Chain](layer_fidelity_single_chain.ipynb): Example notebook for running a layer fidelity experiment on a single chain. The default chain is the one reported on qiskit (100Q long chain) but this notebook can be easily modified to run on any arbitrary chain. A nice feature of this notebook is that it allows the user to find the best subchain within a larger chain, that is, if layer fidelity is run on a 100Q long chain, the user can easily find the best x = [4,5,6,...,98,99,100] long subchain within that chain. This notebook uses helper functions from module `layer_fidelity_utils.py` and should be less code heavy. 8 | 9 | - [Bell State Tomography](bell_state_tomography.ipynb): Example notebook for running parallel state tomography using qiskit-experiments. 10 | 11 | - [Device RB](device_rb.ipynb): Example notebook for running full device 2Q RB and Purity RB. 12 | 13 | - [Device (Direct) RB](device_rb_w_lf.ipynb): Example notebook for running full device 2Q RB using layer fidelity (direct RB). This reduces the number of single qubit gates per 2Q gate in RB. Includes an example of running unitary layer RB which allows for RZZ gate benchmarking. 14 | 15 | - [System Characterization](system_char.ipynb): Notebook to do general (non-RB) characterization of a system. Runs Coherence (T1/T2), measurement fidelity, hellinger fidelities of Bell states produced with repeated two-qubit gates and ZZ. 16 | 17 | - [Extract Benchmarks](extract_benchmarks.ipynb): Example notebook for extracting and plotting benchmarks and properties from a list of devices. 
This information includes LF, EPLG, 2Q errors, 1Q errors, T1s, T2s, and readout errors, but can be easily modified to include any other properties. 18 | 19 | - [MCM RB](mcm_rb.ipynb): Example notebook for running Mid-circuit measurement RB experiment. 20 | 21 | - [Dynamic circuits RB](dynamic_circuits_rb.ipynb): Example notebook for running dynamic circuits RB experiment. 22 | 23 | - [Layer Fidelity Placement](layer_fidelity_placement.ipynb): Example notebook of using layer fidelity to build an updated error map of the device that is more reflective of layered circuits. Also gives an example of a heuristic algorithm for finding the best N-qubit chain based on the error map. 24 | 25 | - [Clifford Benchmarking](clifford_xeb_lf.ipynb): Notebook that runs Clifford benchmarking of a fully entangled brickwork circuit. First, the code uses layer fidelity to build an updated error map (similar to [Layer Fidelity Placement](layer_fidelity_placement.ipynb)), finds the best chain and runs the Clifford benchmark on that chain for the full circuit. Optionally, the user can run a non-Clifford version of the same circuit and analyze the output with XEB. 26 | 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | .DS_Store 163 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/clops/README.md: -------------------------------------------------------------------------------- 1 | # CLOPS Benchmark 2 | 3 | This benchmark measures Circuit Layer Operations Per Seconds (CLOPS) of 4 | parameterized utility scale hardware efficient circuits. 5 | CLOPS measures the steady state throughput of a large quantity of 6 | these parameterized circuits that are 7 | of width 100 qubits with 100 layers of gates. 
8 | Each layer consists of two qubit gates across as many qubits 9 | as possible that can be done in parallel, followed by a single qubit 10 | gate(s) on every qubit to allow any arbitrary rotation, with those 11 | rotations being parameterized. 12 | Parameters are applied to the circuit to generate a large number of 13 | instantiated circuits to be executed on the quantum computer. It is 14 | up to the vendor on how to optimally execute these circuits for 15 | maximal throughput. 16 | 17 | CLOPS now supports the new `gen3-turbo` flag for execution path available 18 | on some of our devices. 19 | 20 | ## Example 21 | 22 | ```python 23 | from qiskit_ibm_runtime import QiskitRuntimeService 24 | from qiskit_device_benchmarking.clops.clops_benchmark import clops_benchmark 25 | 26 | service = QiskitRuntimeService() 27 | 28 | # Run clops with default settings (twirled circuits, 1000 circuits in run, 29 | # 100 wide by 100 layers, etc) Note this is done in a session and currently 30 | # takes about 10 minutes to run 31 | my_clops_run = clops_benchmark(service, "your-favorite-ibm-quantum-computer") 32 | 33 | # To run clops with the new `gen3-turbo` path, you can specify the 34 | # execution path.
For the new faster path you should increase the number 35 | # of circuits to 5,000 36 | my_clops_run = clops_benchmark(service, "machine supporting gen3-turbo", execution_path='gen3-turbo', num_circuits = 5000) 37 | 38 | # We can check the attributes of the benchmark run 39 | print(my_clops_run.job_attributes) 40 | {'backend_name': 'ibm_brisbane', 'width': 100, 'layers': 100, 'shots': 100, 'rep_delay': 0.00025, 'num_circuits': 1000, 'circuit_type': 'twirled', 'batch_size': None, 'pipelines': 1} 41 | 42 | # There is a standard qiskit job and we can check its status, job_id, etc 43 | print(my_clops_run.job.status()) 44 | QUEUED 45 | 46 | # The clops method will calculate the clops value for the run 47 | # Note this call will block until the result is ready 48 | print("Measured clops of", my_clops_run.job_attributes['backend_name'], "is", my_clops_run.clops()) 49 | Measured clops of ibm_brisbane is 30256 50 | ``` 51 | 52 | 53 | 54 | ## Variations 55 | 56 | 57 | The benchmark code provides several 58 | ways to measure CLOPS depending on the capability of the quantum computer. 59 | 60 | The "twirling" method uses the native parameterization of the Sampler 61 | primitive to parameterize the circuit, and optimal batching of the 62 | circuits is assumed to be done by the Sampler, freeing the user 63 | from having to optimize the batch size. The only requirement is 64 | that the total number of circuits executed needs to be chosen to 65 | get the system into a steady state to measure CLOPS. 66 | 67 | The "parameterized" method is similar, but instead sends an already 68 | parameterized circuit to the Sampler primitive, along with enough 69 | parameters to execute the specified number of circuits. Batching 70 | again is handled by the Sampler. This method requires larger bandwidth 71 | to send in all of the necessary parameters. Currently on IBM systems 72 | you will need to limit the number of circuits to approximately 160 to 73 | fit within API job input limits. 
74 | 75 | The "instantiated" method (not yet implemented) is for systems that cannot natively 76 | handle parameterized circuits. In this case the circuit parameters 77 | are bound locally and then sent to the quantum computer for execution. 78 | This method requires the user to specify the desired size of each 79 | batch of circuits (so that they can be sent together to the quantum computer) 80 | as well as the number of local parallel pipelines to bind parameters and 81 | create payloads in parallel. The user will need to tune both of these 82 | parameters to try and optimize performance of the system. This will 83 | tend to be much slower than on systems that natively support parameterized 84 | circuits. 85 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Qiskit Device Benchmarking 2 | 3 | *Qiskit Device Benchmarking* is a repository for code to run various device level benchmarks through Qiskit. The repository endeavours to accomplish several goals, including, but not limited to: 4 | - Code examples for users to replicate reported benchmarking metrics through the Qiskit backend. These will likely be notebooks to run code in the [Qiskit Experiments](https://github.com/Qiskit-Extensions/qiskit-experiments) repo, but some of the code may reside here. 5 | - More in-depth benchmarking code that was discussed in papers and has not been integrated into Qiskit Experiments. 6 | - Fast circuit validation tests. 7 | 8 | The repository is not intended to define a benchmark standard. This code base is not guaranteed to be stable and may have breaking changes. 9 | 10 | # Structure 11 | 12 | At the top level we have notebooks that give users examples on how to run various benchmarks.
13 | - [Notebooks](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/notebooks): Jupyter notebooks for running benchmarks 14 | 15 | Under a top level folder `qiskit_device_benchmarking` we have repository code files that can be imported from python: 16 | - [Utilities](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/qiskit_device_benchmarking/utilities): Benchmarking utility/helper code not found elsewhere in qiskit. If these prove useful they will be pushed into standard qiskit or qiskit-experiments. 17 | - [Benchmarking Code](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/qiskit_device_benchmarking/bench_code): General folder for benchmarking code, which may include standalone code and extensions to qiskit-experiments for custom benchmarks. 18 | - [Verification](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/qiskit_device_benchmarking/verification): Fast verification via mirror circuits using a command line program. 19 | 20 | # Paper Code 21 | 22 | For clarity here we provide links from various papers to the code in this repo. Not necessarily the exact code used in these manuscripts, but representative of what was run. 23 | 24 | - [Layer Fidelity](https://arxiv.org/abs/2311.05933): David C. McKay, Ian Hincks, Emily J. Pritchett, Malcolm Carroll, Luke C. G. Govia, Seth T. Merkel. Benchmarking Quantum Processor Performance at Scale (2023). [Code](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/notebooks/layer_fidelity.ipynb) 25 | - [Mirror QV](https://arxiv.org/abs/2303.02108): Mirko Amico, Helena Zhang, Petar Jurcevic, Lev S. Bishop, Paul Nation, Andrew Wack, David C. McKay. Defining Standard Strategies for Quantum Benchmarks (2023). [Code](https://github.com/qiskit-community/qiskit-device-benchmarking/tree/main/qiskit_device_benchmarking/bench_code/mrb) 26 | - [Mid-circuit measurement RB](https://arxiv.org/abs/2207.04836): Luke C. G. 
Govia, Petar Jurcevic, Christopher J. Wood, Naoki Kanazawa, Seth T. Merkel, David C. McKay. A randomized benchmarking suite for mid-circuit measurements (2022). [Code](notebooks/mcm_rb.ipynb) 27 | - [Dynamic circuits RB](https://arxiv.org/abs/2408.07677): Liran Shirizly, Luke C. G. Govia, David C. McKay. Randomized Benchmarking Protocol for Dynamic Circuits (2024). [Code](notebooks/dynamic_circuits_rb.ipynb) 28 | - [Clifford Benchmarking](https://arxiv.org/abs/2503.05943): Seth Merkel, Timothy Proctor, Samuele Ferracin, Jordan Hines, Samantha Barron, Luke C. G. Govia, David McKay. When Clifford benchmarks are sufficient; estimating application performance with scalable proxy circuits (2025). [Code](notebooks/clifford_xeb_lf.ipynb) 29 | 30 | # Installation 31 | 32 | ``` 33 | git clone git@github.com:qiskit-community/qiskit-device-benchmarking.git 34 | cd qiskit-device-benchmarking 35 | pip install . 36 | ``` 37 | 38 | # Run Tests 39 | 40 | ``` 41 | pip install pytest 42 | pytest 43 | ``` 44 | 45 | # Lint 46 | 47 | ``` 48 | pip install ruff 49 | ruff check # Lint files 50 | ruff format # Format files 51 | ``` 52 | 53 | # Contribution Guidelines 54 | 55 | Please open a github issue or pull request if you would like to contribute. 56 | 57 | # License 58 | 59 | [Apache License 2.0](LICENSE.txt) 60 | 61 | # Acknowledgements 62 | 63 | Portions of the code in this repository was developed via sponsorship by the Army Research Office ``QCISS Program'' under Grant Number W911NF-21-1-0002. 64 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/gen_circuits.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. 
You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Generate circuits for fast benchmark 14 | """ 15 | 16 | import argparse 17 | from qiskit.transpiler import Target, CouplingMap 18 | from qiskit import qpy 19 | 20 | from qiskit_device_benchmarking.bench_code.mrb import MirrorQuantumVolume 21 | 22 | 23 | def gen_bench_circuits(depths, he, output, opt_level, ntrials, twoqgate): 24 | """Pregenerate and transpile circuits for fast_bench 25 | Will generate the circuits as identity mirrors. Pauli's at 26 | front and back added when running 27 | 28 | Args: 29 | depths: the depths to generate for 30 | he: hardware efficient 31 | output: root file name 32 | opt_level: optimization level of the transpilation 33 | ntrials: number of circuits to generate 34 | twoqgate: two qubit gate 35 | 36 | Returns: 37 | None 38 | """ 39 | 40 | print(depths) 41 | print(he) 42 | print(output) 43 | print(opt_level) 44 | print(ntrials) 45 | print(twoqgate) 46 | 47 | for depth in depths: 48 | print("Generating Depth %d Circuits" % (depth)) 49 | 50 | # Construct mirror QV circuits on each parallel set 51 | 52 | # generate the circuits 53 | mqv_exp = MirrorQuantumVolume( 54 | qubits=list(range(depth)), 55 | trials=ntrials, 56 | split_inverse=True, 57 | pauli_randomize=False, 58 | middle_pauli_randomize=False, 59 | calc_probabilities=False, 60 | he=he, 61 | ) 62 | 63 | # Do this so it won't compile outside the qubit sets 64 | cust_map = [[i, i + 1] for i in range(depth - 1)] 65 | 66 | cust_target = Target.from_configuration( 67 | basis_gates=["rz", "sx", "x", "id", twoqgate], 68 | num_qubits=depth, 69 | coupling_map=CouplingMap(cust_map), 70 | ) 71 | 72 | 
mqv_exp.set_transpile_options(target=cust_target, optimization_level=opt_level) 73 | circs = mqv_exp._transpiled_circuits() 74 | 75 | ngates = 0 76 | ngates_sing = 0 77 | for circ in circs: 78 | gate_count = circ.count_ops() 79 | ngates += gate_count[twoqgate] 80 | for i in gate_count: 81 | if i != twoqgate and i != "rz": 82 | ngates_sing += gate_count[i] 83 | 84 | print( 85 | "Total number of 2Q gates per circuit average: %f" % (ngates / len(circs)) 86 | ) 87 | print( 88 | "Total number of 1Q gates (no RZ) per circuit average: %f" 89 | % (ngates_sing / len(circs)) 90 | ) 91 | 92 | with open("%s_%d.qpy" % (output, depth), "wb") as fd: 93 | qpy.dump(circs, fd) 94 | 95 | 96 | if __name__ == "__main__": 97 | parser = argparse.ArgumentParser( 98 | description="Generate circuits" 99 | + "for fast benchmark and save to qpy" 100 | + " optimized on a line" 101 | ) 102 | parser.add_argument("-d", "--depths", help="depths to generate as a list") 103 | parser.add_argument("--he", help="Hardware efficient", action="store_true") 104 | parser.add_argument("-o", "--output", help="Output filename") 105 | parser.add_argument("-ol", "--opt_level", help="Optimization Level", default=3) 106 | parser.add_argument("-n", "--ntrials", help="Number of circuits", default=10) 107 | parser.add_argument("-g", "--twoqgate", help="Two qubit gate", default="cz") 108 | args = parser.parse_args() 109 | 110 | gen_bench_circuits( 111 | [int(i) for i in args.depths.split(",")], 112 | args.he, 113 | args.output, 114 | int(args.opt_level), 115 | int(args.ntrials), 116 | args.twoqgate, 117 | ) 118 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/count_analyze.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. 
You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Analyze the fast_count results 14 | """ 15 | 16 | import argparse 17 | import numpy as np 18 | import qiskit_device_benchmarking.utilities.file_utils as fu 19 | import matplotlib.pyplot as plt 20 | 21 | 22 | def generate_plot(out_data, degree_data, args): 23 | """Generate a bar plot of the qubit numbers of each backend 24 | 25 | Generates a plot of the name count_plot_.pdf where XXX is the 26 | current date and time 27 | 28 | Args: 29 | out_data: data from the run (count data) 30 | degree_data: average degree 31 | args: arguments passed to the parser 32 | 33 | Returns: 34 | None 35 | """ 36 | 37 | count_data = np.array([out_data[i] for i in out_data]) 38 | degree_data = np.array([degree_data[i] for i in out_data]) 39 | backend_lbls = np.array([i for i in out_data]) 40 | sortinds = np.argsort(count_data) 41 | 42 | plt.bar(backend_lbls[sortinds], count_data[sortinds]) 43 | plt.xticks(rotation=45, ha="right") 44 | 45 | ax1 = plt.gca() 46 | 47 | if args.degree: 48 | ax2 = ax1.twinx() 49 | ax2.plot(range(len(sortinds)), degree_data[sortinds], marker="x", color="black") 50 | ax2.set_ylabel("Average Degree") 51 | 52 | plt.xlabel("Backend") 53 | plt.grid(axis="y") 54 | ax1.set_ylabel("Largest Connected Region") 55 | plt.title("CHSH Test on Each Edge to Determine Qubit Count") 56 | plt.savefig("count_plot_%s.pdf" % fu.timestamp_name(), bbox_inches="tight") 57 | plt.close() 58 | 59 | return 60 | 61 | 62 | if __name__ == "__main__": 63 | """Analyze a benchmarking run from `fast_bench.py` 64 | 65 | Args: 66 | Call -h for arguments 67 | 68 | """ 69 | 70 | parser = argparse.ArgumentParser( 71 
| description="Analyze the results of a " + "benchmarking run." 72 | ) 73 | parser.add_argument("-f", "--files", help="Comma separated list of files") 74 | parser.add_argument( 75 | "-b", 76 | "--backends", 77 | help="Comma separated list of " + "backends to plot. If empty plot all.", 78 | ) 79 | parser.add_argument("--plot", help="Generate a plot", action="store_true") 80 | parser.add_argument("--degree", help="Add degree to the plot", action="store_true") 81 | args = parser.parse_args() 82 | 83 | # import from results files and concatenate into a larger results 84 | results_dict = {} 85 | for file in args.files.split(","): 86 | results_dict_new = fu.import_yaml(file) 87 | 88 | for backend in results_dict_new: 89 | if backend not in results_dict: 90 | results_dict[backend] = results_dict_new[backend] 91 | elif backend != "config": 92 | # backend in the results dict but maybe not that depth 93 | 94 | err_str = "Backend %s already exists, duplicate results" % (backend) 95 | raise ValueError(err_str) 96 | 97 | if args.backends is not None: 98 | backends_filt = args.backends.split(",") 99 | else: 100 | backends_filt = [] 101 | 102 | count_data = {} 103 | degree_data = {} 104 | 105 | for backend in results_dict: 106 | if len(backends_filt) > 0: 107 | if backend not in backends_filt: 108 | continue 109 | 110 | if backend == "config": 111 | continue 112 | 113 | count_data[backend] = results_dict[backend]["largest_region"] 114 | degree_data[backend] = results_dict[backend]["average_degree"] 115 | print( 116 | "Backend %s, Largest Connected Region: %d" % (backend, count_data[backend]) 117 | ) 118 | print("Backend %s, Average Degree: %f" % (backend, degree_data[backend])) 119 | 120 | if args.plot: 121 | generate_plot(count_data, degree_data, args) 122 | 123 | elif args.plot: 124 | print("Need to run mean/max also") 125 | -------------------------------------------------------------------------------- /tests/test_benchmarks.py: 
-------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from qiskit.providers.jobstatus import JobStatus 4 | from qiskit_experiments.framework import AnalysisStatus 5 | from qiskit_ibm_runtime.fake_provider import FakeFez 6 | 7 | from qiskit_device_benchmarking.clops.clops_benchmark import ( 8 | create_hardware_aware_circuit, 9 | ) 10 | from qiskit_device_benchmarking.bench_code.bell import CHSHExperiment 11 | 12 | # from qiskit_device_benchmarking.bench_code.dynamic_circuits_rb import DynamicCircuitsRB 13 | # from qiskit_device_benchmarking.bench_code.mcm_rb_experiment import McmRB 14 | from qiskit_device_benchmarking.bench_code.mrb.mirror_qv import MirrorQuantumVolume 15 | from qiskit_device_benchmarking.bench_code.mrb.mirror_rb_experiment import MirrorRB 16 | from qiskit_device_benchmarking.bench_code.prb.pur_rb import PurityRB 17 | 18 | 19 | @pytest.fixture 20 | def backend(): 21 | return FakeFez() 22 | 23 | 24 | def test_clops_hardware_aware_circuit(backend): 25 | qc, params = create_hardware_aware_circuit( 26 | width=100, layers=100, backend=backend, parameterized=False 27 | ) 28 | 29 | assert qc.num_qubits == 100 30 | assert qc.depth() == 100 31 | assert not params 32 | 33 | qc, params = create_hardware_aware_circuit( 34 | width=100, layers=100, backend=backend, parameterized=True 35 | ) 36 | 37 | assert qc.num_qubits == 100 38 | assert qc.depth() == 100 39 | assert params 40 | 41 | 42 | # def test_mirror_pub(backend): 43 | # pub_options = MirrorPubOptions() 44 | # pub_options.num_qubits = 100 45 | # pub_options.target_num_2q_gates = 4986 46 | # pub_options.theta = 0 47 | # pub_options.path_strategy = "eplg_chain" 48 | # 49 | # pubs = pub_options.get_pubs(backend) 50 | # 51 | # for circuit, obs, params in pubs: 52 | # assert circuit.num_qubits == 100 53 | # assert circuit.depth() == 4986 54 | 55 | 56 | def test_chsh_experiment(backend): 57 | exp = CHSHExperiment([0, 1]) 58 | exp_data = 
exp.run(backend=backend).block_for_results() 59 | s = exp_data.analysis_results("S", dataframe=True).iloc[0] 60 | assert exp_data.job_status() == JobStatus.DONE 61 | assert exp_data.analysis_status() == AnalysisStatus.DONE 62 | assert s.value 63 | 64 | 65 | # This test attempts to open matplotlib, which it should not be doing with this 66 | # code. This code needs to be resolved for the test to be re-introduced. 67 | # 68 | # def test_bell_experiment(backend): 69 | # layered_coupling_map = [[(0, 1), (1, 0)]] 70 | # exp = BellExperiment(layered_coupling_map, backend=backend) 71 | # exp_data = exp.run(backend=backend).block_for_results() 72 | # hf = exp_data.analysis_results(dataframe=True) 73 | # assert exp_data.job_status() == JobStatus.DONE 74 | # assert exp_data.analysis_status() == AnalysisStatus.DONE 75 | # 76 | # fidelity = hf.iloc[0].value.fidelity 77 | # assert fidelity 78 | 79 | 80 | # ImportError causes tests to fail, error needs to be resolved for tests 81 | # to be re-introduced. 
82 | # 83 | # def test_dynamic_circuits_rb(): 84 | # backend = FakeFractionalBackend() 85 | # exp = DynamicCircuitsRB(physical_qubits=backend.coupling_map.physical_qubits, backend=backend) 86 | # exp_data = exp.run(backend=backend).block_for_results() 87 | # assert exp_data.job_status() == JobStatus.DONE 88 | # assert exp_data.analysis_status() == AnalysisStatus.DONE 89 | # 90 | # 91 | # def test_mcm_rb(backend): 92 | # exp = McmRB( 93 | # clif_qubit_sets=[(0, 1), (1, 0)], 94 | # meas_qubit_sets=[(0, 1), (1, 0)], 95 | # backend=backend 96 | # ) 97 | # exp_data = exp.run(backend=backend).block_for_results() 98 | # assert exp_data.job_status() == JobStatus.DONE 99 | # assert exp_data.analysis_status() == AnalysisStatus.DONE 100 | 101 | 102 | def test_mirror_qv(backend): 103 | exp = MirrorQuantumVolume(qubits=[0, 1], backend=backend) 104 | exp_data = exp.run(backend=backend).block_for_results() 105 | mean_success_probability = exp_data.analysis_results( 106 | "mean_success_probability", dataframe=True 107 | ).iloc[0] 108 | assert exp_data.job_status() == JobStatus.DONE 109 | assert exp_data.analysis_status() == AnalysisStatus.DONE 110 | assert mean_success_probability.value 111 | 112 | 113 | def test_mirror_rb(backend): 114 | exp = MirrorRB(physical_qubits=[0, 1, 2], lengths=[2], backend=backend) 115 | exp_data = exp.run(backend=backend).block_for_results() 116 | assert exp_data.job_status() == JobStatus.DONE 117 | assert exp_data.analysis_status() == AnalysisStatus.DONE 118 | 119 | 120 | def test_purity_rb(backend): 121 | exp = PurityRB(physical_qubits=[0, 1], lengths=[1], backend=backend) 122 | exp_data = exp.run(backend=backend).block_for_results() 123 | alpha = exp_data.analysis_results("alpha", dataframe=True).iloc[0] 124 | EPC = exp_data.analysis_results("EPC", dataframe=True).iloc[0] 125 | EPG_cz = exp_data.analysis_results("EPG_cz", dataframe=True).iloc[0] 126 | assert exp_data.job_status() == JobStatus.DONE 127 | assert exp_data.analysis_status() == 
AnalysisStatus.DONE 128 | assert alpha.value 129 | assert EPC.value 130 | assert EPG_cz.value 131 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mrb/mirror_qv_analysis.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Quantum Volume analysis class. 14 | """ 15 | 16 | import numpy as np 17 | from uncertainties import unumpy as unp 18 | from uncertainties import ufloat 19 | 20 | from qiskit_experiments.exceptions import AnalysisError 21 | from qiskit_experiments.data_processing import DataProcessor 22 | from qiskit_experiments.framework import ( 23 | BaseAnalysis, 24 | AnalysisResultData, 25 | Options, 26 | ExperimentData, 27 | ) 28 | from qiskit_experiments.framework.containers import ArtifactData 29 | 30 | # import this data processor from rb_analysis 31 | from qiskit_device_benchmarking.bench_code.mrb.mirror_rb_analysis import ( 32 | _ComputeQuantities, 33 | ) 34 | 35 | 36 | class MirrorQuantumVolumeAnalysis(BaseAnalysis): 37 | r"""A class to analyze mirror quantum volume experiments. 38 | 39 | # section: overview 40 | Calculate the success (fraction of target measured) and polarization 41 | Optionally calcuate an effective HOP 42 | """ 43 | 44 | def _initialize(self, experiment_data: ExperimentData): 45 | """Initialize curve analysis by setting up the data processor for Mirror 46 | RB data. 
47 | 48 | Args: 49 | experiment_data: Experiment data to analyze. 50 | """ 51 | 52 | target_bs = [] 53 | self.depth = None 54 | self.ntrials = 0 55 | for circ_result in experiment_data.data(): 56 | target_bs.append(circ_result["metadata"]["target_bitstring"]) 57 | trial_depth = circ_result["metadata"]["depth"] 58 | self.ntrials += 1 59 | if self.depth is None: 60 | self.depth = trial_depth 61 | elif trial_depth != self.depth: 62 | raise AnalysisError( 63 | "QuantumVolume circuits do not all have the same depth." 64 | ) 65 | 66 | num_qubits = self.depth 67 | 68 | self.set_options( 69 | data_processor=DataProcessor( 70 | input_key="counts", 71 | data_actions=[ 72 | _ComputeQuantities( 73 | analyzed_quantity=self.options.analyzed_quantity, 74 | num_qubits=num_qubits, 75 | target_bs=target_bs, 76 | ) 77 | ], 78 | ) 79 | ) 80 | 81 | @classmethod 82 | def _default_options(cls) -> Options: 83 | """Return default analysis options. 84 | 85 | Analysis Options: 86 | plot (bool): Set ``True`` to create figure for fit result. 87 | ax(AxesSubplot): Optional. A matplotlib axis object to draw. 88 | """ 89 | options = super()._default_options() 90 | options.plot = False 91 | options.ax = None 92 | options.calc_hop = True 93 | 94 | # By default, effective polarization is plotted (see arXiv:2112.09853). We can 95 | # also plot success probability or adjusted success probability (see PyGSTi). 
96 | # Do this by setting options to "Success Probability" or "Adjusted Success Probability" 97 | options.analyzed_quantity = "Effective Polarization" 98 | 99 | options.set_validator( 100 | field="analyzed_quantity", 101 | validator_value=[ 102 | "Success Probability", 103 | "Adjusted Success Probability", 104 | "Effective Polarization", 105 | ], 106 | ) 107 | 108 | return options 109 | 110 | def _run_analysis( 111 | self, 112 | experiment_data: ExperimentData, 113 | ): 114 | results = [] 115 | artifacts = [] 116 | 117 | # Prepare for fitting 118 | self._initialize(experiment_data) 119 | 120 | processed = self.options.data_processor(experiment_data.data()) 121 | yvals = unp.nominal_values(processed).flatten() 122 | 123 | success_prob_result = AnalysisResultData( 124 | "mean_success_probability", 125 | value=ufloat(nominal_value=np.mean(yvals), std_dev=np.std(yvals)), 126 | quality="good", 127 | extra={ 128 | "depth": self.depth, 129 | "trials": self.ntrials, 130 | }, 131 | ) 132 | 133 | artifacts.append( 134 | ArtifactData( 135 | name="data", 136 | data=yvals, 137 | ) 138 | ) 139 | 140 | if self.options.plot: 141 | # figure out what to do 142 | figures = None 143 | else: 144 | figures = None 145 | 146 | results.append(success_prob_result) 147 | 148 | return results + artifacts, figures 149 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/bench_analyze.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Analyze the benchmarking results 14 | """ 15 | 16 | import argparse 17 | import numpy as np 18 | import qiskit_device_benchmarking.utilities.file_utils as fu 19 | import matplotlib.pyplot as plt 20 | 21 | 22 | def generate_plot(out_data, config_data, args): 23 | """Generate a plot from the fast_bench data 24 | 25 | Generates a plot of the name result_plot_.pdf where XXX is the 26 | current date and time 27 | 28 | Args: 29 | out_data: data from the run 30 | config_data: configuration data from the run 31 | args: arguments passed to the parser 32 | 33 | Returns: 34 | None 35 | """ 36 | 37 | markers = ["o", "x", ".", "s", "^", "v", "*"] 38 | 39 | for i, backend in enumerate(out_data): 40 | plt.semilogy( 41 | out_data[backend][0], 42 | out_data[backend][1], 43 | label=backend, 44 | marker=markers[np.mod(i, len(markers))], 45 | ) 46 | 47 | plt.legend() 48 | plt.xlabel("Depth") 49 | plt.ylabel("Success Probability (%s over sets)" % args.value) 50 | plt.ylim(top=1.0) 51 | plt.title( 52 | "Running Mirror - HE: %s, DD: %s, Trials: %d" 53 | % (config_data["he"], config_data["dd"], config_data["trials"]) 54 | ) 55 | plt.grid(True) 56 | plt.savefig("result_plot_%s.pdf" % fu.timestamp_name()) 57 | plt.close() 58 | 59 | return 60 | 61 | 62 | if __name__ == "__main__": 63 | """Analyze a benchmarking run from `fast_bench.py` 64 | 65 | Args: 66 | Call -h for arguments 67 | 68 | """ 69 | 70 | parser = argparse.ArgumentParser( 71 | description="Analyze the results of a " + "benchmarking run." 72 | ) 73 | parser.add_argument("-f", "--files", help="Comma separated list of files") 74 | parser.add_argument( 75 | "-b", 76 | "--backends", 77 | help="Comma separated list of " + "backends to plot. 
If empty plot all.", 78 | ) 79 | parser.add_argument( 80 | "-v", 81 | "--value", 82 | help="Statistical value to compute", 83 | choices=["mean", "median", "max", "min"], 84 | default="mean", 85 | ) 86 | parser.add_argument("--plot", help="Generate a plot", action="store_true") 87 | args = parser.parse_args() 88 | 89 | # import from results files and concatenate into a larger results 90 | results_dict = {} 91 | for file in args.files.split(","): 92 | results_dict_new = fu.import_yaml(file) 93 | 94 | for backend in results_dict_new: 95 | if backend not in results_dict: 96 | results_dict[backend] = results_dict_new[backend] 97 | elif backend != "config": 98 | # backend in the results dict but maybe not that depth 99 | for depth in results_dict_new[backend]: 100 | if depth in results_dict[backend]: 101 | err_str = ( 102 | "Depth %s already exists for backend %s, duplicate results" 103 | % (depth, backend) 104 | ) 105 | raise ValueError(err_str) 106 | else: 107 | # check the metadata is the same 108 | # TO DO 109 | 110 | results_dict[backend][depth] = results_dict_new[backend][depth] 111 | 112 | if args.backends is not None: 113 | backends_filt = args.backends.split(",") 114 | else: 115 | backends_filt = [] 116 | 117 | out_data = {} 118 | 119 | for backend in results_dict: 120 | if len(backends_filt) > 0: 121 | if backend not in backends_filt: 122 | continue 123 | 124 | if backend == "config": 125 | continue 126 | print(backend) 127 | depth_list = [] 128 | depth_list_i = [] 129 | 130 | out_data[backend] = [] 131 | 132 | for depth in results_dict[backend]: 133 | if depth == "job_ids": 134 | continue 135 | depth_list_i.append(depth) 136 | if args.value == "mean": 137 | depth_list.append(np.mean(results_dict[backend][depth]["mean"])) 138 | elif args.value == "max": 139 | depth_list.append(np.max(results_dict[backend][depth]["mean"])) 140 | elif args.value == "min": 141 | depth_list.append(np.min(results_dict[backend][depth]["mean"])) 142 | else: 143 | 
depth_list.append(np.median(results_dict[backend][depth]["mean"])) 144 | 145 | print("Backend %s" % backend) 146 | print("Depths: %s" % depth_list_i) 147 | 148 | if args.value == "mean": 149 | print("Means: %s" % depth_list) 150 | elif args.value == "max": 151 | print("Max: %s" % depth_list) 152 | elif args.value == "min": 153 | print("Min: %s" % depth_list) 154 | else: 155 | print("Median: %s" % depth_list) 156 | 157 | out_data[backend].append(depth_list_i) 158 | out_data[backend].append(depth_list) 159 | 160 | if args.plot: 161 | generate_plot(out_data, results_dict["config"], args) 162 | 163 | elif args.plot: 164 | print("Need to run mean/max also") 165 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/verification/fast_layer_fidelity.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # This code is part of Qiskit. 3 | # 4 | # (C) Copyright IBM 2024. 5 | # 6 | # This code is licensed under the Apache License, Version 2.0. You may 7 | # obtain a copy of this license in the LICENSE.txt file in the root directory 8 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 9 | # 10 | # Any modifications or derivative works of this code must retain this 11 | # copyright notice, and modified files need to carry a notice indicating 12 | # that they have been altered from the originals. 
13 | 14 | """ 15 | Fast Layer Fidelity on the reported 100Q qiskit chain 16 | """ 17 | 18 | import argparse 19 | from typing import List 20 | import datetime 21 | import os 22 | 23 | from qiskit_ibm_runtime import QiskitRuntimeService 24 | 25 | import qiskit_device_benchmarking.utilities.layer_fidelity_utils as lfu 26 | import qiskit_device_benchmarking.utilities.file_utils as fu 27 | 28 | 29 | def run_fast_lf( 30 | backends: List[str], 31 | nseeds: int, 32 | seed: int, 33 | cliff_lengths: List[int], 34 | nshots: int, 35 | act_name: str, 36 | hgp: str, 37 | ): 38 | # Make a general experiment folder 39 | parent_path = os.path.join(os.getcwd(), "layer_fidelity") 40 | try: 41 | print(f"Creating folder {parent_path}") 42 | os.mkdir(parent_path) 43 | except FileExistsError: 44 | pass 45 | print(f"Changing directory to {parent_path}") 46 | os.chdir(parent_path) 47 | 48 | # Load the service 49 | print("Loading service") 50 | service = QiskitRuntimeService(name=act_name) 51 | 52 | for backend_name in backends: 53 | # Make an experiment folder each backend 54 | time = datetime.datetime.now().strftime("%Y-%m-%d-%H.%M.%S") 55 | directory = f"{time}_{backend_name}_layer_fidelity" 56 | path = os.path.join(parent_path, directory) 57 | print(f"Creating folder {path}") 58 | os.mkdir(path) 59 | print(f"Changing directory to {path}") 60 | os.chdir(path) 61 | 62 | # Get the real backend 63 | print(f"Getting backend {backend_name}") 64 | backend = service.backend(backend_name, instance=hgp) 65 | 66 | # Get 100Q chain from qiskit 67 | qchain = lfu.get_lf_chain(backend, 100) 68 | print(f"100Q chain for {backend_name} is: ", qchain) 69 | 70 | # Run LF 71 | print(f"Running LF on {backend_name}") 72 | exp_data = lfu.run_lf_chain( 73 | chain=qchain, 74 | backend=backend, 75 | nseeds=nseeds, 76 | seed=seed, 77 | cliff_lengths=cliff_lengths, 78 | nshots=nshots, 79 | ) 80 | 81 | # Fit 2Q experiment data 82 | print(f"Retrieving experiment results from {backend_name}") 83 | 
exp_data.block_for_results() 84 | 85 | # Get LF and EPLG data per length 86 | results_per_length = lfu.reconstruct_lf_per_length(exp_data, qchain, backend) 87 | results_per_length.to_csv( 88 | f"{backend_name}_lf_eplg_data.csv", float_format="%.15f" 89 | ) 90 | 91 | # Retrieve raw and fitted RB data 92 | rb_data_df = lfu.get_rb_data(exp_data) 93 | print(f"Saving 2Q data from {backend_name}") 94 | rb_data_df.to_csv(f"{backend_name}_full_rb_data.csv", float_format="%.15f") 95 | 96 | # Plot LF and EPLG data 97 | print(f"Making plots for {backend_name}") 98 | lfu.make_lf_eplg_plots( 99 | backend=backend, exp_data=exp_data, chain=qchain, machine=backend_name 100 | ) 101 | 102 | 103 | if __name__ == "__main__": 104 | parser = argparse.ArgumentParser( 105 | description="Run fast layer fidelity " 106 | + "on reported qikist chain. Specify a config " 107 | + " yaml and override settings on the command line" 108 | ) 109 | parser.add_argument( 110 | "-c", "--config", help="config file name", default="config.yaml" 111 | ) 112 | parser.add_argument( 113 | "-b", "--backend", help="Specify backend and override " + "backend_group" 114 | ) 115 | parser.add_argument( 116 | "-bg", 117 | "--backend_group", 118 | help="specify backend group in config file", 119 | default="backends", 120 | ) 121 | parser.add_argument("--hgp", help="specify hgp / qiskit instance") 122 | parser.add_argument("--name", help="Account name", default="") 123 | parser.add_argument("--nseeds", help="number of seeds", default=6) 124 | parser.add_argument("--seed", help="seed to use", default=42) 125 | parser.add_argument("--nshots", help="number of shots", default=200) 126 | parser.add_argument( 127 | "--cliff_lengths", 128 | help="list of clifford lenghts [...]", 129 | default=[1, 10, 20, 30, 40, 60, 80, 100, 150, 200, 400], 130 | ) 131 | args = parser.parse_args() 132 | 133 | # import from config 134 | config_dict = fu.import_yaml(args.config) 135 | print("Config File Found") 136 | print(config_dict) 137 | 138 
| # override from the command line 139 | if args.backend is not None: 140 | backends = [args.backend] 141 | else: 142 | backends = config_dict[args.backend_group] 143 | if args.hgp is not None: 144 | hgp = args.hgp 145 | else: 146 | hgp = config_dict["hgp"] 147 | # set default values unless otherwise instructed on config_dict 148 | if "nseeds" in config_dict.keys(): 149 | nseeds = config_dict["nseeds"] 150 | else: 151 | nseeds = args.nseeds 152 | if "seed" in config_dict.keys(): 153 | seed = config_dict["seed"] 154 | else: 155 | seed = args.seed 156 | if "cliff_lengths" in config_dict.keys(): 157 | cliff_lengths = config_dict["cliff_lengths"] 158 | else: 159 | cliff_lengths = args.cliff_lengths 160 | if "nshots" in config_dict.keys(): 161 | nshots = config_dict["nshots"] 162 | else: 163 | nshots = args.nshots 164 | if "act_name" in config_dict.keys(): 165 | act_name = config_dict["act_name"] 166 | else: 167 | act_name = args.name 168 | 169 | # Run fast layer fidelity on the list of backends 170 | print("Running fast layer fidelity on backend(s)") 171 | run_fast_lf(backends, nseeds, seed, cliff_lengths, nshots, act_name, hgp) 172 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/mirror_test.py: -------------------------------------------------------------------------------- 1 | # (C) Copyright IBM 2024. 2 | # 3 | # This code is licensed under the Apache License, Version 2.0. You may 4 | # obtain a copy of this license in the LICENSE.txt file in the root directory 5 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 6 | # 7 | # Any modifications or derivative works of this code must retain this 8 | # copyright notice, and modified files need to carry a notice indicating 9 | # that they have been altered from the originals. 
from typing import Optional, List
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from qiskit_ibm_runtime import (
    EstimatorV2 as Estimator,
    EstimatorOptions,
    IBMBackend,
    RuntimeJobV2 as RuntimeJob,
)
from qiskit_ibm_runtime.utils.noise_learner_result import LayerError
from qiskit.primitives import PrimitiveResult

from .mirror_pub import MirrorPubOptions


def submit_mirror_test(
    backend: IBMBackend,
    num_gates: int = 4986,
    num_qubits: int = 100,
    theta: float = 0,
    path: Optional[tuple[int, ...]] = None,
    path_strategy: str = "eplg_chain",
    noise_model: Optional[List[LayerError]] = None,
    execution_path: Optional[str] = None,
) -> RuntimeJob:
    """
    Constructs a mirror circuit test based upon a 1D Ising model simulation. The 1D model
    is executed on a line of qubits. The particular line can be selected automatically by
    passing `num_qubits` along with a `path_strategy`, or manually by specifying a `path`
    as a list of edges (q_i, q_j) in the connectivity graph of the backend. `num_gates`
    will control the number of distinct time-steps in the Trotter evolution of the model.
    The `theta` parameter controls the rotation angle of the layer of 1Q gates inserted
    between the 2Q gate layers. Non-zero values of `theta` will ensure entanglement growth
    in successive time steps.

    You can avoid re-learning noise models by passing in an already learned `noise_model`
    from a prior `NoiseLearner` execution.

    Args:
        backend: the IBM backend to submit the benchmark to.
        num_gates: proxy for number of Trotter time steps in the 1D Ising model circuit.
        num_qubits: determines the width of the benchmark circuit.
        theta: Controls rotation angle of 1Q gates in Trotter step. Non-zero values will
            spread entanglement.
        path: a list of edges (q_i, q_j) in the connectivity graph of the backend that
            defines the 1D chain of the Ising model
        path_strategy: one of "eplg_chain", "vf2_optimal", or None. "eplg_chain" will use
            the same chain as found by the EPLG benchmark. "vf2_optimal" will choose a
            chain using the same heuristics as the vf2 layout pass in Qiskit (also known
            as "mapomatic"). A value of None will simply select an appropriate length chain
            from the longest possible chain on the device.
        noise_model: A noise model from a prior NoiseLearner or Estimator job on the same
            layers as used in the benchmark circuit.
        execution_path: A value to pass to the experimental "execution_path" option of the
            Estimator.

    Returns:
        A RuntimeJob corresponding to the Estimator query of the benchmark.
    """
    pub_options = MirrorPubOptions()
    pub_options.num_qubits = num_qubits
    pub_options.target_num_2q_gates = num_gates
    pub_options.theta = theta
    # an explicit path overrides any path-finding strategy
    if path is not None:
        pub_options.path = path
        pub_options.path_strategy = None
    else:
        pub_options.path_strategy = path_strategy

    pubs = pub_options.get_pubs(backend)

    options = EstimatorOptions()
    # turn on T-REX and ZNE
    options.resilience_level = 2

    # dynamical decoupling
    options.dynamical_decoupling.enable = True
    options.dynamical_decoupling.sequence_type = "XpXm"

    # twirling
    options.twirling.enable_gates = True
    options.twirling.num_randomizations = 1000
    options.twirling.shots_per_randomization = 64

    # PEA
    options.resilience.zne.amplifier = "pea"
    options.resilience.zne.noise_factors = [1, 1.6, 1.9, 2.8]

    if noise_model is not None:
        # reuse a previously learned layer noise model
        options.resilience.layer_noise_model = noise_model
    else:
        options.resilience.layer_noise_learning.shots_per_randomization = 64
        options.resilience.layer_noise_learning.num_randomizations = 50
        options.resilience.layer_noise_learning.layer_pair_depths = [0, 6, 16, 32, 64]

    # experimental options
    options.experimental = {"execution": {"fast_parametric_update": True}}
    if execution_path:
        options.experimental["execution_path"] = execution_path

    estimator = Estimator(backend, options=options)
    return estimator.run(pubs)


def analyze_mirror_result(
    result: PrimitiveResult, accuracy_threshold: float = 0.1, make_plots: bool = False
):
    """
    Analyze the outcome of a mirror test job. Pass the `PrimitiveResult` object produced
    by `job.result()` from the job generated by `submit_mirror_test`. This method will
    calculate statistics of the distribution of errors from the ideal expectation values.
    When `make_plots=True`, will plot the CDF of the errors.

    Returns:
        Tuple of (median_error, mean_error, fraction of observables within
        `accuracy_threshold` of the ideal value 1).
    """
    assert len(result) == 1, "Expected a length 1 PrimitiveResult"
    evs = result[0].data.evs
    evs_shape = evs.shape
    # we expect a shape of the form (1, 1, N)
    assert len(evs_shape) == 3, "Failed data shape check"
    assert evs_shape[0] == 1, "Failed data shape check"
    assert evs_shape[1] == 1, "Failed data shape check"

    evs = evs.flatten()
    N = len(evs)
    # ideal expectation value of each observable is 1 for a mirror circuit
    ev_errors = np.abs(1 - evs)
    ev_errors.sort()

    median_error = np.median(ev_errors)
    mean_error = np.average(ev_errors)
    print(f"Median error: {median_error}")
    print(f"Mean error: {mean_error}")

    # find fraction within the accuracy threshold of the ideal value.
    # Count directly: the previous np.argmax(ev_errors > threshold) form
    # returned 0.0 when *all* errors were within threshold (argmax of an
    # all-False array is 0), instead of the correct 1.0.
    fraction = np.count_nonzero(ev_errors <= accuracy_threshold) / N
    print(
        f"Fraction within {int(accuracy_threshold * 100)}% of ideal value: {fraction}"
    )

    if make_plots:
        ev_cdf = stats.ecdf(ev_errors)
        xpts = np.logspace(-3, 0, 101)
        plt.semilogx(xpts, ev_cdf.cdf.evaluate(xpts))
        plt.xlabel(r"$|\left< Z_i \right> - Exact|$")
        plt.xlim((xpts[0], xpts[-1]))
        plt.ylabel("Fraction of observables")
        plt.grid()

    return median_error, mean_error, fraction
plt.ylabel("Fraction of observables") 156 | plt.grid() 157 | 158 | return median_error, mean_error, fraction 159 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/utilities/run_grid.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Make a new error map by running layer fidelity on a grid of 14 | qubits 15 | """ 16 | 17 | import argparse 18 | import warnings 19 | from qiskit_ibm_runtime import QiskitRuntimeService 20 | 21 | import qiskit_device_benchmarking.utilities.file_utils as fu 22 | import qiskit_device_benchmarking.utilities.graph_utils as gu 23 | import qiskit_device_benchmarking.utilities.layer_fidelity_utils as lfu 24 | from qiskit_experiments.library.randomized_benchmarking import LayerFidelity 25 | 26 | def run_grid( 27 | backend_name, 28 | trials=3, 29 | nshots=200, 30 | act_name="", 31 | seed=42, 32 | write_file="" 33 | ): 34 | """Run a layer fidelity on a grid 35 | 36 | 37 | Args: 38 | backend_name: backend to run on 39 | trials: number of randomizations 40 | nshots: number of shots 41 | act_name: account name to be passed to the runtime service 42 | seed: randomization seed 43 | write_file: file to write the new error map to 44 | 45 | Returns: 46 | prints the dictionary to the cmd line 47 | """ 48 | 49 | warnings.filterwarnings( 50 | "error", message=".*run.*", category=DeprecationWarning, append=False 51 | ) 52 | 53 | # load the 
service 54 | service = QiskitRuntimeService(name=act_name) 55 | backend = service.backend(backend_name, use_fractional_gates=False) 56 | coupling_map = backend.coupling_map 57 | 58 | # Get two qubit gate 59 | if "ecr" in backend.configuration().basis_gates: 60 | twoq_gate = "ecr" 61 | elif "cz" in backend.configuration().basis_gates: 62 | twoq_gate = "cz" 63 | else: 64 | twoq_gate = "cx" 65 | 66 | # Get one qubit basis gates 67 | oneq_gates = [] 68 | for i in backend.configuration().basis_gates: 69 | # put in a case to handle rx and rzz 70 | if i.casefold() == "rx" or i.casefold() == "rzz": 71 | continue 72 | if i.casefold() != twoq_gate.casefold(): 73 | oneq_gates.append(i) 74 | 75 | # first get the grid chains, these are hard coded in the layer fidelity utilities module 76 | grid_chains = lfu.get_grids(backend) 77 | 78 | # there are two sets of chains that can be run in four disjoint experiments 79 | print("Decomposing grid chain into disjoint layers") 80 | layers = [[] for i in range(4)] 81 | grid_chain_flt = [[], []] 82 | for i in range(2): 83 | all_pairs = gu.path_to_edges(grid_chains[i], coupling_map) 84 | for j, pair_lst in enumerate(all_pairs): 85 | grid_chain_flt[i] += grid_chains[i][j] 86 | sub_pairs = [tuple(pair) for pair in pair_lst] # make this is a list of tuples 87 | layers[2 * i] += sub_pairs[0::2] 88 | layers[2 * i + 1] += sub_pairs[1::2] 89 | 90 | # Check that each list is in the coupling map and is disjoint 91 | for layer in layers: 92 | for qpair in layer: 93 | if tuple(qpair) not in coupling_map: 94 | raise ValueError(f"Gate on {qpair} does not exist") 95 | 96 | for k in layer: 97 | if k == qpair: 98 | continue 99 | 100 | if k[0] in qpair or k[1] in qpair: 101 | print(f"Warning: sets are not disjoint for gate {k} and {qpair}") 102 | 103 | 104 | # generate two experiments 105 | lfexps = [] 106 | for i in range(2): 107 | lfexps.append( 108 | LayerFidelity( 109 | physical_qubits=grid_chain_flt[i], 110 | two_qubit_layers=layers[2 * i : (2 * i + 
2)], 111 | lengths=[1, 10, 20, 30, 40, 60, 80, 100, 150, 200, 400], 112 | backend=backend, 113 | num_samples=trials, 114 | seed=seed, 115 | two_qubit_gate=twoq_gate, 116 | one_qubit_basis_gates=oneq_gates, 117 | ) 118 | ) 119 | 120 | # set maximum number of circuits per job to avoid errors due to too large payload 121 | lfexps[i].experiment_options.max_circuits = 300 122 | 123 | # Run the LF experiment (generate circuits and submit the job) 124 | exp_data_lst = [] 125 | for i in range(2): 126 | exp_data_lst.append(lfexps[i].run(shots=nshots)) 127 | print( 128 | f"Run experiment: ID={exp_data_lst[i].experiment_id} with jobs {exp_data_lst[i].job_ids}]" 129 | ) 130 | 131 | # make an updated error map 132 | updated_err_dicts = [] 133 | err_dict = lfu.make_error_dict(backend, twoq_gate) 134 | 135 | for i in range(2): 136 | # get the results from the experiment 137 | df = exp_data_lst[i].analysis_results(dataframe=True) 138 | for j in range(2): 139 | updated_err_dicts.append(lfu.df_to_error_dict(df, layers[2 * i + j])) 140 | 141 | 142 | err_dict = lfu.update_error_dict(err_dict, updated_err_dicts) 143 | 144 | err_dict_ex = {'err_dict': err_dict, 'config': {}} 145 | 146 | err_dict_ex['config']['backend'] = backend_name 147 | err_dict_ex['config']['jobs'] = [exp_data_lst[i].job_ids for i in range(2)] 148 | err_dict_ex['config']['samples'] = trials 149 | err_dict_ex['config']['seed'] = seed 150 | err_dict_ex['config']['nshots'] = nshots 151 | 152 | fu.export_yaml(write_file, err_dict_ex) 153 | 154 | 155 | if __name__ == "__main__": 156 | parser = argparse.ArgumentParser( 157 | description="Run layer fidelity on a " 158 | + "prespecified grid of qubits to " 159 | + " generate a new error map yaml" 160 | ) 161 | parser.add_argument( 162 | "-b", "--backend", help="Specify backend", required=True 163 | ) 164 | parser.add_argument("--name", help="Account name", default="") 165 | parser.add_argument("--nshots", help="Number of shots to use", default=200) 166 | 
parser.add_argument("--trials", help="Number of randomizations", default=3) 167 | parser.add_argument("--seed", help="Seed to use", default=42) 168 | parser.add_argument("--file", help="file to write to") 169 | args = parser.parse_args() 170 | 171 | if args.file is not None: 172 | out_file = args.file 173 | else: 174 | out_file = "%s_grid_err_"%args.backend + fu.timestamp_name() + ".yaml" 175 | 176 | run_grid( 177 | backend_name=args.backend, 178 | trials=args.trials, 179 | nshots=args.nshots, 180 | act_name=args.name, 181 | seed=args.seed, 182 | write_file=out_file 183 | ) 184 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/mirror_test/mirror_pub.py: -------------------------------------------------------------------------------- 1 | # (C) Copyright IBM 2024. 2 | # 3 | # This code is licensed under the Apache License, Version 2.0. You may 4 | # obtain a copy of this license in the LICENSE.txt file in the root directory 5 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 6 | # 7 | # Any modifications or derivative works of this code must retain this 8 | # copyright notice, and modified files need to carry a notice indicating 9 | # that they have been altered from the originals. 
from functools import lru_cache
from typing import Literal, Optional

import networkx as nx
import numpy as np
from qiskit.primitives.containers.bindings_array import BindingsArray
from qiskit.primitives.containers.estimator_pub import EstimatorPub
from qiskit.primitives.containers.observables_array import ObservablesArray

from .mirror_circuits import mirror_trotter_pub_1d
from .get_optimal_path import get_optimal_path


def mirror_pub(
    num_theta: int,
    backend,
    num_qubits: Optional[int] = None,
    num_steps: int = None,
    target_num_2q_gates: int = None,
    num_magnetization: int = 1,
    repeat_magnetization: bool = False,
    repeat_theta: bool = False,
    path: tuple[int] = None,
    theta: float = np.pi / 4,
) -> EstimatorPub:
    """Build a 1D mirror-Trotter EstimatorPub on a chain of qubits.

    Exactly one of ``num_steps`` or ``target_num_2q_gates`` must be given; the
    other is derived from it. If ``path`` is None a chain is taken from the
    longest path on the device. Sweep metadata (theta, magnetization, depth,
    gate counts) is attached to the circuit metadata.

    Raises:
        ValueError: if neither/both of ``num_steps``/``target_num_2q_gates``
            are given, if the supplied path is too short, or if the backend
            has no recognized 2q gate.
    """
    if num_steps is None and target_num_2q_gates is None:
        raise ValueError("Must specify either num steps or target 2q gates")
    if num_steps is not None and target_num_2q_gates is not None:
        raise ValueError("Can only specify either num steps or target 2q gates")

    if path is None:
        coupling_map = tuple(tuple(edge) for edge in backend.coupling_map)
        maximal_path = get_longest_path(coupling_map)

        if num_qubits is None:
            num_qubits = len(maximal_path)
        path = tuple(maximal_path[:num_qubits])
    else:
        if num_qubits is None:
            # use the full user-supplied path; previously this case fell
            # through to `len(path) < None` and raised a TypeError
            num_qubits = len(path)
        elif len(path) < num_qubits:
            raise ValueError(
                f"Not enough qubits specified in path, {len(path)} < {num_qubits}"
            )
        path = path[:num_qubits]

    # derive the unspecified one of (num_steps, target_num_2q_gates); each
    # brickwork Trotter step contributes ~2 gates per qubit
    if target_num_2q_gates is not None:
        num_steps = int(np.round(0.5 * target_num_2q_gates / num_qubits))
    if num_steps is not None:
        target_num_2q_gates = int(np.round(2 * num_steps * num_qubits))

    # rounding can hit zero; bump back up to the minimum sensible values
    if num_steps == 0:
        num_steps = 1
    if target_num_2q_gates == 0:
        target_num_2q_gates = int(np.round(2 * num_steps * num_qubits))

    if num_steps < 1 or target_num_2q_gates < 1:
        raise ValueError(
            f"Must have at least one step and 2q gate, got: {num_steps} steps and {target_num_2q_gates} 2q gates"
        )

    # theta sweep: a single value, or num_theta points in [0, theta]
    if num_theta == 1:
        theta = (theta,)
    else:
        theta = tuple(np.linspace(0, theta, num=num_theta))

    if num_magnetization == 1:
        magnetization = (1.0,)
    else:
        magnetization = tuple(np.linspace(0, 1, num=num_magnetization))

    # "repeat" modes replace the sweep with copies of a constant
    if repeat_magnetization:
        magnetization = (1.0,) * num_magnetization
    if repeat_theta:
        theta = (1.0,) * num_theta

    pub = mirror_trotter_pub_1d(
        num_steps=num_steps,
        path=path,
        backend=backend,
        theta_values=theta,
        magnetization_values=magnetization,
    )

    if not (_gates := list(set(backend.basis_gates).intersection(["cx", "ecr", "cz"]))):
        raise ValueError("2q gate not recognized")
    else:
        gate_name = _gates[0]
    num_2q_gates = pub[0].count_ops().get(gate_name, 0)

    # record sweep/benchmark parameters in the circuit metadata
    pub[0].metadata["circuit_depth"] = pub[0].depth(lambda instr: len(instr.qubits) > 1)
    pub[0].metadata["theta"] = theta
    pub[0].metadata["path"] = path
    pub[0].metadata["magnetization"] = magnetization
    pub[0].metadata["num_steps"] = num_steps
    pub[0].metadata["num_qubits"] = num_qubits
    pub[0].metadata["num_2q_gates"] = num_2q_gates
    pub[0].metadata["num_2q_gates_per_step_per_qubit"] = (
        num_2q_gates / num_steps
    ) / num_qubits

    # the bindings array below relies on "A" being the first circuit parameter
    pars_from_circ = tuple(pub[0].parameters)
    if str(pars_from_circ[0]) != "A":
        raise ValueError("Assumed parameter order violated")

    pub = list(pub)
    pub[1] = ObservablesArray(pub[1])
    pub[2] = BindingsArray({pars_from_circ: pub[2]})
    pub = tuple(pub)
    pub = EstimatorPub(*pub)

    pub.circuit.metadata["bindings_array_shape"] = pub.parameter_values.shape
    pub.circuit.metadata["observables_array_shape"] = pub.observables.shape

    return pub


@lru_cache()
def get_longest_path(coupling_map):
    """Longest simple path between two fixed nodes of the coupling graph.

    NOTE(review): endpoints 13 and 113 are hard-coded and look device
    specific (e.g. a heavy-hex corner-to-corner path) -- confirm they are
    valid for every backend this is used with.
    """
    graph = nx.Graph(list(coupling_map))
    maximal_path = max(nx.all_simple_paths(graph, 13, 113), key=len)
    return maximal_path


class MirrorPubOptions:
    """Options bundle for building mirror-Trotter pubs via ``get_pubs``."""

    num_qubits: Optional[int] = None
    target_num_2q_gates: Optional[int] = 1000
    num_steps: Optional[int] = None
    num_magnetization: int = 1
    num_theta: int = 1
    theta: float = np.pi / 4
    repeat_theta: bool = False
    repeat_magnetization: bool = False
    num_pubs: int = 1
    path: Optional[tuple[int, ...]] = None
    path_strategy: Literal[None, "vf2_optimal", "eplg_chain"] = None

    def get_pubs(self, backend) -> list[EstimatorPub]:
        """Build ``num_pubs`` copies of the pub described by these options."""
        pub = mirror_pub(
            backend=backend,
            num_qubits=self.num_qubits,
            target_num_2q_gates=self.target_num_2q_gates,
            num_steps=self.num_steps,
            num_theta=self.num_theta,
            num_magnetization=self.num_magnetization,
            repeat_magnetization=self.repeat_magnetization,
            repeat_theta=self.repeat_theta,
            path=self.get_path(backend),
            theta=self.theta,
        )

        return [pub] * self.num_pubs

    def get_path(self, backend):
        """Resolve the qubit chain to use according to ``path_strategy``."""
        if self.path_strategy is None:
            if self.path:
                return self.path
            else:
                coupling_map = tuple(tuple(edge) for edge in backend.coupling_map)
                return get_longest_path(coupling_map)[: self.num_qubits]

        elif self.path_strategy == "eplg_chain":
            # guard num_qubits is not None: comparing None > 100 raises
            # TypeError; None means "use the whole 100Q chain"
            if self.num_qubits is not None and self.num_qubits > 100:
                raise ValueError("EPLG chain only defined up to 100 qubits")
            eplg_chain = next(
                q_list["qubits"]
                for q_list in backend.properties().general_qlists
                if q_list["name"] == "lf_100"
            )
            return eplg_chain[: self.num_qubits]

        elif self.path_strategy == "vf2_optimal":
            weights_dict = {
                "t1": 1.0,
                "t2": 0,
                "readout_error": 1.0,
                "faulty": 0,
                "gate_err_2q": 1.0,
            }

            path = get_optimal_path(
                weights_dict=weights_dict,
                backend=backend,
                num_qubits=self.num_qubits,
                time_limit=1e2,
                seed=42,
                max_trials=1000,
            )

            return path
        else:
            raise ValueError(f"Unrecognized path_strategy value {self.path_strategy}")
# (C) Copyright IBM 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

import numpy as np
import pandas as pd
from qiskit import QuantumCircuit
from qiskit_ibm_runtime.models.exceptions import BackendPropertyError
from qiskit.transpiler import AnalysisPass, CouplingMap, PassManager
from qiskit.transpiler.passes import VF2Layout
from qiskit.transpiler.passes.layout.vf2_utils import ErrorMap
from qiskit_ibm_runtime import IBMBackend


def build_error_dataframe(backend: IBMBackend) -> pd.DataFrame:
    """Collect device properties into a long-format DataFrame.

    One row per (metric, qubits) pair with columns ``metric``, ``value``,
    ``qubits`` and ``sign``. Single-qubit metrics (faulty, t1, t2,
    readout_error) use ``(q, q)`` as the qubits key; the 2Q gate error uses
    the coupling-map edge. Missing/faulty properties are recorded as NaN so
    downstream code can drop them. ``sign`` marks whether a larger value is
    worse (+1) or better (-1).
    """
    data = []
    props = backend.properties()
    # first 2Q basis gate found among ecr/cx/cz (set order is arbitrary, but
    # a backend is assumed to expose only one of these)
    gate_name_2q = list({"ecr", "cx", "cz"}.intersection(backend.basis_gates))[0]

    # non-operational qubits get NaN so they are dropped later
    for q in range(backend.num_qubits):
        data.append(
            {
                "metric": "faulty",
                "value": np.nan if not props.is_qubit_operational(q) else 0,
                "qubits": (q, q),
                "sign": +1,
            }
        )

    for q in range(backend.num_qubits):
        try:
            t1 = props.t1(q)
        except BackendPropertyError:
            t1 = np.nan
        data.append(
            {
                "metric": "t1",
                "value": t1,
                "qubits": (q, q),
                # larger T1 is better
                "sign": -1,
            }
        )

    for q in range(backend.num_qubits):
        try:
            t2 = props.t2(q)
        except BackendPropertyError:
            t2 = np.nan
        data.append(
            {
                "metric": "t2",
                "value": t2,
                "qubits": (q, q),
                # larger T2 is better
                "sign": -1,
            }
        )

    for q in range(backend.num_qubits):
        try:
            ro_err = props.readout_error(q)
        except BackendPropertyError:
            ro_err = np.nan
        data.append(
            {
                "metric": "readout_error",
                "value": ro_err,
                "qubits": (q, q),
                "sign": +1,
            }
        )

    for edge in list(backend.coupling_map):
        try:
            gate_err_2q = props.gate_error(gate_name_2q, edge)
            # an error of exactly 1.0 is a reported-faulty gate; treat as missing
            if gate_err_2q == 1.0:
                gate_err_2q = np.nan
        except BackendPropertyError:
            gate_err_2q = np.nan
        data.append(
            {
                "metric": "gate_err_2q",
                "value": gate_err_2q,
                "qubits": edge,
                "sign": +1,
            }
        )

    return pd.DataFrame(data)


def compute_error_dataframe(
    df: pd.DataFrame, weights: dict[str, float]
) -> pd.DataFrame:
    """Reduce the long-format property DataFrame to one score per qubits key.

    Each metric is sign-adjusted, standardized (mean 0, std 1 within the
    metric), scaled by the caller-supplied weight, then averaged across
    metrics per ``qubits`` key and shifted so the minimum score is 0
    (lower is better).

    Raises:
        ValueError: if ``weights`` is missing a key for any metric present.
    """
    df = df.copy()

    if set(df["metric"]) != set(weights.keys()):
        missing_keys = set(df["metric"]) - set(weights.keys())
        raise ValueError(f"Missing weights for: {missing_keys}")

    # Drop any qubits which have missing properties
    bad_edges = list(df[df["value"].isna()]["qubits"])
    bad_qubits = set([item for sublist in bad_edges for item in sublist])
    df = df[
        df["qubits"].map(
            lambda q: (q[0] not in bad_qubits) and (q[1] not in bad_qubits)
        )
    ]

    # Sign indicates whether a metric should be small or large
    df["value"] *= df["sign"]
    df.drop(columns=["sign"], inplace=True)

    # Normalize values about the means
    df_mean = df.groupby(["metric"]).agg({"value": "mean"})
    df = df.set_index(["metric", "qubits"]) - df_mean
    df_std = df.groupby(["metric"]).agg({"value": "std"})
    # avoid division by zero for constant-valued metrics
    df_std[df_std["value"] == 0.0] = 1.0
    df = df / df_std

    # Apply weights for properties
    df_weights = pd.DataFrame([{"metric": m, "value": v} for m, v in weights.items()])
    df_weights = df_weights.set_index("metric")
    df = df * df_weights
    df.reset_index(inplace=True)

    ## Aggregate over metrics
    df = df.groupby(["qubits"]).agg({"value": "mean"}).reset_index()
    df["value"] -= df["value"].min()

    return df


def build_error_map(backend, weights_dict, symmetrize: bool = True):
    """Build a VF2 ``ErrorMap`` from weighted, normalized device properties.

    Every edge (both directions) and every self-pair ``(q, q)`` is seeded
    with a large constant so entries with dropped/missing data are strongly
    penalized; computed scores then overwrite the entries they cover.

    NOTE(review): the ``symmetrize`` parameter is currently unused -- the map
    is always written symmetrically below. Confirm intent before removing.

    Returns:
        Tuple ``(df_err, error_map)`` of the score DataFrame and the ErrorMap.
    """
    df_props = build_error_dataframe(backend)
    df_err = compute_error_dataframe(df_props, weights_dict)

    # penalty score assigned to qubits/edges with no usable data
    HI_ERR_CONST = 1e3

    err_dict = {}
    for edge in list(backend.coupling_map):
        err_dict[edge] = HI_ERR_CONST
        err_dict[tuple(reversed(edge))] = HI_ERR_CONST
    for q in range(backend.num_qubits):
        err_dict[(q, q)] = HI_ERR_CONST

    # overwrite defaults with the computed scores, symmetrically
    for _, (qubits, err_rate) in df_err.iterrows():
        err_dict[qubits] = err_rate
        err_dict[tuple(reversed(qubits))] = err_rate

    error_map = ErrorMap(len(err_dict))
    for edge, err_rate in err_dict.items():
        error_map.add_error(edge, err_rate)

    return df_err, error_map


class VF2WeightedLayout(AnalysisPass):
    """Analysis pass that injects a custom weighted error map for VF2Layout.

    Populates ``vf2_avg_error_map`` (read by ``VF2Layout``) and stores the
    underlying score DataFrame as ``vf2_dataframe`` for inspection.
    """

    def __init__(self, weights_dict: dict[str, float], backend: IBMBackend):
        super().__init__()
        self._backend = backend
        self._weights_dict = weights_dict

    def run(self, dag):
        # analysis-only pass: sets properties, leaves the DAG untouched
        df, error_map = build_error_map(self._backend, self._weights_dict)
        self.property_set["vf2_avg_error_map"] = error_map
        self.property_set["vf2_dataframe"] = df
        return dag


def dummy_path_circuit(num_qubits: int) -> QuantumCircuit:
    """Brickwork CZ circuit on a line, used only to force a path embedding."""
    qc = QuantumCircuit(num_qubits)

    # even then odd nearest-neighbor pairs: interaction graph is a 1D path
    for i in range(0, num_qubits - 1, 2):
        qc.cz(i, i + 1)
    for i in range(1, num_qubits - 1, 2):
        qc.cz(i, i + 1)

    return qc


def symmetrize_coupling_map(cm: CouplingMap) -> CouplingMap:
    """Return a coupling map containing both directions of every edge."""
    edge_list = set()
    for edge in list(cm):
        edge_list |= {tuple(edge)}
        edge_list |= {tuple(reversed(edge))}
    return CouplingMap(edge_list)


def get_optimal_path(
    weights_dict: dict[str, float],
    backend: IBMBackend,
    num_qubits: int,
    seed: int = 42,
    time_limit: float = 30.0,
    max_trials: int = -1,
    call_limit=None,
):
    """Find a good length-``num_qubits`` qubit chain via weighted VF2 layout.

    Runs ``VF2WeightedLayout`` (to supply the weighted error map) followed by
    ``VF2Layout`` on a dummy path circuit, then reads the chosen layout back
    as an ordered tuple of physical qubits.

    Args:
        weights_dict: weight per metric, passed to ``build_error_map``
        backend: target device
        num_qubits: length of the desired chain
        seed: VF2Layout seed
        time_limit: VF2Layout time limit (seconds)
        max_trials: VF2Layout trial cap (-1 for unlimited)
        call_limit: VF2Layout internal call cap

    Returns:
        Tuple of physical qubit indices in path order.
    """
    pm = PassManager(
        [
            VF2WeightedLayout(weights_dict=weights_dict, backend=backend),
            VF2Layout(
                strict_direction=False,
                seed=seed,
                coupling_map=symmetrize_coupling_map(backend.coupling_map),
                time_limit=time_limit,
                max_trials=max_trials,
                call_limit=call_limit,
            ),
        ]
    )
    pm.run(dummy_path_circuit(num_qubits))
    # map virtual (circuit) qubit index -> physical qubit; relies on the
    # private Qubit._index attribute -- fragile across Qiskit versions
    qubit_mapping = {
        k._index: v for k, v in pm.property_set["layout"].get_virtual_bits().items()
    }
    mapped_path = [qubit_mapping[i] for i in range(num_qubits)]
    return tuple(mapped_path)
12 | """ 13 | Fast benchmark of qubit count using the CHSH inequality 14 | """ 15 | 16 | import argparse 17 | import rustworkx as rx 18 | import networkx as nx 19 | from qiskit_ibm_runtime import QiskitRuntimeService 20 | from qiskit_experiments.framework import ParallelExperiment, BatchExperiment 21 | 22 | import qiskit_device_benchmarking.utilities.file_utils as fu 23 | import qiskit_device_benchmarking.utilities.graph_utils as gu 24 | from qiskit_device_benchmarking.bench_code.bell import CHSHExperiment 25 | 26 | 27 | def run_count(hgp, backends, nshots=100, act_name=""): 28 | """Run a chsh inequality on a number of devices 29 | 30 | Args: 31 | hgp: hub/group/project 32 | backends: list of backends 33 | nshots: number of shots 34 | act_name: account name to be passed to the runtime service 35 | 36 | Returns: 37 | flat list of all the edges 38 | """ 39 | 40 | # load the service 41 | service = QiskitRuntimeService(name=act_name) 42 | job_list = [] 43 | result_dict = {} 44 | result_dict["config"] = {"hgp": hgp, "nshots": nshots, "act_name": act_name} 45 | 46 | print("Running Fast Count with options %s" % result_dict["config"]) 47 | 48 | # run all the circuits 49 | for backend in backends: 50 | print("Loading backend %s" % backend) 51 | result_dict[backend] = {} 52 | backend_real = service.backend(backend, instance=hgp) 53 | chsh_exp_list_b = [] 54 | 55 | # compute the sets for this 56 | # NOTE: I want to replace this with fixed sets from 57 | # a config file!!! 
58 | nq = backend_real.configuration().n_qubits 59 | coupling_map = backend_real.configuration().coupling_map 60 | # build a set of gates 61 | G = gu.build_sys_graph(nq, coupling_map) 62 | # get all length 2 paths in the device 63 | paths = rx.all_pairs_all_simple_paths(G, 2, 2) 64 | # flatten those paths into a list from the rustwork x iterator 65 | paths = gu.paths_flatten(paths) 66 | # remove permutations 67 | paths = gu.remove_permutations(paths) 68 | # convert to the coupling map of the device 69 | paths = gu.path_to_edges(paths, coupling_map) 70 | # make into separate sets 71 | sep_sets = gu.get_separated_sets(G, paths, min_sep=2) 72 | 73 | result_dict[backend]["sets"] = sep_sets 74 | 75 | # Construct mirror QV circuits on each parallel set 76 | for qsets in sep_sets: 77 | chsh_exp_list = [] 78 | 79 | for qset in qsets: 80 | # generate the circuits 81 | chsh_exp = CHSHExperiment(physical_qubits=qset, backend=backend_real) 82 | 83 | chsh_exp.set_transpile_options(optimization_level=1) 84 | chsh_exp_list.append(chsh_exp) 85 | 86 | new_exp_chsh = ParallelExperiment( 87 | chsh_exp_list, backend=backend_real, flatten_results=False 88 | ) 89 | 90 | chsh_exp_list_b.append(new_exp_chsh) 91 | 92 | new_exp_chsh = BatchExperiment( 93 | chsh_exp_list_b, backend=backend_real, flatten_results=False 94 | ) 95 | 96 | new_exp_chsh.set_run_options(shots=nshots) 97 | job_list.append(new_exp_chsh.run()) 98 | result_dict[backend]["job_ids"] = job_list[-1].job_ids 99 | 100 | # get the jobs back 101 | for i, backend in enumerate(backends): 102 | print("Loading results for backend: %s" % backend) 103 | 104 | expdata = job_list[i] 105 | try: 106 | expdata.block_for_results() 107 | except Exception: 108 | # remove backend from results 109 | print("Error loading backend %s results" % backend) 110 | result_dict.pop(backend) 111 | continue 112 | 113 | result_dict[backend]["chsh_values"] = {} 114 | 115 | for qsets_i, qsets in enumerate(result_dict[backend]["sets"]): 116 | for qset_i, qset 
in enumerate(qsets): 117 | anal_res = ( 118 | expdata.child_data()[qsets_i] 119 | .child_data()[qset_i] 120 | .analysis_results()[0] 121 | ) 122 | qedge = "%d_%d" % ( 123 | anal_res.device_components[0].index, 124 | anal_res.device_components[1].index, 125 | ) 126 | result_dict[backend]["chsh_values"][qedge] = anal_res.value 127 | 128 | # calculate number of connected qubits 129 | G = nx.Graph() 130 | 131 | # add all possible edges 132 | for i in result_dict[backend]["chsh_values"]: 133 | if result_dict[backend]["chsh_values"][i] >= 2: 134 | G.add_edge(int(i.split("_")[0]), int(i.split("_")[1])) 135 | 136 | # catch error if the graph is empty 137 | try: 138 | largest_cc = max(nx.connected_components(G), key=len) 139 | 140 | # look at the average degree of the largest region 141 | avg_degree = 0 142 | for i in largest_cc: 143 | avg_degree += nx.degree(G, i) 144 | 145 | avg_degree = avg_degree / len(largest_cc) 146 | 147 | except Exception: 148 | largest_cc = {} 149 | avg_degree = 1 150 | 151 | result_dict[backend]["largest_region"] = len(largest_cc) 152 | result_dict[backend]["average_degree"] = avg_degree 153 | 154 | fu.export_yaml("CHSH_" + fu.timestamp_name() + ".yaml", result_dict) 155 | 156 | 157 | if __name__ == "__main__": 158 | parser = argparse.ArgumentParser( 159 | description="Run fast benchmark of " 160 | + "qubit count using chsh. 
Specify a config " 161 | + " yaml and override settings on the command line" 162 | ) 163 | parser.add_argument( 164 | "-c", "--config", help="config file name", default="config.yaml" 165 | ) 166 | parser.add_argument( 167 | "-b", "--backend", help="Specify backend and override " + "backend_group" 168 | ) 169 | parser.add_argument( 170 | "-bg", 171 | "--backend_group", 172 | help="specify backend group in config file", 173 | default="backends", 174 | ) 175 | parser.add_argument("--hgp", help="specify hgp") 176 | parser.add_argument("--shots", help="specify number of shots") 177 | parser.add_argument("--name", help="Account name", default="") 178 | args = parser.parse_args() 179 | 180 | # import from config 181 | config_dict = fu.import_yaml(args.config) 182 | print("Config File Found") 183 | print(config_dict) 184 | 185 | # override from the command line 186 | if args.backend is not None: 187 | backends = [args.backend] 188 | else: 189 | backends = config_dict[args.backend_group] 190 | 191 | if args.hgp is not None: 192 | hgp = args.hgp 193 | else: 194 | hgp = config_dict["hgp"] 195 | 196 | if args.shots is not None: 197 | nshots = int(args.shots) 198 | else: 199 | nshots = config_dict["shots"] 200 | 201 | # print(hgp, backends, he, opt_level, dd, depths, trials, nshots) 202 | 203 | run_count(hgp, backends, nshots=nshots, act_name=args.name) 204 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/prb/pur_rb.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
class PurityRB(StandardRB):
    """An experiment to characterize the error rate of a gate set on a device.
    using purity RB

    # section: overview

        Randomized Benchmarking (RB) is an efficient and robust method
        for estimating the average error rate of a set of quantum gate operations.
        See `Qiskit Textbook
        `_
        for an explanation on the RB method.

        A standard RB experiment generates sequences of random Cliffords
        such that the unitary computed by the sequences is the identity.
        After running the sequences on a backend, it calculates the probabilities to get back to
        the ground state, fits an exponentially decaying curve, and estimates
        the Error Per Clifford (EPC), as described in Refs. [1, 2].

        .. note::
            In 0.5.0, the default value of ``optimization_level`` in ``transpile_options`` changed
            from ``0`` to ``1`` for RB experiments. That may result in shorter RB circuits
            hence slower decay curves than before.

    # section: analysis_ref
        :class:`PurityRBAnalysis`

    # section: manual
        :doc:`/manuals/verification/randomized_benchmarking`

    # section: reference
        .. ref_arxiv:: 1 1009.3639
        .. ref_arxiv:: 2 1109.6887
    """

    def __init__(
        self,
        physical_qubits: Sequence[int],
        lengths: Iterable[int],
        backend: Optional[Backend] = None,
        num_samples: int = 3,
        seed: Optional[Union[int, SeedSequence, BitGenerator, Generator]] = None,
        full_sampling: Optional[bool] = False,
    ):
        """Initialize a standard randomized benchmarking experiment.

        Args:
            physical_qubits: List of physical qubits for the experiment.
            lengths: A list of RB sequences lengths.
            backend: The backend to run the experiment on.
            num_samples: Number of samples to generate for each sequence length.
            seed: Optional, seed used to initialize ``numpy.random.default_rng``.
                when generating circuits. The ``default_rng`` will be initialized
                with this seed value every time :meth:`circuits` is called.
            full_sampling: If True all Cliffords are independently sampled for all lengths.
                If False for sample of lengths longer sequences are constructed
                by appending additional samples to shorter sequences.
                The default is False.

        Raises:
            QiskitError: If any invalid argument is supplied.
        """
        # Initialize base experiment (RB)
        super().__init__(
            physical_qubits, lengths, backend, num_samples, seed, full_sampling
        )

        # override the analysis: purity RB needs its own data processing and
        # the plot is of Trace(rho^2), not survival probability
        self.analysis = PurityRBAnalysis()
        self.analysis.set_options(outcome="0" * self.num_qubits)
        self.analysis.plotter.set_figure_options(
            xlabel="Clifford Length",
            ylabel="Purity",
        )

    def circuits(self) -> List[QuantumCircuit]:
        """Return a list of RB circuits.

        Returns:
            A list of :class:`QuantumCircuit`.
        """
        # Sample random Clifford sequences
        sequences = self._sample_sequences()
        # Convert each sequence into circuit and append the inverse to the end.
        # and the post-rotations
        circuits = self._sequences_to_circuits(sequences)
        # Add metadata for each circuit
        # trial links all from the same trial
        # needed for post processing the purity RB
        # Each sequence yields 3**num_qubits circuits (one per measurement
        # setting), so circ_i / 3**num_qubits indexes the source sequence.
        for circ_i, circ in enumerate(circuits):
            circ.metadata = {
                "xval": len(sequences[int(circ_i / 3**self.num_qubits)]),
                "trial": int(circ_i / 3**self.num_qubits),
                "group": "Clifford",
            }
        return circuits

    def _sequences_to_circuits(
        self, sequences: List[Sequence[SequenceElementType]]
    ) -> List[QuantumCircuit]:
        """Convert an RB sequence into circuit and append the inverse to the end and
        then the post rotations for purity RB

        Returns:
            A list of purity RB circuits.
        """
        synthesis_opts = self._get_synthesis_options()

        # post rotations as cliffords: for each qubit one of three settings
        # (identity / sx / sdg-sx-s), i.e. 3**num_qubits basis-change
        # combinations appended before measurement
        post_rot = []
        for i in range(3**self.num_qubits):
            ##find clifford
            qc = QuantumCircuit(self.num_qubits)
            for j in range(self.num_qubits):
                # base-3 digit j of i selects this qubit's rotation
                qg_ind = np.mod(int(i / 3**j), 3)
                if qg_ind == 1:
                    qc.sx(j)
                elif qg_ind == 2:
                    qc.sdg(j)
                    qc.sx(j)
                    qc.s(j)

            post_rot.append(self._to_instruction(Clifford(qc), synthesis_opts))

        # Circuit generation
        circuits = []
        for i, seq in enumerate(sequences):
            # Reset the running product at the start of each sample (or every
            # sequence when full_sampling).  The _StandardRB__* names reach
            # name-mangled private helpers of StandardRB.
            if (
                self.experiment_options.full_sampling
                or i % len(self.experiment_options.lengths) == 0
            ):
                prev_elem, prev_seq = self._StandardRB__identity_clifford(), []

            circ = QuantumCircuit(self.num_qubits)
            for elem in seq:
                circ.append(self._to_instruction(elem, synthesis_opts), circ.qubits)
                circ._append(CircuitInstruction(Barrier(self.num_qubits), circ.qubits))

            # Compute inverse, compute only the difference from the previous shorter sequence
            prev_elem = self._StandardRB__compose_clifford_seq(
                prev_elem, seq[len(prev_seq) :]
            )
            prev_seq = seq
            inv = self._StandardRB__adjoint_clifford(prev_elem)

            circ.append(self._to_instruction(inv, synthesis_opts), circ.qubits)

            # copy the circuit and apply post rotations
            for j in range(3**self.num_qubits):
                circ2 = circ.copy()
                circ2.append(post_rot[j], circ.qubits)
                circ2.measure_all()  # includes insertion of the barrier before measurement
                circuits.append(circ2)

        return circuits
12 | """ 13 | Fast benchmark via mirror circuits 14 | """ 15 | 16 | import argparse 17 | import numpy as np 18 | import rustworkx as rx 19 | from qiskit_ibm_runtime import QiskitRuntimeService 20 | from qiskit.transpiler import Target, CouplingMap 21 | from qiskit_experiments.framework import ParallelExperiment, BatchExperiment 22 | 23 | 24 | import qiskit_device_benchmarking.utilities.file_utils as fu 25 | import qiskit_device_benchmarking.utilities.graph_utils as gu 26 | from qiskit_device_benchmarking.bench_code.mrb import MirrorQuantumVolume 27 | 28 | import warnings 29 | 30 | from qiskit.circuit import Gate 31 | 32 | xslow = Gate(name="xslow", num_qubits=1, params=[]) 33 | 34 | 35 | def run_bench( 36 | hgp, 37 | backends, 38 | depths=[8], 39 | trials=10, 40 | nshots=100, 41 | he=True, 42 | dd=True, 43 | opt_level=3, 44 | act_name="", 45 | ): 46 | """Run a benchmarking test (mirror QV) on a set of devices 47 | 48 | Args: 49 | hgp: hub/group/project 50 | backends: list of backends 51 | depths: list of mirror depths (square circuits) 52 | trials: number of randomizations 53 | nshots: number of shots 54 | he: hardware efficient True/False (False is original QV circ all to all, 55 | True assumes a line) 56 | dd: add dynamic decoupling 57 | opt_level: optimization level of the transpiler 58 | act_name: account name to be passed to the runtime service 59 | 60 | Returns: 61 | flat list of lists of qubit chains 62 | """ 63 | 64 | warnings.filterwarnings( 65 | "error", message=".*run.*", category=DeprecationWarning, append=False 66 | ) 67 | 68 | # load the service 69 | service = QiskitRuntimeService(name=act_name) 70 | job_list = [] 71 | result_dict = {} 72 | result_dict["config"] = { 73 | "hgp": hgp, 74 | "depths": depths, 75 | "trials": trials, 76 | "nshots": nshots, 77 | "dd": dd, 78 | "he": he, 79 | "pregenerated": False, 80 | "opt_level": opt_level, 81 | "act_name": act_name, 82 | } 83 | 84 | print("Running Fast Bench with options %s" % result_dict["config"]) 85 | 
86 | # run all the circuits 87 | for backend in backends: 88 | print("Loading backend %s" % backend) 89 | result_dict[backend] = {} 90 | backend_real = service.backend(backend, instance=hgp) 91 | mqv_exp_list_d = [] 92 | for depth in depths: 93 | print("Generating Depth %d Circuits for Backend %s" % (depth, backend)) 94 | 95 | result_dict[backend][depth] = {} 96 | 97 | # compute the sets for this 98 | # NOTE: I want to replace this with fixed sets from 99 | # a config file!!! 100 | nq = backend_real.configuration().n_qubits 101 | coupling_map = backend_real.configuration().coupling_map 102 | G = gu.build_sys_graph(nq, coupling_map) 103 | paths = rx.all_pairs_all_simple_paths(G, depth, depth) 104 | paths = gu.paths_flatten(paths) 105 | new_sets = gu.get_separated_sets(G, paths, min_sep=2, nsets=1) 106 | 107 | mqv_exp_list = [] 108 | 109 | result_dict[backend][depth]["sets"] = new_sets[0] 110 | 111 | # Construct mirror QV circuits on each parallel set 112 | for qset in new_sets[0]: 113 | # generate the circuits 114 | mqv_exp = MirrorQuantumVolume( 115 | qubits=qset, 116 | backend=backend_real, 117 | trials=trials, 118 | pauli_randomize=True, 119 | he=he, 120 | ) 121 | 122 | mqv_exp.analysis.set_options( 123 | plot=False, calc_hop=False, analyzed_quantity="Success Probability" 124 | ) 125 | 126 | # Do this so it won't compile outside the qubit sets 127 | cust_map = [] 128 | for i in coupling_map: 129 | if i[0] in qset and i[1] in qset: 130 | cust_map.append(i) 131 | 132 | basis_gates = backend_real.configuration().basis_gates 133 | if "xslow" in basis_gates: 134 | basis_gates.remove("xslow") 135 | if "rx" in basis_gates: 136 | basis_gates.remove("rx") 137 | if "rzz" in basis_gates: 138 | basis_gates.remove("rzz") 139 | cust_target = Target.from_configuration( 140 | basis_gates=basis_gates, 141 | num_qubits=nq, 142 | coupling_map=CouplingMap(cust_map), 143 | ) 144 | 145 | mqv_exp.set_transpile_options( 146 | target=cust_target, optimization_level=opt_level 147 | ) 148 
| mqv_exp_list.append(mqv_exp) 149 | 150 | new_exp_mqv = ParallelExperiment( 151 | mqv_exp_list, backend=backend_real, flatten_results=False 152 | ) 153 | if dd: 154 | # this forces the circuits to have DD on them 155 | print("Transpiling and DD") 156 | for i in mqv_exp_list: 157 | i.dd_circuits() 158 | 159 | mqv_exp_list_d.append(new_exp_mqv) 160 | 161 | new_exp_mqv = BatchExperiment( 162 | mqv_exp_list_d, backend=backend_real, flatten_results=False 163 | ) 164 | new_exp_mqv.set_run_options(shots=nshots) 165 | job_list.append(new_exp_mqv.run()) 166 | result_dict[backend]["job_ids"] = job_list[-1].job_ids 167 | 168 | # get the jobs back 169 | for i, backend in enumerate(backends): 170 | print("Loading results for backend: %s" % backend) 171 | 172 | expdata = job_list[i] 173 | try: 174 | expdata.block_for_results() 175 | except Exception: 176 | # remove backend from results 177 | print("Error loading backend %s results" % backend) 178 | result_dict.pop(backend) 179 | continue 180 | 181 | for j, depth in enumerate(depths): 182 | result_dict[backend][depth]["data"] = [] 183 | result_dict[backend][depth]["mean"] = [] 184 | result_dict[backend][depth]["std"] = [] 185 | 186 | for k in range(len(result_dict[backend][depth]["sets"])): 187 | result_dict[backend][depth]["data"].append( 188 | [ 189 | float(probi) 190 | for probi in list( 191 | expdata.child_data()[j].child_data()[k].artifacts()[0].data 192 | ) 193 | ] 194 | ) 195 | result_dict[backend][depth]["mean"].append( 196 | float(np.mean(result_dict[backend][depth]["data"][-1])) 197 | ) 198 | result_dict[backend][depth]["std"].append( 199 | float(np.std(result_dict[backend][depth]["data"][-1])) 200 | ) 201 | 202 | fu.export_yaml("MQV_" + fu.timestamp_name() + ".yaml", result_dict) 203 | 204 | 205 | if __name__ == "__main__": 206 | parser = argparse.ArgumentParser( 207 | description="Run fast benchmark of " 208 | + "devices using mirror. 
Specify a config " 209 | + " yaml and override settings on the command line" 210 | ) 211 | parser.add_argument( 212 | "-c", "--config", help="config file name", default="config.yaml" 213 | ) 214 | parser.add_argument( 215 | "-b", "--backend", help="Specify backend and override " + "backend_group" 216 | ) 217 | parser.add_argument( 218 | "-bg", 219 | "--backend_group", 220 | help="specify backend group in config file", 221 | default="backends", 222 | ) 223 | parser.add_argument("--hgp", help="specify hgp") 224 | parser.add_argument("--he", help="Hardware efficient", action="store_true") 225 | parser.add_argument("--name", help="Account name", default="") 226 | args = parser.parse_args() 227 | 228 | # import from config 229 | config_dict = fu.import_yaml(args.config) 230 | print("Config File Found") 231 | print(config_dict) 232 | 233 | # override from the command line 234 | if args.backend is not None: 235 | backends = [args.backend] 236 | else: 237 | backends = config_dict[args.backend_group] 238 | 239 | if args.hgp is not None: 240 | hgp = args.hgp 241 | else: 242 | hgp = config_dict["hgp"] 243 | 244 | if args.he is True: 245 | he = True 246 | else: 247 | he = config_dict["he"] 248 | 249 | opt_level = config_dict["opt_level"] 250 | dd = config_dict["dd"] 251 | depths = config_dict["depths"] 252 | trials = config_dict["trials"] 253 | nshots = config_dict["shots"] 254 | 255 | # print(hgp, backends, he, opt_level, dd, depths, trials, nshots) 256 | 257 | run_bench( 258 | hgp, 259 | backends, 260 | depths=depths, 261 | trials=trials, 262 | nshots=nshots, 263 | he=he, 264 | dd=dd, 265 | opt_level=opt_level, 266 | act_name=args.name, 267 | ) 268 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/prb/purrb_analysis.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2024. 
class PurityRBAnalysis(RBAnalysis):
    r"""A class to analyze purity randomized benchmarking experiments.

    # section: overview
        This analysis takes only single series.
        This series is fit by the exponential decay function.
        From the fit :math:`\alpha` value this analysis estimates the error per Clifford (EPC).

        When analysis option ``gate_error_ratio`` is provided, this analysis also estimates
        errors of individual gates assembling a Clifford gate.
        In computation of two-qubit EPC, this analysis can also decompose
        the contribution from the underlying single qubit depolarizing channels when
        ``epg_1_qubit`` analysis option is provided [1].

    # section: fit_model
        .. math::

            F(x) = a \alpha^x + b

    # section: fit_parameters
        defpar a:
            desc: Height of decay curve.
            init_guess: Determined by :math:`1 - b`.
            bounds: [0, 1]
        defpar b:
            desc: Base line.
            init_guess: Determined by :math:`(1/2)^n` where :math:`n` is number of qubit.
            bounds: [0, 1]
        defpar \alpha:
            desc: Depolarizing parameter.
            init_guess: Determined by :func:`~.guess.rb_decay`.
            bounds: [0, 1]

    # section: reference
        .. ref_arxiv:: 1 1712.06550

    """

    def __init__(self):
        # No extra state beyond the parent class.
        super().__init__()

    def _run_data_processing(
        self,
        raw_data: List[Dict],
        category: str = "raw",
    ) -> ScatterTable:
        """Perform data processing from the experiment result payload.

        For purity this converts the counts into Trace(rho^2) and then runs the
        rest of the standard RB fitters

        For now this does it by spoofing a new counts dictionary and then
        calling the super _run_data_processing

        Args:
            raw_data: Payload in the experiment data.
            category: Category string of the output dataset.

        Returns:
            Processed data that will be sent to the formatter method.

        Raises:
            DataProcessorError: When key for x values is not found in the metadata.
            ValueError: When data processor is not provided.
        """

        # figure out the number of qubits... has to be 1 or 2 for now
        if self.options.outcome == "0":
            nq = 1
        elif self.options.outcome == "00":
            nq = 2
        else:
            raise ValueError("Only supporting 1 or 2Q purity")

        # Each trial contributed 3**nq circuits (one per measurement setting).
        ntrials = int(len(raw_data) / 3**nq)
        raw_data2 = []
        nshots = int(sum(raw_data[0]["counts"].values()))

        for i in range(ntrials):
            # All measurement-setting circuits belonging to trial i.
            trial_raw = [d for d in raw_data if d["metadata"]["trial"] == i]

            # Reuse the first circuit's entry as the container for the
            # spoofed purity counts (NOTE: mutates the raw_data entry in
            # place via the shared reference).
            raw_data2.append(trial_raw[0])

            # Trace(rho^2) = sum over Pauli expectation values squared / 2^n,
            # starting from the identity term.
            purity = 1 / 2**nq
            if nq == 1:
                for ii in range(3):
                    purity += (
                        sampled_expectation_value(trial_raw[ii]["counts"], "Z") ** 2
                        / 2**nq
                    )
            else:
                for ii in range(9):
                    purity += (
                        sampled_expectation_value(trial_raw[ii]["counts"], "ZZ") ** 2
                        / 2**nq
                    )
                    # Single-qubit terms are measured in all 9 settings, so
                    # each is divided by the 3**(nq-1) repetitions.
                    purity += (
                        sampled_expectation_value(trial_raw[ii]["counts"], "IZ") ** 2
                        / 2**nq
                        / 3 ** (nq - 1)
                    )
                    purity += (
                        sampled_expectation_value(trial_raw[ii]["counts"], "ZI") ** 2
                        / 2**nq
                        / 3 ** (nq - 1)
                    )

            # Spoof a counts dict encoding the purity as a ground-state
            # probability (x10 shots for extra integer resolution) so the
            # standard RB processing chain can be reused unchanged.
            raw_data2[-1]["counts"] = {
                "0" * nq: int(purity * nshots * 10),
                "1" * nq: int((1 - purity) * nshots * 10),
            }

        return super()._run_data_processing(raw_data2, category)

    def _create_analysis_results(
        self,
        fit_data: curve.CurveFitResult,
        quality: str,
        **metadata,
    ) -> List[AnalysisResultData]:
        """Create analysis results for important fit parameters.

        Args:
            fit_data: Fit outcome.
            quality: Quality of fit outcome.

        Returns:
            List of analysis result data.
        """
        # Skip RBAnalysis's own EPC computation; build from CurveAnalysis.
        outcomes = curve.CurveAnalysis._create_analysis_results(
            self, fit_data, quality, **metadata
        )
        num_qubits = len(self._physical_qubits)

        # Calculate EPC
        # For purity we need to correct by
        # sqrt: the purity decays with alpha^2, so the per-Clifford
        # depolarizing parameter is the square root of the fitted alpha.
        alpha = fit_data.ufloat_params["alpha"] ** 0.5
        scale = (2**num_qubits - 1) / (2**num_qubits)
        epc = scale * (1 - alpha)

        outcomes.append(
            AnalysisResultData(
                name="EPC",
                value=epc,
                chisq=fit_data.reduced_chisq,
                quality=quality,
                extra=metadata,
            )
        )

        # Correction for 1Q depolarizing channel if EPGs are provided
        if self.options.epg_1_qubit and num_qubits == 2:
            epc = _exclude_1q_error(
                epc=epc,
                qubits=self._physical_qubits,
                gate_counts_per_clifford=self._gate_counts_per_clifford,
                extra_analyses=self.options.epg_1_qubit,
            )
            outcomes.append(
                AnalysisResultData(
                    name="EPC_corrected",
                    value=epc,
                    chisq=fit_data.reduced_chisq,
                    quality=quality,
                    extra=metadata,
                )
            )

        # Calculate EPG
        if self._gate_counts_per_clifford is not None and self.options.gate_error_ratio:
            epg_dict = _calculate_epg(
                epc=epc,
                qubits=self._physical_qubits,
                gate_error_ratio=self.options.gate_error_ratio,
                gate_counts_per_clifford=self._gate_counts_per_clifford,
            )
            if epg_dict:
                for gate, epg_val in epg_dict.items():
                    outcomes.append(
                        AnalysisResultData(
                            name=f"EPG_{gate}",
                            value=epg_val,
                            chisq=fit_data.reduced_chisq,
                            quality=quality,
                            extra=metadata,
                        )
                    )

        return outcomes

    def _generate_fit_guesses(
        self,
        user_opt: curve.FitOptions,
        curve_data: curve.ScatterTable,
    ) -> Union[curve.FitOptions, List[curve.FitOptions]]:
        """Create algorithmic initial fit guess from analysis options and curve data.

        Args:
            user_opt: Fit options filled with user provided guess and bounds.
            curve_data: Formatted data collection to fit.

        Returns:
            List of fit options that are passed to the fitter function.
        """
        user_opt.bounds.set_if_empty(
            a=(0, 1),
            alpha=(0, 1),
            b=(0, 1),
        )

        b_guess = 1 / 2 ** len(self._physical_qubits)
        # Seed the decay guess from the first few points only when there is
        # enough data; otherwise use the whole series.
        if len(curve_data.x) > 3:
            alpha_guess = curve.guess.rb_decay(
                curve_data.x[0:3], curve_data.y[0:3], b=b_guess
            )
        else:
            alpha_guess = curve.guess.rb_decay(curve_data.x, curve_data.y, b=b_guess)

        # Purity decays at alpha^2 relative to standard RB.
        alpha_guess = alpha_guess**2

        if alpha_guess < 0.6:
            a_guess = curve_data.y[0] - b_guess
        else:
            a_guess = (curve_data.y[0] - b_guess) / (alpha_guess ** curve_data.x[0])

        user_opt.p0.set_if_empty(
            b=b_guess,
            a=a_guess,
            alpha=alpha_guess,
        )

        return user_opt
def remove_permutations(paths):
    """Remove duplicate and reversed-duplicate paths.

    A path is dropped if it, or its reversal, has already been kept; the
    first occurrence wins.

    Args:
        paths: list of qubit chains

    Returns:
        list of qubit chains without permutations

    Note:
        The previous implementation reversed each candidate in place to test
        membership and only reversed it back on the kept branch, so skipped
        reversed-duplicates were left permanently reversed in the caller's
        list. This version tests a reversed copy and never mutates the input.
        List membership (not a set) is kept deliberately so paths whose
        elements are unhashable (e.g. lists of edges) still work.
    """

    new_path = []
    for path_i in paths:
        # skip if this path, or its reversal, was already kept
        if path_i in new_path or path_i[::-1] in new_path:
            continue

        new_path.append(path_i)

    return new_path
71 | """ 72 | 73 | new_path = [] 74 | for path_i in paths: 75 | if len(path_i) > 2: 76 | new_path.append([]) 77 | 78 | for i in range(len(path_i) - 1): 79 | tmp_set = path_i[i : (i + 2)] 80 | if coupling_map is not None: 81 | if tuple(tmp_set) not in coupling_map and tmp_set not in coupling_map: 82 | tmp_set.reverse() 83 | if ( 84 | tuple(tmp_set) not in coupling_map 85 | and tmp_set not in coupling_map 86 | ): 87 | raise ValueError("Path not found in coupling map") 88 | 89 | if len(path_i) > 2: 90 | new_path[-1].append(tmp_set) 91 | else: 92 | new_path.append(tmp_set) 93 | 94 | return new_path 95 | 96 | 97 | def build_sys_graph(nq, coupling_map, faulty_qubits=None): 98 | """Build a system graph 99 | 100 | Args: 101 | nq: number of qubits 102 | coupling_map: coupling map in list form 103 | faulty_qubits: list of faulty qubits (will remove from graph) 104 | 105 | Returns: 106 | undirected graph with no duplicate edges 107 | """ 108 | 109 | if faulty_qubits is not None: 110 | coupling_map2 = [] 111 | 112 | for i in coupling_map: 113 | if (i[0] not in faulty_qubits) and (i[1] not in faulty_qubits): 114 | coupling_map2.append(i) 115 | 116 | coupling_map = coupling_map2 117 | 118 | G = rx.PyDiGraph() 119 | G.add_nodes_from(range(nq)) 120 | G.add_edges_from_no_data([tuple(x) for x in coupling_map]) 121 | return G.to_undirected(multigraph=False) 122 | 123 | 124 | def get_iso_qubit_list(G): 125 | """Return a set of lists of isolated (separated by at least one idle qubit) 126 | qubits using graph coloring 127 | 128 | Args: 129 | G: system graph (assume G.to_undirected(multigraph=False) has been run) 130 | 131 | Returns: 132 | list of qubit lists 133 | """ 134 | 135 | qlists = {} 136 | node_dict = rx.graph_greedy_color(G) 137 | for i in node_dict: 138 | if node_dict[i] in qlists: 139 | qlists[node_dict[i]].append(i) 140 | else: 141 | qlists[node_dict[i]] = [i] 142 | 143 | qlists = list(qlists.values()) 144 | for i in range(len(qlists)): 145 | qlists[i] = 
def get_separated_sets(G, node_sets, min_sep=1, nsets=-1):
    """Given a list node sets separate out into lists where
    the sets in each list are separated by min_sep

    Greedy first-fit packing: each group collects sets that are pairwise at
    least ``min_sep`` apart; sets that do not fit are retried in the next
    group.

    This could be quite slow!

    Args:
        G: system graph
        node_sets: list of list of nodes
        min_sep: minimum separation between node sets
        nsets: number of sets to truncate at, if -1 then make all sets

    Returns:
        list of list of list of nodes each separated by min_sep
    """

    # cur_ind1 indexes the group being filled, cur_ind2 the candidate set.
    node_sets_sep = [[]]
    cur_ind1 = 0
    cur_ind2 = 0

    # deep copy so popping candidates does not disturb the caller's list
    node_sets_tmp = copy.deepcopy(node_sets)

    # get all node to node distances in a dictionary (unit edge weight)
    all_dists = rx.all_pairs_dijkstra_path_lengths(G, lambda a: 1)

    while len(node_sets_tmp) > 0:
        # exhausted the remaining candidates for this group: start the next
        # group, unless the nsets truncation limit has been reached
        if cur_ind2 >= len(node_sets_tmp):
            if nsets > 0 and (cur_ind1 + 2) > nsets:
                break

            node_sets_sep.append([])
            cur_ind1 += 1
            cur_ind2 = 0

        # the candidate is accepted only if it keeps min_sep from every set
        # already placed in the current group
        add_set = True
        for node_set in node_sets_sep[cur_ind1]:
            if not sets_min_dist(all_dists, node_set, node_sets_tmp[cur_ind2], min_sep):
                add_set = False
                cur_ind2 += 1
                break

        if add_set:
            node_sets_sep[cur_ind1].append(node_sets_tmp[cur_ind2])
            # accepted candidates are consumed; cur_ind2 now points at the
            # next remaining candidate automatically
            node_sets_tmp.pop(cur_ind2)

    return node_sets_sep
def sets_min_dist(dist_dict, set1, set2, min_sep):
    """Calculate if two sets are min_sep apart

    Args:
        dist_dict: dictionary of distances between nodes
        set1: first set of nodes
        set2: second set of nodes
        min_sep: minimum separation

    Returns:
        True if the sets are disjoint and every cross-set pair of nodes is
        at least min_sep apart, False otherwise
    """

    # overlapping sets can never be separated
    if set(set1) & set(set2):
        return False

    for i in set1:
        for j in set2:
            if dist_dict[i][j] < min_sep:
                return False

    return True


def create_graph_dict(coupling_map: list, nq: int) -> dict:
    """Build an undirected adjacency dictionary from a coupling map.

    Args:
        coupling_map: list of directed edges (pairs of qubit indices)
        nq: number of qubits (nodes); every index 0..nq-1 gets an entry

    Returns:
        dict mapping each node to the list of its neighbors, with each
        neighbor recorded once regardless of edge direction or duplicates
    """
    graph_dict = {i: [] for i in range(nq)}

    for edge in coupling_map:
        # record the edge in both directions, skipping duplicates
        if edge[1] not in graph_dict[edge[0]]:
            graph_dict[edge[0]].append(edge[1])

        if edge[0] not in graph_dict[edge[1]]:
            graph_dict[edge[1]].append(edge[0])

    return graph_dict


def iter_neighbors(
    graph_dict: dict,
    cur_node: int,
    err_map: dict,
    best_fid: list,
    fid_cutoff: float,
    cur_list: list,
    chain_fid: float,
    pathlen: int,
) -> list:
    """
    Takes a path through a graph and recursively extends it with all the
    neighbor qubits of the last point, as long as the path does not fold
    back on itself. Tracks the best fidelity found so far and prunes
    branches that don't seem viable.

    If the lists get long enough return the lists.

    Args:
        graph_dict: dictionary of nodes and their neighbors
        cur_node: current node on the graph
        err_map: map of edge errors (AVERAGE gate error), keyed "a_b" in
            either orientation
        best_fid: list of length 1 (so mutable) of the best fidelity
        fid_cutoff: the percentage (0->1) of the best fidelity at that chain
            length to cutoff the search
        cur_list: current path through graph
        chain_fid: fidelity of the current path
        pathlen: length of the path we are trying to find

    Returns:
        new_list: a list of all the full-length paths extending cur_list

    Raises:
        KeyError: if an edge on the path is missing from err_map in both
            orientations
    """

    new_list = []
    for neighbor in graph_dict[cur_node]:
        # no backtracking: never revisit a node already on the path
        if len(cur_list) > 1 and neighbor in cur_list:
            continue

        # edge errors may be stored under either orientation of the key
        edge_key = "%d_%d" % (cur_node, neighbor)
        if edge_key not in err_map:
            edge_key = "%d_%d" % (neighbor, cur_node)
        edge_err = err_map[edge_key]

        # extend the chain fidelity; 5/4 presumably converts average gate
        # error to process fidelity for a 2-qubit gate — confirm convention.
        # Prune if the partial chain cannot beat fid_cutoff * best at this
        # fraction of the target length.
        new_fid = chain_fid * (1 - 5 / 4 * edge_err)
        if new_fid < (fid_cutoff * best_fid[0]) ** ((len(cur_list) + 1) / pathlen):
            continue

        # add the current node to the list
        cur_list_tmp = cur_list.copy()
        cur_list_tmp.append(neighbor)

        if len(cur_list_tmp) < pathlen:
            # not long enough yet: recurse to keep extending
            tmp_new_list = iter_neighbors(
                graph_dict,
                neighbor,
                err_map,
                best_fid,
                fid_cutoff,
                cur_list_tmp,
                new_fid,
                pathlen,
            )
            for tmp_node in tmp_new_list:
                if len(tmp_node) != 0:
                    new_list.append(tmp_node)
        else:
            # full-length path found; update the running best fidelity
            if new_fid > best_fid[0]:
                best_fid[0] = new_fid
            new_list.append(cur_list_tmp)
    return new_list
-------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/bell/bell_experiment.py: -------------------------------------------------------------------------------- 1 | from typing import List, Tuple, Sequence 2 | import numpy as np 3 | import matplotlib 4 | import matplotlib.pyplot as plt 5 | 6 | from qiskit.circuit import QuantumCircuit 7 | from qiskit.result import marginal_counts 8 | 9 | from qiskit_experiments.framework import ( 10 | BaseExperiment, 11 | BaseAnalysis, 12 | Options, 13 | ExperimentData, 14 | AnalysisResultData, 15 | ) 16 | 17 | 18 | class CHSHExperiment(BaseExperiment): 19 | """Custom experiment class template.""" 20 | 21 | def __init__(self, physical_qubits: Sequence[int], backend=None): 22 | """Initialize a chsh bell experiment 23 | 24 | Args: 25 | physical_qubits: List of physical qubits for the experiment. 26 | backend: The backend to run the experiment on. 27 | 28 | Raises: 29 | QiskitError: If any invalid argument is supplied. 
30 | """ 31 | 32 | super().__init__(physical_qubits, analysis=CHSHAnalysis(), backend=backend) 33 | 34 | def circuits(self) -> List[QuantumCircuit]: 35 | """Generate the list of circuits to be run.""" 36 | 37 | # Four circuits for this experiment 38 | # Assume the ideal basis for this inequality 39 | circuits = [] 40 | for i in range(4): 41 | qc = QuantumCircuit(2) 42 | qc.h(0) 43 | qc.cx(0, 1) 44 | 45 | # rotate the 2nd qubit by pi/4 (optimal for the inequality) 46 | qc.rx(np.pi / 4, 1) 47 | 48 | # measure in Z, ZY, YZ, YY 49 | if np.mod(i, 2): 50 | qc.sx(0) 51 | if np.mod(int(i / 2), 2): 52 | qc.sx(0) 53 | qc.measure_all() 54 | circuits.append(qc) 55 | 56 | return circuits 57 | 58 | @classmethod 59 | def _default_experiment_options(cls) -> Options: 60 | """Set default experiment options here.""" 61 | options = super()._default_experiment_options() 62 | options.update_options( 63 | shots=300, 64 | ) 65 | return options 66 | 67 | 68 | class CHSHAnalysis(BaseAnalysis): 69 | """Custom analysis class template.""" 70 | 71 | @classmethod 72 | def _default_options(cls) -> Options: 73 | """Set default analysis options. 
Plotting is on by default.""" 74 | 75 | options = super()._default_options() 76 | options.dummy_analysis_option = None 77 | options.plot = False 78 | options.ax = None 79 | return options 80 | 81 | def _estate(self, counts): 82 | # from a counts dictionary determine the correlation function E 83 | 84 | shots = np.sum([counts[i] for i in counts]) 85 | return ( 86 | counts.get("11", 0) 87 | + counts.get("00", 0) 88 | - counts.get("10", 0) 89 | - counts.get("01", 0) 90 | ) / shots 91 | 92 | def _run_analysis( 93 | self, 94 | experiment_data: ExperimentData, 95 | ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]: 96 | """Run the analysis.""" 97 | 98 | # Process the data here 99 | 100 | res = experiment_data.data() 101 | 102 | aa = [1, -1, -1, -1] 103 | S = np.sum([aa[i] * self._estate(res[i]["counts"]) for i in range(4)]) 104 | 105 | analysis_results = [AnalysisResultData(name="S", value=S)] 106 | 107 | return analysis_results, None 108 | 109 | 110 | class BellExperiment(BaseExperiment): 111 | """Custom experiment class template.""" 112 | 113 | def __init__(self, layered_coupling_map, cxnum=5, backend=None): 114 | """Initialize the experiment.""" 115 | 116 | physical_qubits = [] 117 | for layer in layered_coupling_map: 118 | for pair in layer: 119 | if pair[0] not in physical_qubits: 120 | physical_qubits.append(pair[0]) 121 | if pair[1] not in physical_qubits: 122 | physical_qubits.append(pair[1]) 123 | physical_qubits = range(backend.configuration().num_qubits) 124 | 125 | self.layered_coupling_map = layered_coupling_map 126 | self.cxnum = cxnum 127 | super().__init__(physical_qubits, analysis=BellAnalysis(), backend=backend) 128 | 129 | def circuits(self) -> List[QuantumCircuit]: 130 | """Generate the list of circuits to be run.""" 131 | conf = self.backend.configuration() 132 | circuits = make_bell_circs(self.layered_coupling_map, conf, cxnum=self.cxnum) 133 | 134 | return circuits 135 | 136 | @classmethod 137 | def 
_default_experiment_options(cls) -> Options: 138 | """Set default experiment options here.""" 139 | options = super()._default_experiment_options() 140 | options.update_options( 141 | shots=2048, 142 | ) 143 | return options 144 | 145 | 146 | class BellAnalysis(BaseAnalysis): 147 | """Custom analysis class template.""" 148 | 149 | @classmethod 150 | def _default_options(cls) -> Options: 151 | """Set default analysis options. Plotting is on by default.""" 152 | 153 | options = super()._default_options() 154 | options.dummy_analysis_option = None 155 | options.plot = True 156 | options.ax = None 157 | return options 158 | 159 | def _run_analysis( 160 | self, 161 | experiment_data: ExperimentData, 162 | ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]: 163 | """Run the analysis.""" 164 | 165 | # Process the data here 166 | from qiskit.quantum_info import hellinger_fidelity 167 | import pandas as pd 168 | 169 | res = experiment_data.data() 170 | 171 | cxnum = res[0]["metadata"]["cxnum"] 172 | if cxnum % 2 == 1: # usual case of making a Bell state 173 | target = {"00": 0.5, "11": 0.5} 174 | else: # even number of CX should be an identity 175 | target = {"00": 0.5, "01": 0.5} 176 | 177 | fid = [] 178 | cmap = [] 179 | for datum in res: 180 | coupling_map = datum["metadata"]["coupling_map"] 181 | # cxnum 182 | counts = datum["counts"] 183 | tmp = extract_ind_counts(coupling_map, counts, measure_idle=False) 184 | for cr, val in tmp.items(): 185 | cmap.append([int(bit) for bit in cr.split("_")]) 186 | fid.append(hellinger_fidelity(val, target)) 187 | 188 | df = {"connection": cmap, "fidelity": fid} 189 | fidelity_data = pd.DataFrame(df).sort_values(by="connection") 190 | 191 | analysis_results = [ 192 | AnalysisResultData(name="hellinger_fidelities", value=fidelity_data) 193 | ] 194 | figures = [] 195 | if self.options.plot: 196 | figures.append(self._plot(fidelity_data)) 197 | 198 | return analysis_results, figures 199 | 200 | def _plot(self, data): 
201 | fig, ax = plt.subplots() 202 | data.sort_values(by="connection").plot( 203 | x="connection", y="fidelity", kind="bar", ax=ax 204 | ) 205 | return fig 206 | 207 | 208 | def flatten_bits(crs): 209 | # it is important to follow bits in int format to match the arrangement 210 | if len(crs) == 0: 211 | return [] 212 | else: 213 | bits = [int(cr[0]) for cr in crs] 214 | bits.extend([int(cr[1]) for cr in crs]) 215 | return bits 216 | 217 | 218 | def make_bell_circs(layered_coupling_map, conf, cxnum): 219 | """ 220 | run simultaneous bell test. simultaneous pairs are obtained from get_layered_coupling_map 221 | We assume each cr ran only one time 222 | (e.g. [[1_2, 3_4], [5_6, 7_8]] is okay, but [[1_2, 3_4], [1_2, 5_6, 7_8]] is not okay) 223 | """ 224 | 225 | n_reset = 2 226 | cxnum = 5 227 | insert_barrier = False 228 | 229 | hadamard_idle = False 230 | y_basis = False 231 | measure_idle = False 232 | circs = [] 233 | 234 | for coupling_map in layered_coupling_map: 235 | bits = flatten_bits(coupling_map) 236 | nbits = len(bits) 237 | 238 | qc = QuantumCircuit(conf.n_qubits, nbits) 239 | # prepare qubits in superposition and then reset (conditionally) if requested 240 | if n_reset > 0: 241 | for bit in bits: 242 | qc.h(bit) 243 | for rnum in range(n_reset): 244 | qc.barrier() 245 | for bit in bits: 246 | qc.reset(bit) 247 | qc.barrier() 248 | elif insert_barrier: 249 | qc.barrier(bits) 250 | # now do the Bell state 251 | if hadamard_idle: # Hadamard all qubits except CNOT targets 252 | for i in range(conf.n_qubits): 253 | if i not in [edge[1] for edge in coupling_map]: 254 | qc.h(i) 255 | else: # Hadamard only CNOT control qubits 256 | for edge in coupling_map: 257 | qc.h(edge[0]) 258 | for i in range(cxnum): 259 | if insert_barrier: 260 | qc.barrier(bits) 261 | for edge in coupling_map: 262 | qc.cx(edge[0], edge[1]) 263 | qc.barrier(edge[0], edge[1]) 264 | if y_basis: 265 | if insert_barrier: 266 | qc.barrier(bits) 267 | for edge in coupling_map: 268 | qc.s(edge[0]) 
269 | qc.sdg(edge[1]) 270 | qc.h(edge[0]) 271 | qc.h(edge[1]) 272 | if measure_idle: 273 | full_list = list(range(conf.n_qubits)) 274 | qc.measure(full_list, full_list) 275 | else: 276 | qc.measure(bits, list(range(nbits))) 277 | 278 | qc.metadata["coupling_map"] = coupling_map 279 | qc.metadata["cxnum"] = cxnum 280 | circs.append(qc) 281 | return circs 282 | 283 | 284 | def extract_ind_counts(crs, counts, measure_idle): 285 | # it is important to follow bits in int format to match the arrangement 286 | # of classical register in circuit composer in run code 287 | if not measure_idle: 288 | bits = flatten_bits(crs) 289 | len(bits) 290 | bit2idx = {} 291 | for i, bit in enumerate(bits): 292 | bit2idx.update({int(bit): i}) 293 | # shuffle the data 294 | ind_counts = {} 295 | for i, cr in enumerate(crs): 296 | label = "{}_{}".format(cr[0], cr[1]) 297 | if measure_idle: 298 | idx1 = int(cr[0]) 299 | idx2 = int(cr[1]) 300 | else: 301 | idx1 = bit2idx[int(cr[0])] 302 | idx2 = bit2idx[int(cr[1])] 303 | ind_counts.update({label: marginal_counts(counts, [idx1, idx2])}) 304 | 305 | if measure_idle and cr[0] > cr[1]: 306 | ind_counts[label]["01"], ind_counts[label]["10"] = ( 307 | ind_counts[label].get("10", 0), 308 | ind_counts[label].get("01", 0), 309 | ) 310 | 311 | return ind_counts 312 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/mrb/mirror_rb_analysis.py: -------------------------------------------------------------------------------- 1 | # This code is part of Qiskit. 2 | # 3 | # (C) Copyright IBM 2023. 4 | # 5 | # This code is licensed under the Apache License, Version 2.0. You may 6 | # obtain a copy of this license in the LICENSE.txt file in the root directory 7 | # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Any modifications or derivative works of this code must retain this 10 | # copyright notice, and modified files need to carry a notice indicating 11 | # that they have been altered from the originals. 12 | """ 13 | Mirror RB analysis class. 
14 | """ 15 | 16 | from typing import List, Union 17 | import numpy as np 18 | from uncertainties import unumpy as unp 19 | from scipy.spatial.distance import hamming 20 | 21 | import qiskit_experiments.curve_analysis as curve 22 | from qiskit_experiments.framework import AnalysisResultData, ExperimentData 23 | from qiskit_experiments.data_processing import DataProcessor 24 | from qiskit_experiments.data_processing.data_action import DataAction 25 | from qiskit_experiments.library.randomized_benchmarking.rb_analysis import RBAnalysis 26 | 27 | 28 | class MirrorRBAnalysis(RBAnalysis): 29 | r"""A class to analyze mirror randomized benchmarking experiment. 30 | 31 | # section: overview 32 | This analysis takes a series for Mirror RB curve fitting. 33 | From the fit :math:`\alpha` value this analysis estimates the mean entanglement infidelity (EI) 34 | and the error per Clifford (EPC), also known as the average gate infidelity (AGI). 35 | 36 | The EPC (AGI) estimate is obtained using the equation 37 | 38 | .. math:: 39 | 40 | EPC = \frac{2^n - 1}{2^n}\left(1 - \alpha\right) 41 | 42 | where :math:`n` is the number of qubits (width of the circuit). 43 | 44 | The EI is obtained using the equation 45 | 46 | .. math:: 47 | 48 | EI = \frac{4^n - 1}{4^n}\left(1 - \alpha\right) 49 | 50 | The fit :math:`\alpha` parameter can be fit using one of the following three quantities 51 | plotted on the y-axis: 52 | 53 | Success Probabilities (:math:`p`): The proportion of shots that return the correct bitstring 54 | 55 | Adjusted Success Probabilities (:math:`p_0`): 56 | 57 | .. math:: 58 | 59 | p_0 = \sum_{k = 0}^n \left(-\frac{1}{2}\right)^k h_k 60 | 61 | where :math:`h_k` is the probability of observing a bitstring of Hamming distance of k from the 62 | correct bitstring 63 | 64 | Effective Polarizations (:math:`S`): 65 | 66 | .. 
math:: 67 | 68 | S = \frac{4^n}{4^n-1}\left(\sum_{k=0}^n\left(-\frac{1}{2}\right)^k h_k\right)-\frac{1}{4^n-1} 69 | 70 | # section: fit_model 71 | The fit is based on the following decay functions: 72 | 73 | .. math:: 74 | 75 | F(x) = a \alpha^{x} + b 76 | 77 | # section: fit_parameters 78 | defpar a: 79 | desc: Height of decay curve. 80 | init_guess: Determined by :math:`1 - b`. 81 | bounds: [0, 1] 82 | defpar b: 83 | desc: Base line. 84 | init_guess: Determined by :math:`(1/2)^n` (for success probability) or :math:`(1/4)^n` 85 | (for adjusted success probability and effective polarization). 86 | bounds: [0, 1] 87 | defpar \alpha: 88 | desc: Depolarizing parameter. 89 | init_guess: Determined by :func:`~rb_decay` with standard RB curve. 90 | bounds: [0, 1] 91 | 92 | # section: reference 93 | .. ref_arxiv:: 1 2112.09853 94 | 95 | """ 96 | 97 | @classmethod 98 | def _default_options(cls): 99 | """Default analysis options. 100 | 101 | Analysis Options: 102 | analyzed_quantity (str): Set the metric to plot on the y-axis. Must be one of 103 | "Effective Polarization" (default), "Success Probability", or "Adjusted 104 | Success Probability". 105 | gate_error_ratio (Optional[Dict[str, float]]): A dictionary with gate name keys 106 | and error ratio values used when calculating EPG from the estimated EPC. 107 | The default value will use standard gate error ratios. 108 | If you don't know accurate error ratio between your basis gates, 109 | you can skip analysis of EPGs by setting this options to ``None``. 110 | epg_1_qubit (List[AnalysisResult]): Analysis results from previous RB experiments 111 | for individual single qubit gates. If this is provided, EPC of 112 | 2Q RB is corrected to exclude the depolarization of underlying 1Q channels. 
        """
        default_options = super()._default_options()

        # Set labels of axes
        default_options.plotter.set_figure_options(
            xlabel="Clifford Length",
            ylabel="Effective Polarization",
        )

        # Plot all (adjusted) success probabilities
        default_options.plot_raw_data = True

        # Exponential decay parameter
        default_options.result_parameters = ["alpha"]

        # Default gate error ratio for calculating EPG
        default_options.gate_error_ratio = "default"

        # By default, EPG for single qubits aren't set
        default_options.epg_1_qubit = None

        # By default, effective polarization is plotted (see arXiv:2112.09853). We can
        # also plot success probability or adjusted success probability (see PyGSTi).
        # Do this by setting options to "Success Probability" or "Adjusted Success Probability"
        default_options.analyzed_quantity = "Effective Polarization"

        # Restrict analyzed_quantity to the three supported y-axis quantities.
        default_options.set_validator(
            field="analyzed_quantity",
            validator_value=[
                "Success Probability",
                "Adjusted Success Probability",
                "Effective Polarization",
            ],
        )

        return default_options

    def _generate_fit_guesses(
        self,
        user_opt: curve.FitOptions,
        curve_data: curve.ScatterTable,
    ) -> Union[curve.FitOptions, List[curve.FitOptions]]:
        """Create algorithmic guess with analysis options and curve data.

        Args:
            user_opt: Fit options filled with user provided guess and bounds.
            curve_data: Formatted data collection to fit.

        Returns:
            List of fit options that are passed to the fitter function.
        """

        # a, alpha and b all lie in [0, 1] for the decay model y = a * alpha**x + b.
        user_opt.bounds.set_if_empty(a=(0, 1), alpha=(0, 1), b=(0, 1))
        num_qubits = len(self._physical_qubits)

        # Initialize guess for baseline and amplitude based on infidelity type
        b_guess = 1 / 4**num_qubits
        if self.options.analyzed_quantity == "Success Probability":
            b_guess = 1 / 2**num_qubits

        mirror_curve = curve_data.get_subset_of("rb_decay")
        alpha_mirror = curve.guess.rb_decay(mirror_curve.x, mirror_curve.y, b=b_guess)
        # Solve a from the first data point: y0 = a * alpha**x0 + b.
        a_guess = (curve_data.y[0] - b_guess) / (alpha_mirror ** curve_data.x[0])

        user_opt.p0.set_if_empty(b=b_guess, a=a_guess, alpha=alpha_mirror)

        return user_opt

    def _create_analysis_results(
        self,
        fit_data: curve.CurveFitResult,
        quality: str,
        **metadata,
    ) -> List[AnalysisResultData]:
        """Create analysis results for important fit parameters. Besides the
        default standard RB parameters, Entanglement Infidelity (EI) is also calculated.

        Args:
            fit_data: Fit outcome.
            quality: Quality of fit outcome.

        Returns:
            List of analysis result data.
        """

        outcomes = super()._create_analysis_results(fit_data, quality, **metadata)
        num_qubits = len(self._physical_qubits)

        # nrb is calculated for both EPC and EI per the equations in the docstring
        ei_nrb = 4**num_qubits
        ei_scale = (ei_nrb - 1) / ei_nrb
        ei = ei_scale * (1 - fit_data.ufloat_params["alpha"])

        outcomes.append(
            AnalysisResultData(
                name="EI",
                value=ei,
                chisq=fit_data.reduced_chisq,
                quality=quality,
                extra=metadata,
            )
        )

        return outcomes

    def _initialize(self, experiment_data: ExperimentData):
        """Initialize curve analysis by setting up the data processor for Mirror
        RB data.

        Args:
            experiment_data: Experiment data to analyze.
        """
        super()._initialize(experiment_data)

        num_qubits = len(self._physical_qubits)
        # Target bitstring per circuit: all zeros when an inverting Pauli layer
        # was appended, otherwise the target recorded in the circuit metadata.
        target_bs = []
        for circ_result in experiment_data.data():
            if circ_result["metadata"]["inverting_pauli_layer"] is True:
                target_bs.append("0" * num_qubits)
            else:
                target_bs.append(circ_result["metadata"]["target"])

        self.set_options(
            data_processor=DataProcessor(
                input_key="counts",
                data_actions=[
                    _ComputeQuantities(
                        analyzed_quantity=self.options.analyzed_quantity,
                        num_qubits=num_qubits,
                        target_bs=target_bs,
                    )
                ],
            )
        )


class _ComputeQuantities(DataAction):
    """Data processing node for computing useful mirror RB quantities from raw results."""

    def __init__(
        self,
        num_qubits,
        target_bs,
        analyzed_quantity: str = "Effective Polarization",
        validate: bool = True,
    ):
        """
        Args:
            num_qubits: Number of qubits.
            target_bs: The ideal (target) bitstring for each circuit, in the same
                order as the experiment data.
            analyzed_quantity: The quantity to calculate; one of
                "Success Probability", "Adjusted Success Probability", or
                "Effective Polarization".
            validate: If set to False the DataAction will not validate its input.
        """
        super().__init__(validate)
        self._num_qubits = num_qubits
        self._analyzed_quantity = analyzed_quantity
        self._target_bs = target_bs

    def _process(self, data: np.ndarray):
        # Arrays to store the y-axis data and uncertainties
        y_data = []
        y_data_unc = []

        for i, circ_result in enumerate(data):
            target_bs = self._target_bs[i]

            # h[k] = proportion of shots that are Hamming distance k away from target bitstring
            hamming_dists = np.zeros(self._num_qubits + 1)
            success_prob = 0.0
            success_prob_unc = 0.0
            for bitstring, count in circ_result.items():
                # Compute success probability
                if self._analyzed_quantity == "Success Probability":
                    if bitstring == target_bs:
                        success_prob = count / sum(circ_result.values())
                        # Binomial-style uncertainty estimate.
                        success_prob_unc = np.sqrt(success_prob * (1 - success_prob))
                        break
                else:
                    # Compute hamming distance proportions
                    target_bs_to_list = [int(char) for char in target_bs]
                    actual_bs_to_list = [int(char) for char in bitstring]
                    # scipy's hamming() returns a fraction; scale back to a count.
                    k = int(
                        round(
                            hamming(target_bs_to_list, actual_bs_to_list)
                            * self._num_qubits
                        )
                    )
                    hamming_dists[k] += count / sum(circ_result.values())

            if self._analyzed_quantity == "Success Probability":
                y_data.append(success_prob)
                y_data_unc.append(success_prob_unc)
                continue

            # Compute hamming distance uncertainties
            hamming_dist_unc = np.sqrt(hamming_dists * (1 - hamming_dists))

            # Compute adjusted success probability and standard deviation
            adjusted_success_prob = 0.0
            adjusted_success_prob_unc = 0.0
            for k in range(self._num_qubits + 1):
                adjusted_success_prob += (-0.5) ** k * hamming_dists[k]
                adjusted_success_prob_unc += (0.5) ** k * hamming_dist_unc[k] ** 2
            adjusted_success_prob_unc = np.sqrt(adjusted_success_prob_unc)
            if self._analyzed_quantity == "Adjusted Success Probability":
                y_data.append(adjusted_success_prob)
                y_data_unc.append(adjusted_success_prob_unc)

            # Compute effective polarization and standard deviation (arXiv:2112.09853v1)
            pol_factor = 4**self._num_qubits
            pol = pol_factor / (pol_factor - 1) * adjusted_success_prob - 1 / (
                pol_factor - 1
            )
            pol_unc = np.sqrt(pol_factor / (pol_factor - 1)) * adjusted_success_prob_unc
            if self._analyzed_quantity == "Effective Polarization":
                y_data.append(pol)
                y_data_unc.append(pol_unc)

        return unp.uarray(y_data, y_data_unc)
--------------------------------------------------------------------------------
/qiskit_device_benchmarking/mirror_test/mirror_circuits.py:
--------------------------------------------------------------------------------
# (C) Copyright IBM 2023, 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
10 | 11 | """Trotter circuit generation""" 12 | 13 | from collections import defaultdict 14 | from typing import Sequence 15 | from math import inf 16 | import numpy as np 17 | import networkx as nx 18 | from qiskit.circuit import QuantumCircuit, Parameter 19 | from qiskit.circuit.library import CXGate, CZGate, ECRGate 20 | from qiskit.transpiler import CouplingMap, generate_preset_pass_manager as generate_pm 21 | from qiskit.transpiler.exceptions import TranspilerError 22 | from qiskit.quantum_info import PauliList 23 | from qiskit_ibm_runtime import IBMBackend 24 | 25 | 26 | def remove_qubit_couplings( 27 | couplings: Sequence[tuple[int, int]], qubits: Sequence[int] | None = None 28 | ) -> list[tuple[int, int]]: 29 | """Remove qubits from a coupling list. 30 | 31 | Args: 32 | couplings: A sequence of qubit couplings. 33 | qubits: Optional, the qubits to remove. 34 | 35 | Returns: 36 | The input couplings with the specified qubits removed. 37 | """ 38 | if qubits is None: 39 | return couplings 40 | qubits = set(qubits) 41 | return [edge for edge in couplings if not qubits.intersection(edge)] 42 | 43 | 44 | def coupling_qubits( 45 | *couplings: Sequence[tuple[int, int]], allowed_qubits: Sequence[int] | None = None 46 | ) -> list[int]: 47 | """Return a sorted list of all qubits involved in 1 or more couplings lists. 48 | 49 | Args: 50 | couplings: 1 or more coupling lists. 51 | allowed_qubits: Optional, the allowed qubits to include. If None all 52 | qubits are allowed. 53 | 54 | Returns: 55 | The intersection of all qubits in the couplings and the allowed qubits. 
56 | """ 57 | qubits = set() 58 | for edges in couplings: 59 | for edge in edges: 60 | qubits.update(edge) 61 | if allowed_qubits is not None: 62 | qubits = qubits.intersection(allowed_qubits) 63 | return list(qubits) 64 | 65 | 66 | def chain_coupling_map( 67 | coupling_map: list[tuple[int, int]], 68 | path: list[int], 69 | ) -> list[list[tuple[int, int]]]: 70 | """Construct the sub-CouplingMap for a 1D path through a 2D coupling map. 71 | 72 | Args: 73 | coupling_map: The input coupling map that is connected along the specified path. 74 | path: The ordered list of nodes to constructed a path for. 75 | 76 | Returns: 77 | The sub set of edges in the coupling map that are on the specified path. 78 | """ 79 | coupling_edges = nx.DiGraph(list(coupling_map)).edges() 80 | path_edges = [] 81 | for pos in range(1, len(path)): 82 | node_a = path[pos - 1] 83 | node_b = path[pos] 84 | for edge in ((node_a, node_b), (node_b, node_a)): 85 | if edge in coupling_edges: 86 | path_edges.append(edge) 87 | return path_edges 88 | 89 | 90 | def directed_coupling_map(backend: IBMBackend) -> CouplingMap: 91 | """Construct a single-directional coupling map of shortest gates. 92 | 93 | Args: 94 | backend: A backend to extract coupling map and gate durations from. 95 | 96 | Returns: 97 | The directed coupling map of the shortest gate for each coupling pair. 
98 | """ 99 | directional_coupling = {} 100 | target = backend.target 101 | durations = target.durations() 102 | for inst, qubits in target.instructions: 103 | if inst.num_qubits == 2 and qubits is not None: 104 | key = tuple(sorted(qubits)) 105 | if key in directional_coupling: 106 | continue 107 | q0, q1 = key 108 | try: 109 | length1 = durations.get(inst, (q0, q1)) 110 | except TranspilerError: 111 | length1 = inf 112 | try: 113 | length2 = durations.get(inst, (q1, q0)) 114 | except TranspilerError: 115 | length2 = inf 116 | 117 | shortest_pair = [q0, q1] if length1 <= length2 else [q1, q0] 118 | directional_coupling[key] = shortest_pair 119 | return CouplingMap(sorted(directional_coupling.values())) 120 | 121 | 122 | def construct_layer_couplings( 123 | backend: IBMBackend, path: Sequence[int] = None 124 | ) -> list[list[tuple[int, int]]]: 125 | """Separate a coupling map into disjoint 2-qubit gate layers. 126 | 127 | Args: 128 | backend: A backend to construct layer couplings for. 129 | path: Optional, the ordered list of nodes to constructed a 1D path for couplings. 130 | 131 | Returns: 132 | A list of disjoint layers of directed couplings for the input coupling map. 
133 | """ 134 | coupling_map = directed_coupling_map(backend) 135 | if path is not None: 136 | coupling_map = chain_coupling_map(coupling_map, path) 137 | 138 | # Convert coupling map to a networkx graph 139 | coupling_graph = nx.Graph(list(coupling_map)) 140 | 141 | # Edge coloring is vertex coloring on the dual graph 142 | dual_graph = nx.line_graph(coupling_graph) 143 | edge_coloring = nx.greedy_color(dual_graph, interchange=True) 144 | 145 | # Sort layers 146 | layers = defaultdict(list) 147 | for edge, color in edge_coloring.items(): 148 | if edge not in coupling_map: 149 | edge = tuple(reversed(edge)) 150 | layers[color].append(edge) 151 | layers = [sorted(layers[i]) for i in sorted(layers.keys())] 152 | 153 | return layers 154 | 155 | 156 | def entangling_layer( 157 | gate_2q: str, 158 | couplings: Sequence[tuple[int, int]], 159 | qubits: Sequence[int] | None = None, 160 | ) -> QuantumCircuit: 161 | """Generating a entangling layer for the specified couplings. 162 | 163 | This corresonds to a Trotter layer for a ZZ Ising term with angle Pi/2. 164 | 165 | Args: 166 | gate_2q: The 2-qubit basis gate for the layer, should be "cx", "cz", or "ecr". 167 | couplings: A sequence of qubit couplings to add CX gates to. 168 | qubits: Optional, the physical qubits for the layer. Any couplings involving 169 | qubits not in this list will be removed. If None the range up to the largest 170 | qubit in the couplings will be used. 171 | 172 | Returns: 173 | The QuantumCircuit for the entangling layer. 
174 | """ 175 | # Get qubits and convert to set to order 176 | if qubits is None: 177 | qubits = range(1 + max(coupling_qubits(*couplings))) 178 | qubits = set(qubits) 179 | 180 | # Mapping of physical qubit to virtual qubit 181 | qubit_mapping = {q: i for i, q in enumerate(qubits)} 182 | 183 | # Convert couplings to indices for virtual qubits 184 | indices = [ 185 | [qubit_mapping[i] for i in edge] 186 | for edge in couplings 187 | if qubits.issuperset(edge) 188 | ] 189 | 190 | # Layer circuit on virtual qubits 191 | circuit = QuantumCircuit(len(qubits)) 192 | 193 | # Get 2-qubit basis gate and pre and post rotation circuits 194 | gate2q = None 195 | pre = QuantumCircuit(2) 196 | post = QuantumCircuit(2) 197 | 198 | if gate_2q == "cx": 199 | gate2q = CXGate() 200 | # Pre-rotation 201 | pre.sdg(0) 202 | pre.z(1) 203 | pre.sx(1) 204 | pre.s(1) 205 | # Post-rotation 206 | post.sdg(1) 207 | post.sxdg(1) 208 | post.s(1) 209 | elif gate_2q == "ecr": 210 | gate2q = ECRGate() 211 | # Pre-rotation 212 | pre.z(0) 213 | pre.s(1) 214 | pre.sx(1) 215 | pre.s(1) 216 | # Post-rotation 217 | post.x(0) 218 | post.sdg(1) 219 | post.sxdg(1) 220 | post.s(1) 221 | elif gate_2q == "cz": 222 | gate2q = CZGate() 223 | # Identity pre-rotation 224 | # Post-rotation 225 | post.sdg([0, 1]) 226 | else: 227 | raise ValueError( 228 | f"Invalid 2-qubit basis gate {gate_2q}, should be 'cx', 'cz', or 'ecr'" 229 | ) 230 | 231 | # Add 1Q pre-rotations 232 | for inds in indices: 233 | circuit.compose(pre, qubits=inds, inplace=True) 234 | 235 | # Use barriers around 2-qubit basis gate to specify a layer for PEA noise learning 236 | circuit.barrier() 237 | for inds in indices: 238 | circuit.append(gate2q, (inds[0], inds[1])) 239 | circuit.barrier() 240 | 241 | # Add 1Q post-rotations after barrier 242 | for inds in indices: 243 | circuit.compose(post, qubits=inds, inplace=True) 244 | 245 | # Add physical qubits as metadata 246 | circuit.metadata["physical_qubits"] = tuple(qubits) 247 | 248 | return 
circuit 249 | 250 | 251 | def trotter_circuit( 252 | theta: Parameter | float, 253 | layer_couplings: Sequence[Sequence[tuple[int, int]]], 254 | num_steps: int, 255 | gate_2q: str | None = "cx", 256 | backend: IBMBackend | None = None, 257 | qubits: Sequence[int] | None = None, 258 | ) -> QuantumCircuit: 259 | """Generate a Trotter circuit for the 2D Ising 260 | 261 | Args: 262 | theta: The angle parameter for X. 263 | layer_couplings: A list of couplings for each entangling layer. 264 | num_steps: the number of Trotter steps. 265 | gate_2q: The 2-qubit basis gate to use in entangling layers. 266 | Can be "cx", "cz", "ecr", or None if a backend is provided. 267 | backend: A backend to get the 2-qubit basis gate from, if provided 268 | will override the basis_gate field. 269 | qubits: Optional, the allowed physical qubits to truncate the 270 | couplings to. If None the range up to the largest 271 | qubit in the couplings will be used. 272 | 273 | Returns: 274 | The Trotter circuit. 275 | """ 276 | if backend is not None: 277 | try: 278 | basis_gates = backend.configuration().basis_gates 279 | except AttributeError: 280 | basis_gates = backend.basis_gates 281 | for gate in ["cx", "cz", "ecr"]: 282 | if gate in basis_gates: 283 | gate_2q = gate 284 | break 285 | 286 | # If no qubits, get the largest qubit from all layers and 287 | # specify the range so the same one is used for all layers. 
288 | if qubits is None: 289 | qubits = range(1 + max(coupling_qubits(*layer_couplings))) 290 | 291 | coup_q_list = coupling_qubits(*layer_couplings) 292 | 293 | # Generate the entangling layers 294 | layers = [ 295 | entangling_layer(gate_2q, couplings, qubits=qubits) 296 | for couplings in layer_couplings 297 | ] 298 | 299 | # Construct the circuit for a single Trotter step 300 | num_qubits = len(qubits) 301 | trotter_step = QuantumCircuit(num_qubits) 302 | trotter_step.rx(theta, coup_q_list) 303 | for layer in layers: 304 | trotter_step.compose(layer, range(num_qubits), inplace=True) 305 | 306 | # Construct the circuit for the specified number of Trotter steps 307 | circuit = QuantumCircuit(num_qubits) 308 | for _ in range(num_steps): 309 | circuit.rx(theta, coup_q_list) 310 | for layer in layers: 311 | circuit.compose(layer, range(num_qubits), inplace=True) 312 | 313 | circuit.metadata["physical_qubits"] = tuple(qubits) 314 | return circuit 315 | 316 | 317 | def mirror_trotter_circuit_1d( 318 | theta: Parameter | float, 319 | delta: Parameter | float, 320 | num_steps: int, 321 | path: Sequence[int], 322 | backend: IBMBackend, 323 | ) -> QuantumCircuit: 324 | """Generate a mirrored Trotter circuit for 1D Ising on specified path. 325 | 326 | Args: 327 | theta: The angle parameter for X in simulated Hamiltonian. 328 | delta: The angle parameter for Rx rotation before final measurement 329 | num_steps: the number of Trotter steps. The returned circuit will 330 | have a 2-qubit layer depth of ``4 * num_steps``. 331 | path: The ordered list of nodes to constructed a 1D path for couplings. 332 | backend: A backend to get the 2-qubit basis gate from, if provided 333 | will override the basis_gate field. 334 | 335 | Returns: 336 | The Trotter circuit. 
337 | """ 338 | layer_couplings = construct_layer_couplings(backend, path=path) 339 | circuit = trotter_circuit(theta, layer_couplings, num_steps, backend=backend) 340 | 341 | # Construct mirror circuit 342 | mirror_circuit = circuit.compose(circuit.inverse()) 343 | mirror_circuit.metadata["physical_qubits"] = path 344 | 345 | # Add contrast rotation 346 | mirror_circuit.rx(delta, path) 347 | 348 | return mirror_circuit 349 | 350 | 351 | def mirror_trotter_pub_1d( 352 | num_steps: int, 353 | path: Sequence[int], 354 | backend: IBMBackend, 355 | theta_values: Sequence[float] = (0,), 356 | magnetization_values: Sequence[float] = (1,), 357 | ): 358 | """Generate a mirrored Trotter circuit EstimatorPub for 1D Ising on specified path. 359 | 360 | Args: 361 | num_steps: the number of Trotter steps. The returned circuit will 362 | have a 2-qubit layer depth of ``4 * num_steps``. 363 | path: The ordered list of nodes to constructed a 1D path for couplings. 364 | backend: A backend to get the 2-qubit basis gate from, if provided 365 | will override the basis_gate field. 366 | theta_values: The angle parameter values for X in simulated Hamiltonian. 367 | magnetization_values: The ideal magnetization values for the circuit after 368 | mirroring. Implement by single-qubit rotations of each qubit to reduce 369 | contrast of final measurements. 370 | 371 | Returns: 372 | The mirrored Trotter circuit EstimatorPub. 
373 | """ 374 | # Shape is theta first, magnetization second, observable last 375 | shape = (len(theta_values), len(magnetization_values), 1) 376 | theta_bc = np.broadcast_to(np.array(theta_values).reshape((-1, 1, 1)), shape) 377 | delta_vals = 2 * np.arccos(np.sqrt((1 + np.array(magnetization_values)) / 2)) 378 | delta_bc = np.broadcast_to(delta_vals.reshape(1, -1, 1), shape) 379 | param_vals = np.stack([theta_bc, delta_bc], axis=-1) 380 | 381 | # Important that theta < delta in circuit order 382 | theta = Parameter("A") 383 | delta = Parameter("C") 384 | circuit = mirror_trotter_circuit_1d(theta, delta, num_steps, path, backend=backend) 385 | pm = generate_pm( 386 | optimization_level=1, target=backend.target, layout_method="trivial" 387 | ) 388 | circuit = pm.run(circuit) 389 | obs = magnetization_observables(path, circuit.num_qubits) 390 | pub = (circuit, obs, param_vals) 391 | return pub 392 | 393 | 394 | def magnetization_observables( 395 | physical_qubits: Sequence[int], num_qubits: int | None = None 396 | ) -> PauliList: 397 | """Return the PauliList for magnetization measurement observables for ISA circuits.""" 398 | max_qubit = max(physical_qubits) 399 | if num_qubits is None: 400 | num_qubits = 1 + max_qubit 401 | elif num_qubits <= max_qubit: 402 | raise ValueError( 403 | f"num_qubits must be >= {max_qubit} for specified physical qubits" 404 | ) 405 | zs = np.zeros((len(physical_qubits), num_qubits), dtype=bool) 406 | for idx, qubit in enumerate(physical_qubits): 407 | zs[idx, qubit] = True 408 | xs = np.zeros_like(zs) 409 | return PauliList.from_symplectic(zs, xs) 410 | -------------------------------------------------------------------------------- /qiskit_device_benchmarking/bench_code/dynamic_circuits_rb/dc_rb_experiment.py: -------------------------------------------------------------------------------- 1 | # (C) Copyright IBM 2024. 2 | # 3 | # This code is licensed under the Apache License, Version 2.0. 
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""
Dynamic circuits RB Experiment class.
"""

from matplotlib.backends.backend_svg import FigureCanvasSVG
from matplotlib.figure import Figure
import cmath
import math

from qiskit_ibm_runtime.transpiler.passes.scheduling import (
    DynamicCircuitInstructionDurations,
)
from scipy.linalg import det
from numpy.random import default_rng
from qiskit.circuit import QuantumCircuit, Delay

from qiskit.quantum_info import Clifford
from qiskit.quantum_info.random import random_clifford
from qiskit.circuit.instruction import Instruction
from qiskit.circuit.library import UGate, SXGate, RZGate
from qiskit.exceptions import QiskitError
from numpy.random import Generator
from typing import Sequence, List, Iterator
import numpy as np
from qiskit.providers.backend import Backend
from qiskit_experiments.framework import (
    BaseExperiment,
    BackendTiming,
)
from qiskit_experiments.data_processing import (
    DataProcessor,
    Probability,
    MarginalizeCounts,
)
import qiskit_experiments.curve_analysis as curve
from ..mcm_rb import SubDecayFit


class DynamicCircuitsRB(BaseExperiment):
    """Dynamic circuits Randomized Benchmarking.

    # section: overview

        a series of dynamic circuit benchmarking routines based on interleaving dynamic circuit
        operation blocks in one-qubit randomized benchmarking sequences of data qubits. The blocks span
        between the set of data qubits and a measurement qubit and may include feedforward operations
        based on the measurement.

    # section: reference
        .. ref_arxiv:: 1 2408.07677

    """

    def __init__(
        self,
        physical_qubits: Sequence[int],
        backend: Backend,
        n_blocks=(0, 1, 2, 3, 4, 5, 10, 15, 20),
        num_samples=3,
        seed=100,
        cliff_per_meas=5,
        ff_operations=("I_c0", "Z_c0", "I_c1", "Z_c1", "Delay"),
        ff_delay=2120,
        plot_measured_qubit=False,
        plot_summary=False,
    ):
        """Dynamic circuits RB.
        Args:
            physical_qubits: The qubits on which to run the experiment.
            backend: The backend to run the experiment on.
            n_blocks: Number of measurements/feedforward operations
            num_samples: Number of different sequences to generate.
            seed: Seed for the random number generator.
            cliff_per_meas: Number of random Cliffords between interleaved blocks.
            ff_operations: Sequence of the dynamic circuits blocks labels.
            ff_delay: Feedforward latency in dt units.
            plot_measured_qubit: Plot the decay curve of the measured qubit.
            plot_summary: Plot summary of the decay parameters.
        """
        super().__init__(physical_qubits=physical_qubits, backend=backend)
        self.analysis = DynamicCircuitsRBAnalysis(
            physical_qubits=physical_qubits,
            ff_operations=ff_operations,
            plot_measured_qubit=plot_measured_qubit,
            plot_summary=plot_summary,
        )
        self.n_blocks = n_blocks
        self.seed = seed
        self.num_samples = num_samples
        self.ff_operations = ff_operations
        self.ff_delay = ff_delay
        self.cliff_per_meas = cliff_per_meas
        # The H_CNOT-style blocks entangle exactly one data qubit with the
        # measured qubit, so they only make sense for 2 physical qubits.
        if "H_CNOT" in self.ff_operations and len(physical_qubits) != 2:
            raise Exception("The CNOT blocks are supported only for 2 physical qubits")

    def circuits(self) -> List[QuantumCircuit]:
        """Return a list of RB circuits.

        Returns:
            A list of :class:`QuantumCircuit`.
        """
        rng = default_rng(seed=self.seed)

        n_qubits = self.num_qubits

        # Construct interleaved parts
        ff_circs = []
        for ff_type in self.ff_operations:
            ff_circs.append(self.ff_circ(ff_type))

        circuits = []

        for i in range(self.num_samples):
            for length in self.n_blocks:
                # One independent Clifford sequence generator per data qubit
                # (the last physical qubit is the measured qubit).
                generators = (
                    self._generate_sequences(length * self.cliff_per_meas, rng)
                    for _ in range(n_qubits - 1)
                )

                # Generate MCM RB circuit
                circs = []
                for ff_type in self.ff_operations:
                    circ = QuantumCircuit(n_qubits, n_qubits)
                    circ.metadata = {
                        "xval": length,
                        "physical_qubits": self.physical_qubits,
                        "num_sample": i,
                        "ff_type": ff_type,
                    }
                    circs.append(circ)

                n_elms = 0
                for elms in zip(*generators):
                    n_elms += 1
                    for q, elm in enumerate(elms):
                        # Add a single random clifford
                        for inst in self._sequence_to_instructions(elm):
                            for circ in circs:
                                circ._append(inst, [circ.qubits[q]], [])
                    # Sync time
                    for circ in circs:
                        circ.barrier()
                    # Interleave a block after every cliff_per_meas Cliffords,
                    # but not after the final inverting Clifford.
                    if n_elms <= (length * self.cliff_per_meas) and (
                        np.mod(n_elms, self.cliff_per_meas) == 0
                    ):
                        # Interleave MCM
                        for circ, ff_circ in zip(circs, ff_circs):
                            circ.compose(ff_circ, inplace=True, qubits=circ.qubits)
                            circ.barrier()
                for circ in circs:
                    circ.barrier()
                    circ.measure(circ.qubits, circ.clbits)

                circuits.extend(circs)

        return circuits

    def ff_circ(self, ff_type):
        # Build one interleaved measurement/feedforward block for the given
        # block label; raises for unknown labels.
        circ = QuantumCircuit(self.num_qubits, self.num_qubits)
        timing = BackendTiming(self.backend)
        durations = DynamicCircuitInstructionDurations.from_backend(self.backend)
        clbits = circ.clbits
        qubits = circ.qubits
        if ff_type == "H_CNOT":
            circ.h(qubits[-1])
            circ.barrier()
            circ.cx(qubits[-1], qubits[0])
            circ.barrier()
            circ.measure(qubits[-1], clbits[-1])
            with circ.if_test((clbits[-1], 1)):
                circ.x(qubits[0])
                circ.x(qubits[-1])
        elif ff_type == "H_CNOT_FFDD":
            # Dynamical decoupling on the data qubit spanning the measurement
            # plus feedforward latency window.
            meas_dt = durations.get("measure", 0, "dt")
            x_dt = durations.get("x", 0, "dt")
            ff_dt = self.ff_delay
            delay1 = timing.round_delay(
                time=((meas_dt - ff_dt - 2 * x_dt) / 2) * timing.dt
            )
            delay2 = timing.round_delay(time=(ff_dt - x_dt) * timing.dt)
            circ.h(qubits[-1])
            circ.barrier()
            circ.cx(qubits[-1], qubits[0])
            circ.barrier()
            circ.x([qubits[0]])
            circ.append(Delay(delay1, "dt"), [qubits[0]], [])
            circ.x([qubits[0]])
            circ.append(Delay(delay1, "dt"), [qubits[0]], [])
            circ.x([qubits[0]])
            circ.append(Delay(delay2, "dt"), [qubits[0]], [])
            circ.x([qubits[0]])
            circ.measure(qubits[-1], clbits[-1])
            circ.barrier()
            with circ.if_test((clbits[-1], 1)):
                circ.x(qubits[0])
                circ.x(qubits[-1])
        elif ff_type == "H_CNOT_MDD":
            # Dynamical decoupling on the data qubit spanning only the
            # measurement window (no feedforward delay compensation).
            meas_dt = durations.get("measure", 0, "dt")
            x_dt = durations.get("x", 0, "dt")
            delay_quarter = timing.round_delay(
                time=((meas_dt - 2 * x_dt) / 4) * timing.dt
            )
            circ.h(qubits[-1])
            circ.barrier()
            circ.cx(qubits[-1], qubits[0])
            circ.barrier()
            circ.barrier()
            circ.x([qubits[0]])
            circ.append(Delay(delay_quarter, "dt"), [qubits[0]], [])
            circ.x([qubits[0]])
            circ.append(Delay(delay_quarter * 2, "dt"), [qubits[0]], [])
            circ.x([qubits[0]])
            circ.append(Delay(delay_quarter, "dt"), [qubits[0]], [])
            circ.x([qubits[0]])
            circ.measure(qubits[-1], clbits[-1])
            circ.barrier()
            with circ.if_test((clbits[-1], 1)):
                circ.x(qubits[0])
                circ.x(qubits[-1])
        elif ff_type == "X_c1":
            circ.x(qubits)
            circ.barrier()
            circ.measure(qubits[-1], clbits[-1])
            with circ.if_test((clbits[-1], 1)):
                circ.x(qubits)
        elif ff_type == "X_c0":
            circ.measure(qubits[-1], clbits[-1])
            with circ.if_test((clbits[-1], 1)):
                circ.x(qubits)
        elif ff_type == "Z_c1":
            circ.z(qubits[:-1])
            circ.x(qubits[-1])
            circ.barrier()
            circ.measure(qubits[-1], clbits[-1])
            with circ.if_test((clbits[-1], 1)):
                circ.z(qubits[:-1])
                circ.x(qubits[-1])
        elif ff_type == "Z_c0":
            circ.measure(qubits[-1], clbits[-1])
            with circ.if_test((clbits[-1], 1)):
                circ.z(qubits[:-1])
                circ.x(qubits[-1])
        elif ff_type == "I_c0":
            circ.measure(qubits[-1], clbits[-1])
            circ.barrier()
            # uses repeated Z instead of identity to make sure it uses the same feedforward timing
            with circ.if_test((clbits[-1], 1)):
                circ.x(qubits[-1])
                circ.z(qubits[:-1])
                circ.barrier()
                circ.z(qubits[:-1])
        elif ff_type == "I_c1":
            circ.x(qubits[-1])
            circ.barrier()
            circ.measure(qubits[-1], clbits[-1])
            # uses repeated Z instead of identity to make sure it uses the same feedforward timing
            with circ.if_test((clbits[-1], 1)):
                circ.x(qubits[-1])
                circ.z(qubits[:-1])
                circ.barrier()
                circ.z(qubits[:-1])
        elif ff_type == "Delay":
            # Reference block: idle for the measurement plus feedforward time
            # without any measurement or conditional operation.
            meas_dt = durations.get("measure", self.physical_qubits[-1], "dt")
            circ.append(Delay(meas_dt, unit="dt"), [qubits[-1]], [])
            circ.barrier()
            circ.append(Delay(self.ff_delay, unit="dt"), [qubits[-1]], [])
        else:
            raise Exception(f"Not supporting {ff_type}")
        return circ

    def _generate_sequences(self, length: int, rng: Generator) -> Iterator[Clifford]:
        """Generate N+1 Clifford sequences with inverse at the end."""
        composed = Clifford([[1, 0], [0, 1]])
        for _ in range(length):
            elm = random_clifford(1, rng)
            composed = composed.compose(elm)
            yield elm
        # Final element undoes the whole sequence so the ideal output is |0>.
        if length > 0:
            yield composed.adjoint()

    def _sequence_to_instructions(self, elm: Clifford) -> List[Instruction]:
        """Single qubit Clifford decomposition with fixed number of physical gates.

        This overrules standard Qiskit transpile protocol and immediately
        apply hard-coded decomposition with respect to the backend basis gates.
        Note that this decomposition ignores global phase.

        This decomposition guarantees constant gate duration per every Clifford.
        """
        if not self.backend:
            return [elm.to_instruction()]
        else:
            basis_gates = self.backend.configuration().basis_gates
            # First decompose into Euler angle rotations.
            theta, phi, lam = self._zyz_decomposition(elm.to_matrix())

            if all(op in basis_gates for op in ("sx", "rz")):
                # Fixed RZ-SX-RZ-SX-RZ template: constant duration per Clifford.
                return [
                    RZGate(lam),
                    SXGate(),
                    RZGate(theta + math.pi),
                    SXGate(),
                    RZGate(phi - math.pi),
                ]
            if "u" in basis_gates:
                return [UGate(theta, phi, lam)]
            raise QiskitError(
                f"Current decomposition mechanism doesn't support basis gates {basis_gates}."
            )

    def _zyz_decomposition(self, mat: np.ndarray):
        # This code is copied from
        # qiskit.quantum_info.synthesis.one_qubit_decompose.OneQubitEulerDecomposer
        su_mat = det(mat) ** (-0.5) * mat
        theta = 2 * math.atan2(abs(su_mat[1, 0]), abs(su_mat[0, 0]))
        phiplambda2 = cmath.phase(su_mat[1, 1])
        phimlambda2 = cmath.phase(su_mat[1, 0])
        phi = phiplambda2 + phimlambda2
        lam = phiplambda2 - phimlambda2

        return theta, phi, lam


class DynamicCircuitsRBAnalysis(SubDecayFit):
    # Fits a per-qubit decay curve for every feedforward block type and
    # optionally plots per-qubit and summary figures.

    def __init__(
        self,
        physical_qubits,
        ff_operations,
        plot_measured_qubit=True,
        plot_summary=True,
    ):
        super().__init__()
        self.physical_qubits = physical_qubits
        self.ff_operations = ff_operations
        self.plot_summary = plot_summary
        self.plot_measured_qubit = plot_measured_qubit

    def _run_analysis(
        self,
        experiment_data,
    ):
        analysis_results, figs = [], []
        q_m = self.physical_qubits[-1]
        for ff_type in self.ff_operations:
            for i, q in enumerate(self.physical_qubits):
                name = f"{ff_type}(Q{q}_M{q_m})"
                # Marginalize counts onto this qubit and fit its P(0) decay.
                self.set_options(
                    data_processor=DataProcessor(
                        "counts", [MarginalizeCounts({i}), Probability("0")]
                    ),
                    result_parameters=[curve.ParameterRepr("alpha", name)],
                    filter_data={"ff_type": ff_type},
                )
                self._name = name
                self.plotter.set_figure_options(
                    xlabel="Number of FF operation",
                    ylabel="P(0)",
                    figure_title=f"Data qubit: {q}, Measured qubit: {q_m} Operation: {ff_type}",
                )
                analysis_result, fig = super()._run_analysis(experiment_data)
                analysis_results += analysis_result
                # Optionally skip the measured qubit's own figure.
                if q == q_m and not self.plot_measured_qubit:
                    continue
                figs += fig

        if self.plot_summary:
            results_fig = Figure(figsize=(6, 4))
            results_separate_fig = Figure(figsize=(len(self.physical_qubits) * 1.4, 4))
            _ = FigureCanvasSVG(results_fig)
            _ = FigureCanvasSVG(results_separate_fig)
            ax = results_fig.subplots(1, 1)
            axs = results_separate_fig.subplots(1, len(self.physical_qubits))
            x = np.arange(len(self.physical_qubits))
            x_ticks = [f"Q{q}" for q in self.physical_qubits]
            for ff_type in self.ff_operations:
                ys, y_errs = [], []
                for i, q in enumerate(self.physical_qubits):
                    # Look up the fitted alpha for this (block, qubit) pair.
                    alpha = next(
                        filter(
                            lambda res: res.name == f"{ff_type}(Q{q}_M{q_m})",
                            analysis_results,
                        )
                    )
                    y, y_err = alpha.value.n, alpha.value.s
                    ys.append(y)
                    y_errs.append(y_err)
                    axs[i].errorbar(
                        [1],
                        y,
                        yerr=y_err,
                        fmt="o",
                        alpha=0.5,
                        capsize=4,
                        markersize=5,
                        label=ff_type,
                    )
                ax.errorbar(
                    x,
                    ys,
                    yerr=y_errs,
                    fmt="o",
                    alpha=0.5,
                    capsize=4,
                    markersize=5,
                    label=ff_type,
                )
            ax.legend()
            ax.set_xticks(x, x_ticks)
            ax.set_title(f"Measured qubit: {q_m}")
            for i, q in enumerate(self.physical_qubits):
                axs[i].set_xticks([1], [f"Q{q}"])
            axs[-1].set_xticks([1], [f"Q{q_m}:M"])
            axs[-1].legend(bbox_to_anchor=(1.05, 1.0), loc="upper left")
            results_separate_fig.tight_layout()
            figs += [results_fig, results_separate_fig]

        return analysis_results, figs

    @classmethod
    def _default_options(cls):
        default_options = super()._default_options()
        default_options.plot_raw_data = True
        default_options.average_method = "sample"

        return default_options
--------------------------------------------------------------------------------