diff --git a/.github/workflows/feature-branches.yml b/.github/workflows/feature-branches.yml
index b0570aa53..5b6712629 100644
--- a/.github/workflows/feature-branches.yml
+++ b/.github/workflows/feature-branches.yml
@@ -16,7 +16,7 @@ on:
jobs:
build:
strategy:
- # fail-fast: true is OK here
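+      # keep running the remaining matrix jobs even if one fails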
+ fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest] # No Mac
python-version: [3.8, '3.11'] # Only extremal Python versions
diff --git a/.gitignore b/.gitignore
index acf96801b..40e9529f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,7 @@
.ipynb_checkpoints
test/test_packages/temp_test_files/*
*_checkpoints/
+*_checkpoint/
jupyter_notebooks/**/offline
test/test_packages/offline
hooks/etc/permissions.yml
@@ -62,6 +63,7 @@ jupyter_notebooks/Tutorials/tutorial_files/exampleBriefReport
jupyter_notebooks/Tutorials/tutorial_files/*.ipynb
jupyter_notebooks/Tutorials/tutorial_files/tempTest
jupyter_notebooks/Tutorials/tutorial_files/*checkpoints
+jupyter_notebooks/Tutorials/objects/advanced/test_ibmq*
jupyter_notebooks/Tutorials/tutorial_files/test_mirror_benchmark
diff --git a/jupyter_notebooks/Tutorials/objects/advanced/IBMQExperiment.ipynb b/jupyter_notebooks/Tutorials/objects/advanced/IBMQExperiment.ipynb
index bc0739218..28ca273b8 100644
--- a/jupyter_notebooks/Tutorials/objects/advanced/IBMQExperiment.ipynb
+++ b/jupyter_notebooks/Tutorials/objects/advanced/IBMQExperiment.ipynb
@@ -7,20 +7,26 @@
"# Running experiments on IBM Q Processors\n",
"This tutorial will demonstrate how to run an experiment on IBM Q Processors. To do so you will need [QisKit](https://qiskit.org/) installed and an [IBM Q account](https://quantum-computing.ibm.com/).\n",
"\n",
+    "There have been major changes to `IBMQExperiment` as of pyGSTi 0.9.13, driven by Qiskit 1.0 and the subsequent deprecations of V1 backends and `qiskit-ibm-provider`. The `IBMQExperiment` class now supports only V2 backends and is based on `qiskit-ibm-runtime`.\n",
+ "\n",
+ "For details on how to migrate from `qiskit<1` or `qiskit-ibm-provider`, see [this blog post](https://www.ibm.com/quantum/blog/transition-to-1), [this Qiskit 1.0 migration guide](https://docs.quantum.ibm.com/api/migration-guides/qiskit-1.0-features), or [this Qiskit Runtime migration guide](https://docs.quantum.ibm.com/api/migration-guides/qiskit-runtime).\n",
+ "\n",
"This was last run with QisKit versions:"
]
},
{
- "cell_type": "raw",
+ "cell_type": "code",
+ "execution_count": null,
"metadata": {},
+ "outputs": [],
"source": [
- "qiskit.__qiskit_version__ = {'qiskit-terra': '0.25.3', 'qiskit': '0.44.3', 'qiskit-aer': None, 'qiskit-ignis': None, 'qiskit-ibmq-provider': '0.20.2', 'qiskit-nature': None, 'qiskit-finance': None, 'qiskit-optimization': None, 'qiskit-machine-learning': None}\n",
- "qiskit_ibm_provider.__version__ = '0.7.2'"
+ "#qiskit.__version__ = '1.1.1'\n",
+ "#qiskit_ibm_runtime.__version__ = '0.25.0'"
]
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
"metadata": {
"tags": []
},
@@ -42,7 +48,8 @@
},
"outputs": [],
"source": [
- "from qiskit_ibm_provider import IBMProvider"
+ "from qiskit_ibm_runtime import QiskitRuntimeService\n",
+ "from qiskit_ibm_runtime.fake_provider import FakeSherbrooke"
]
},
{
@@ -55,17 +62,21 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# If your first time, you may need to initialize your account with your IBMQ API token\n",
- "#IBMProvider.save_account(token=\"\")"
+ "\n",
+ "# You can also specify instances (i.e. \"ibm-q/open/main\" is the default instance)\n",
+ "# You can also save/load named accounts for different instances, etc. See save_account docs for more information.\n",
+ "\n",
+ "#QiskitRuntimeService.save_account(channel=\"ibm_quantum\", token=\"\", overwrite=True, set_as_default=True)"
]
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"metadata": {
"tags": [
"nbval-skip"
@@ -73,16 +84,47 @@
},
"outputs": [],
"source": [
- "# You can use your own instance if you have different credentials\n",
- "#provider = IBMProvider(instance='ibm-q/open/main')\n",
+ "# Once credentials are saved, the service can be loaded each time:\n",
+ "service = QiskitRuntimeService(channel=\"ibm_quantum\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "nbval-skip"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "# You can list all the available backends to ensure your instance is running properly\n",
+ "service.backends()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "nbval-skip"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "# Can use a physical device...\n",
+ "#backend = service.backend('ibm_sherbrooke')\n",
"\n",
- "# You can leave it blank to use the default for your account\n",
- "provider = IBMProvider()"
+ "# Can also ask for the least busy physical device\n",
+ "backend = service.least_busy()\n",
+ "\n",
+ "# ... or can use a simulated fake backend\n",
+ "sim_backend = FakeSherbrooke()"
]
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
"metadata": {
"tags": [
"nbval-skip"
@@ -90,8 +132,8 @@
},
"outputs": [],
"source": [
- "dev_name = 'ibm_lagos'\n",
- "backend = provider.get_backend(dev_name)"
+ "# Let's see which backend is the least busy!\n",
+ "print(backend)"
]
},
{
@@ -112,17 +154,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Using the configuration files in pygsti.extras.devices (legacy and may not be up-to-date)\n",
- "device = ExperimentalDevice.from_legacy_device('ibmq_bogota')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
"metadata": {
"tags": [
"nbval-skip"
@@ -130,6 +162,9 @@
},
"outputs": [],
"source": [
+ "# Using the configuration files in pygsti.extras.devices (legacy and may not be up-to-date)\n",
+ "#device = ExperimentalDevice.from_legacy_device('ibmq_bogota')\n",
+ "\n",
"# Using the active backend to pull current device specification\n",
"device = ExperimentalDevice.from_qiskit_backend(backend)"
]
@@ -164,17 +199,35 @@
"outputs": [],
"source": [
"#circuit design parameters\n",
- "depths = [0, 2, 4, 16, 32, 64]\n",
+ "depths = [0, 2, 4, 16]\n",
"circuits_per_shape = 20\n",
"\n",
"# dict setting the circuit widths (# qubits) you want to probe \n",
"# and the qubits you want to use at each width\n",
"# You can use device.graph.edges() to make sure these are connected components\n",
+    "def get_N_connected_qubits(device, N, starting_qubits=None):\n",
+    "    # Keep qubits in insertion order so that every prefix is also a connected set\n",
+    "    qubits = list(starting_qubits) if starting_qubits is not None else []\n",
+    "\n",
+    "    for edge in device.graph.edges():\n",
+    "        # Check if connected, and add any new qubits if so\n",
+    "        if not len(qubits) or edge[0] in qubits or edge[1] in qubits:\n",
+    "            qubits.extend(q for q in edge if q not in qubits)\n",
+    "    \n",
+    "        # Check if we can stop\n",
+    "        if len(qubits) >= N:\n",
+    "            break\n",
+    "    \n",
+    "    return qubits[:N]\n",
+ "\n",
+ "max_width = 4\n",
+ "selected_qubits = get_N_connected_qubits(device, max_width)\n",
+ "print(f\"Selected qubits {selected_qubits} for device {backend.name}\")\n",
+ "\n",
"qubit_lists = {}\n",
- "qubit_lists[1] = [('Q0',),]\n",
- "qubit_lists[2] = [('Q0', 'Q1'),]\n",
- "qubit_lists[3] = [('Q0', 'Q1', 'Q2'),]\n",
- "qubit_lists[4] = [('Q0', 'Q1', 'Q2', 'Q3')]\n",
+    "for w in range(1, max_width + 1):\n",
+    "    qubit_lists[w] = [tuple(selected_qubits[:w])]\n",
"\n",
"widths = list(qubit_lists.keys())\n",
"\n",
@@ -226,9 +279,9 @@
"metadata": {},
"source": [
"## Running on IBM Q\n",
- "We're now ready to run on the IBM Q processor. We do this using an `IBMQExperiment` object, which \n",
+ "We're now ready to run on the IBM Q processor. We do this using an `IBMQExperiment` object.\n",
"\n",
- "First it converts pyGSTi circuits into jobs that can be submitted to IBM Q. **This step includes transpiling of the pyGSTi circuits into OpenQASM** (and then into QisKit objects)."
+    "We can enable checkpointing for `IBMQExperiment` objects by providing a path. Checkpointing is enabled by default and is recommended! Here we also pass `checkpoint_override=True` to overwrite any old checkpoint and ensure a clean starting point."
]
},
{
@@ -241,14 +294,17 @@
},
"outputs": [],
"source": [
- "exp = ibmq.IBMQExperiment(combined_edesign, pspec, circuits_per_batch=75, num_shots=1024)"
+ "exp = ibmq.IBMQExperiment(combined_edesign, pspec, circuits_per_batch=75, num_shots=1024, seed=20231201,\n",
+ " checkpoint_path='test_ibmq', checkpoint_override=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "We're now ready to submit this experiment to IBM Q."
+ "First we convert pyGSTi circuits into jobs that can be submitted to IBM Q. **This step includes transpiling of the pyGSTi circuits into OpenQASM** (and then into QisKit objects).\n",
+ "\n",
+    "This can now be done in parallel (with progress bars) using the `num_workers` kwarg!"
]
},
{
@@ -261,14 +317,32 @@
},
"outputs": [],
"source": [
- "exp.submit(backend)"
+ "exp.transpile(backend, num_workers=4)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "nbval-skip"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "# We can simulate having been interrupted by removing the last few transpiled batches\n",
+ "del exp.qiskit_isa_circuit_batches[3:]\n",
+ "\n",
+ "# And now transpilation should only redo the missing batches\n",
+ "# We don't need to reprovide the options as they are saved by the first transpile call\n",
+ "exp.transpile(backend)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "You can then monitor the jobs. If get an error message, you can query the error using `exp['qjob'][i].error_message()` for batch `i`."
+    "If the `IBMQExperiment` object is lost and needs to be reloaded (e.g., the notebook restarts), it can now be loaded from file."
]
},
{
@@ -281,14 +355,14 @@
},
"outputs": [],
"source": [
- "exp.monitor()"
+ "exp2 = ibmq.IBMQExperiment.from_dir('test_ibmq')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "You can then grab the results, **Once you see that all the jobs are complete** (`.retrieve_results()` will just hang if the jobs have not yet completed)."
+    "We're now ready to submit this experiment to IBM Q. Note that we can submit using a different backend than what was used to generate the experiment design. In general, it is not a good idea to mix and match backends for physical devices unless they have the exact same connectivity and qubit labeling; however, it **is** often useful for debugging purposes to use the simulator backend rather than a physical device."
]
},
{
@@ -301,14 +375,14 @@
},
"outputs": [],
"source": [
- "exp.retrieve_results()"
+ "exp2.submit(backend)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "This `IBMQExperiment` object now contains the results of your experiment. It contains much of the information about exactly what was submitted to IBM Q, and raw results objects that IBM Q returned."
+    "You can then monitor the jobs. If you get an error message, you can query the error using `exp.qjobs[i].error_message()` for batch `i`."
]
},
{
@@ -321,14 +395,14 @@
},
"outputs": [],
"source": [
- "print(exp.keys())"
+ "exp2.monitor()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "But, most importantly, it contains the data formatted into a pyGSTi `ProtocolData` object, which is the packaged-up data that pyGSTi analysis proctols use."
+    "Again, the `IBMQExperiment` can be loaded from file if checkpointing is being used. The Qiskit RuntimeJobs are not serialized; however, they can be retrieved from the IBMQ service using their job IDs. To do this, pass `regen_jobs=True` and a `service` to the `from_dir()` call."
]
},
{
@@ -341,14 +415,27 @@
},
"outputs": [],
"source": [
- "data = exp['data']"
+ "exp3 = ibmq.IBMQExperiment.from_dir('test_ibmq', regen_jobs=True, service=service)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "nbval-skip"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "exp3.monitor()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "We can write this data to disk, which saves the `ProtocolData` in the standard pyGSTi format. It also pickles (or JSONs) up all of the additional information contained then `IBMQExperiment` object, e.g., the job IDs, in a subfolder `ibmqexperiment`."
+    "You can then grab the results **once you see that all the jobs are complete** (`.retrieve_results()` will just hang if the jobs have not yet completed)."
]
},
{
@@ -361,16 +448,14 @@
},
"outputs": [],
"source": [
- "exp.write('test_ibmq_experiment')"
+ "exp3.retrieve_results()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "If you only want to load the `ProtocolData` you can do this using pyGSTi's standard `io` functions. We can also load the `IBMQExperiment` object, which will skip unpickling any objects when the unpickling fails (e.g., due to changes in `QisKit`).\n",
- "\n",
- "New in '0.9.12': IBM jobs are no longer pickle-able. Instead, they will be retrieved from the server. However, this requires the provider to be passed in at load time."
+ "This `IBMQExperiment` object now contains the results of your experiment. It contains much of the information about exactly what was submitted to IBM Q, and raw results objects that IBM Q returned."
]
},
{
@@ -383,7 +468,15 @@
},
"outputs": [],
"source": [
- "loaded_exp = ibmq.IBMQExperiment.from_dir('test_ibmq_experiment', provider)"
+ "display(exp3.qjobs)\n",
+ "display(exp3.batch_results)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "But, most importantly, it contains the data formatted into a pyGSTi `ProtocolData` object, which is the packaged-up data that pyGSTi analysis protocols use."
]
},
{
@@ -396,15 +489,14 @@
},
"outputs": [],
"source": [
- "# Now we can run as before\n",
- "loaded_exp.monitor()"
+ "data = exp3.data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Analzing the results\n",
+ "## Analyzing the results\n",
"Because `retrieve_results()` has formatted the data into a `ProctocolData` object, we can just hand this to the analysis protocol(s) that are designed for analyzing this type of data. Here we'll analyze this data using a standard RB curve-fitting analysis."
]
},
@@ -477,7 +569,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.11.5"
+ "version": "3.11.9"
}
},
"nbformat": 4,
diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py
index 2ccb7aaae..8bbcc9c63 100644
--- a/pygsti/circuits/circuit.py
+++ b/pygsti/circuits/circuit.py
@@ -4163,7 +4163,7 @@ def convert_to_openqasm(self, num_qubits=None,
gatename_conversion=None, qubit_conversion=None,
block_between_layers=True,
block_between_gates=False,
- include_delay_on_idle=True,
+ include_delay_on_idle=False,
gateargs_map=None): # TODO
"""
Converts this circuit to an openqasm string.
@@ -4208,9 +4208,9 @@ def convert_to_openqasm(self, num_qubits=None,
include_delay_on_idle: bool, optional
When `True`, includes a delay operation on implicit idles in each layer, as per
Qiskit's OpenQASM 2.0 convention after the deprecation of the id operation.
- Defaults to True, which is commensurate with legacy usage of this function.
- However, this can now be set to False to avoid this behaviour if generating
+ Defaults to False, to avoid this behaviour if generating
actually valid OpenQASM (with no opaque delay instruction) is desired.
+ Can be set to True, which is commensurate with legacy usage of this function.
gateargs_map : dict, optional
If not None, a dict that maps strings (representing pyGSTi standard gate names) to
@@ -4347,6 +4347,7 @@ def convert_to_openqasm(self, num_qubits=None,
# Delay 0 works because of the barrier
# In OpenQASM3, this should probably be a stretch instead
openqasm += 'delay(0)' + ' q[' + str(qubit_conversion[q]) + '];\n'
# Add in a barrier after every circuit layer if block_between_layers==True.
# Including barriers is critical for QCVV testing, circuits should usually
diff --git a/pygsti/extras/devices/experimentaldevice.py b/pygsti/extras/devices/experimentaldevice.py
index 9b9bcdefc..3749fe92f 100644
--- a/pygsti/extras/devices/experimentaldevice.py
+++ b/pygsti/extras/devices/experimentaldevice.py
@@ -63,22 +63,13 @@ def from_qiskit_backend(cls, backend, gate_mapping=None):
-------
Initialized ExperimentalDevice
"""
- try:
- props = backend.properties().to_dict()
-
- qubits = [f'Q{i}' for i in range(len(props['qubits']))]
- # Technically we could read all the gates off and create the actual native pspec
- # This is not how devices functioned in the past, but maybe it is useful. Thoughts?
- edges = [[f'Q{i}' for i in g['qubits']] for g in props['gates'] if g['gate'] == 'cx']
- graph = _QubitGraph(qubits, initial_edges=edges)
- except AttributeError:
- # Probably the simulator backend 32 qubits max with arbitrary connectivity
- qubits = [f'Q{i}' for i in range(32)]
- edges = []
- for i in range(32):
- for j in range(i+1, 32):
- edges.extend([(f'Q{i}', f'Q{j}'), (f'Q{j}', f'Q{i}')])
- graph = _QubitGraph(qubits, initial_edges=edges)
+ # Get qubits
+ num_qubits = backend.num_qubits
+ qubits = [f'Q{i}' for i in range(num_qubits)]
+
+ # Get qubit connectivity
+ edges = [[qubits[edge[0]], qubits[edge[1]]] for edge in backend.coupling_map]
+ graph = _QubitGraph(qubits, initial_edges=edges)
return cls(qubits, graph, gate_mapping)
diff --git a/pygsti/extras/ibmq/__init__.py b/pygsti/extras/ibmq/__init__.py
index 1ad464321..6cfb71200 100644
--- a/pygsti/extras/ibmq/__init__.py
+++ b/pygsti/extras/ibmq/__init__.py
@@ -1,4 +1,4 @@
-""" Exteneral Device Specifications Sub-package """
+""" IBMQ Experiment Sub-package """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
@@ -8,4 +8,4 @@
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
-from .ibmqcore import *
+from .ibmqexperiment import *
diff --git a/pygsti/extras/ibmq/ibmqcore.py b/pygsti/extras/ibmq/ibmqcore.py
deleted file mode 100644
index 74b36c836..000000000
--- a/pygsti/extras/ibmq/ibmqcore.py
+++ /dev/null
@@ -1,433 +0,0 @@
-""" Functions for sending experiments to IBMQ devices and converting the results to pyGSTi objects """
-#***************************************************************************************************
-# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
-# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
-# in this software.
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
-# in compliance with the License. You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
-#***************************************************************************************************
-
-from ... import data as _data
-from ...protocols import ProtocolData as _ProtocolData
-import numpy as _np
-import time as _time
-import json as _json
-import pickle as _pickle
-import os as _os
-import warnings as _warnings
-
-try: import qiskit as _qiskit
-except: _qiskit = None
-
-# Most recent version of QisKit that this has been tested on:
-#qiskit.__qiskit_version__ = {
-# 'qiskit-terra': '0.25.3',
-# 'qiskit': '0.44.3',
-# 'qiskit-aer': None,
-# 'qiskit-ignis': None,
-# 'qiskit-ibmq-provider': '0.20.2',
-# 'qiskit-nature': None,
-# 'qiskit-finance': None,
-# 'qiskit-optimization': None,
-# 'qiskit-machine-learning': None
-#}
-#qiskit_ibm_provider.__version__ = '0.7.2'
-
-_attribute_to_json = ['remove_duplicates', 'randomized_order', 'circuits_per_batch', 'num_shots', 'job_ids']
-_attribute_to_pickle = ['pspec', 'pygsti_circuits', 'pygsti_openqasm_circuits',
- 'qiskit_QuantumCircuits', 'qiskit_QuantumCircuits_as_openqasm',
- 'submit_time_calibration_data', 'qobj', 'batch_result_object'
- ]
-
-
-def reverse_dict_key_bits(counts_dict):
- new_dict = {}
- for key in counts_dict.keys():
- new_dict[key[::-1]] = counts_dict[key]
- return new_dict
-
-
-# NOTE: This is probably duplicative of some other code in pyGSTi
-def partial_trace(ordered_target_indices, input_dict):
- output_dict = {}
- for bitstring in input_dict.keys():
- new_string = ''
- for index in ordered_target_indices:
- new_string += bitstring[index]
- try:
- output_dict[new_string] += input_dict[bitstring]
- except:
- output_dict[new_string] = input_dict[bitstring]
- return output_dict
-
-
-def q_list_to_ordered_target_indices(q_list, num_qubits):
- if q_list is None:
- return list(range(num_qubits))
- else:
- output = []
- for q in q_list:
- assert q[0] == 'Q'
- output.append(int(q[1:]))
- return output
-
-
-class IBMQExperiment(dict):
-
- def __init__(self, edesign, pspec, remove_duplicates=True, randomized_order=True, circuits_per_batch=75,
- num_shots=1024):
- """
- A object that converts pyGSTi ExperimentDesigns into jobs to be submitted to IBM Q, submits these
- jobs to IBM Q and receives the results.
-
- Parameters
- ----------
- edesign: ExperimentDesign
- The ExperimentDesign to be run on IBM Q. This can be a combined experiment design (e.g., a GST
- design combined with an RB design).
-
- pspec: QubitProcessorSpec
- A QubitProcessorSpec that represents the IBM Q device being used. This can be created using the
- extras.devices.create_processor_spec(). The ProcessorSpecs qubit ordering *must* correspond
- to that of the IBM device (which will be the case if you create it using that function).
- I.e., pspecs qubits should be labelled Q0 through Qn-1 and the labelling of the qubits
- should agree with IBM's labelling.
-
- remove_duplicates: bool, optional
- If true, each distinct circuit in `edesign` is run only once. If false, if a circuit is
- repeated multiple times in `edesign` it is run multiple times.
-
- randomized_order: bool, optional
- Whether or not to randomize the order of the circuits in `edesign` before turning them
- into jobs to be submitted to IBM Q.
-
- circuits_per_batch: int, optional
- The circuits in edesign are divded into batches, each containing at most this many
- circuits. The default value of 75 is (or was) the maximal value allowed on the public
- IBM Q devices.
-
- num_shots: int, optional
- The number of samples from / repeats of each circuit.
-
- Returns
- -------
- IBMQExperiment
- An object containing jobs to be submitted to IBM Q, which can then be submitted
- using the methods .submit() and whose results can be grabbed from IBM Q using
- the method .retrieve_results(). This object has dictionary-like access for all of
- the objects it contains (e.g., ['qobj'] is a list of the objects to be submitted to
- IBM Q).
-
- """
-
- self['edesign'] = edesign
- self['pspec'] = pspec
- self['remove_duplicates'] = remove_duplicates
- self['randomized_order'] = randomized_order
- self['circuits_per_batch'] = circuits_per_batch
- self['num_shots'] = num_shots
- # Populated when submitting to IBM Q with .submit()
- self['qjob'] = None
- self['job_ids'] = None
- # Populated when grabbing results from IBM Q with .retrieve_results()
- self['batch_result_object'] = None
- self['data'] = None
-
- circuits = edesign.all_circuits_needing_data.copy()
- if randomized_order:
- if remove_duplicates:
- circuits = list(set(circuits))
- _np.random.shuffle(circuits)
- else:
- assert(not remove_duplicates), "Can only remove duplicates if randomizing order!"
-
- num_batches = int(_np.ceil(len(circuits) / circuits_per_batch))
-
- self['pygsti_circuits'] = [[] for i in range(num_batches)]
- self['pygsti_openqasm_circuits'] = [[] for i in range(num_batches)]
- self['qiskit_QuantumCircuits'] = [[] for i in range(num_batches)]
- self['qiskit_QuantumCircuits_as_openqasm'] = [[] for i in range(num_batches)]
- self['submit_time_calibration_data'] = []
- self['qobj'] = []
-
- batch_idx = 0
- for circ_idx, circ in enumerate(circuits):
- self['pygsti_circuits'][batch_idx].append(circ)
- if len(self['pygsti_circuits'][batch_idx]) == circuits_per_batch:
- batch_idx += 1
-
- #create Qiskit quantum circuit for each circuit defined in experiment list
- total_num = 0
-
- #start = _time.time()
- for batch_idx, circuit_batch in enumerate(self['pygsti_circuits']):
- print("Constructing job for circuit batch {} of {}".format(batch_idx + 1, num_batches))
- #openqasm_circuit_ids = []
- for circ_idx, circ in enumerate(circuit_batch):
- pygsti_openqasm_circ = circ.convert_to_openqasm(num_qubits=pspec.num_qubits,
- standard_gates_version='x-sx-rz')
- qiskit_qc = _qiskit.QuantumCircuit.from_qasm_str(pygsti_openqasm_circ)
-
- self['pygsti_openqasm_circuits'][batch_idx].append(pygsti_openqasm_circ)
- self['qiskit_QuantumCircuits'][batch_idx].append(qiskit_qc)
- self['qiskit_QuantumCircuits_as_openqasm'][batch_idx].append(qiskit_qc.qasm())
-
- #print(batch_idx, circ_idx, len(submitted_openqasm_circuits), total_num)
- total_num += 1
-
- self['qobj'].append(_qiskit.compiler.assemble(self['qiskit_QuantumCircuits'][batch_idx], shots=num_shots))
-
- def submit(self, ibmq_backend, start=None, stop=None, ignore_job_limit=True,
- wait_time=1, wait_steps=10):
- """
- Submits the jobs to IBM Q, that implements the experiment specified by the ExperimentDesign
- used to create this object.
-
- Parameters
- ----------
- ibmq_backend: qiskit.providers.ibmq.ibmqbackend.IBMQBackend
- The IBM Q backend to submit the jobs to. Should be the backend corresponding to the
- processor that this experiment has been designed for.
-
- start: int, optional
- Batch index to start submission (inclusive). Defaults to None,
- which will start submission on the first unsubmitted job.
- Jobs can be resubmitted by manually specifying this,
- i.e. start=0 will start resubmitting jobs from the beginning.
-
- stop: int, optional
- Batch index to stop submission (exclusive). Defaults to None,
- which will submit as many jobs as possible given the backend's
- maximum job limit.
-
- ignore_job_limit: bool, optional
- If True, then stop is set to submit all remaining jobs. This is set
- as True to maintain backwards compatibility. Note that is more jobs
- are needed than the max limit, this will enter a wait loop until all
- jobs have been successfully submitted.
-
- wait_time: int
- Number of seconds for each waiting step.
-
- wait_steps: int
- Number of steps to take before retrying job submission.
-
- Returns
- -------
- None
- """
-
- #Get the backend version
- backend_version = ibmq_backend.version
-
- total_waits = 0
- self['qjob'] = [] if self['qjob'] is None else self['qjob']
- self['job_ids'] = [] if self['job_ids'] is None else self['job_ids']
-
- # Set start and stop to submit the next unsubmitted jobs if not specified
- if start is None:
- start = len(self['qjob'])
-
- if stop is not None:
- stop = min(stop, len(self['qobj']))
- elif ignore_job_limit:
- stop = len(self['qobj'])
- else:
- job_limit = ibmq_backend.job_limit()
- allowed_jobs = job_limit.maximum_jobs - job_limit.active_jobs
- if start + allowed_jobs < len(self['qobj']):
- print(f'Given job limit and active jobs, only {allowed_jobs} can be submitted')
-
- stop = min(start + allowed_jobs, len(self['qobj']))
-
- #if the backend version is 1 I believe this should correspond to the use of the legacy
- #qiskit-ibmq-provider API which supports passing in Qobj objects for specifying experiments
- #if the backend version is 2 I believe this should correspond to the new API in qiskit-ibm-provider.
- #This new API doesn't support passing in Qobjs into the run method for backends, so we need
- #to pass in the list of QuantumCircuit objects directly.
- if backend_version == 1:
- batch_iterator = enumerate(self['qobj'])
- elif backend_version >= 2:
- batch_iterator = enumerate(self['qiskit_QuantumCircuits'])
-
- for batch_idx, batch in batch_iterator:
- if batch_idx < start or batch_idx >= stop:
- continue
-
- print("Submitting batch {}".format(batch_idx + 1))
- submit_status = False
- batch_waits = 0
- while not submit_status:
- try:
- backend_properties = ibmq_backend.properties()
- #If using a simulator backend then backend_properties is None
- if not ibmq_backend.simulator:
- self['submit_time_calibration_data'].append(backend_properties.to_dict())
- #if using the new API we need to pass in the number of shots.
- if backend_version == 1:
- self['qjob'].append(ibmq_backend.run(batch))
- else:
- self['qjob'].append(ibmq_backend.run(batch, shots = self['num_shots']))
-
- status = self['qjob'][-1].status()
- initializing = True
- initializing_steps = 0
- while initializing:
- if status.name == 'INITIALIZING' or status.name == 'VALIDATING':
- #print(status)
- status = self['qjob'][-1].status()
- print(' - {} (query {})'.format(status, initializing_steps))
- _time.sleep(1)
- initializing_steps += 1
- else:
- initializing = False
- #print(" -Done intializing. Job status is {}".format(status.name))
- #print(status)
- try:
- job_id = self['qjob'][-1].job_id()
- print(' - Job ID is {}'.format(job_id))
- self['job_ids'].append(job_id)
- except:
- print(' - Failed to get job_id.')
- self['job_ids'].append(None)
- try:
- print(' - Queue position is {}'.format(self['qjob'][-1].queue_info().position))
- except:
- print(' - Failed to get queue position for batch {}'.format(batch_idx + 1))
- submit_status = True
- except Exception as ex:
- template = "An exception of type {0} occurred. Arguments:\n{1!r}"
- message = template.format(type(ex).__name__, ex.args)
- print(message)
- try:
- print('Machine status is {}.'.format(ibmq_backend.status().status_msg))
- except Exception as ex1:
- print('Failed to get machine status!')
- template = "An exception of type {0} occurred. Arguments:\n{1!r}"
- message = template.format(type(ex).__name__, ex1.args)
- print(message)
- total_waits += 1
- batch_waits += 1
- print("This batch has failed {0} times and there have been {1} total failures".format(
- batch_waits, total_waits))
- print('Waiting ', end='')
- for step in range(wait_steps):
- print('{} '.format(step), end='')
- _time.sleep(wait_time)
- print()
-
- def monitor(self):
- """
- Queries IBM Q for the status of the jobs.
- """
- for counter, qjob in enumerate(self['qjob']):
- status = qjob.status()
- print("Batch {}: {}".format(counter + 1, status))
- if status.name == 'QUEUED':
- print(' - Queue position is {}'.format(qjob.queue_info().position))
-
- # Print unsubmitted for any entries in qobj but not qjob
- for counter in range(len(self['qjob']), len(self['qobj'])):
- print("Batch {}: NOT SUBMITTED".format(counter + 1))
-
- def retrieve_results(self):
- """
- Gets the results of the completed jobs from IBM Q, and processes
- them into a pyGSTi DataProtocol object (stored as the key 'data'),
- which can then be used in pyGSTi data analysis routines (e.g., if this
- was a GST experiment, it can input into a GST protocol object that will
- analyze the data).
- """
- self['batch_result_object'] = []
- #get results from backend jobs and add to dict
- ds = _data.DataSet()
- for exp_idx, qjob in enumerate(self['qjob']):
- print("Querying IBMQ for results objects for batch {}...".format(exp_idx + 1))
- batch_result = qjob.result()
- self['batch_result_object'].append(batch_result)
- #exp_dict['batch_data'] = []
- for i, circ in enumerate(self['pygsti_circuits'][exp_idx]):
- ordered_target_indices = [self['pspec'].qubit_labels.index(q) for q in circ.line_labels]
- counts_data = partial_trace(ordered_target_indices, reverse_dict_key_bits(batch_result.get_counts(i)))
- #exp_dict['batch_data'].append(counts_data)
- ds.add_count_dict(circ, counts_data)
-
- self['data'] = _ProtocolData(self['edesign'], ds)
-
- def write(self, dirname=None):
- """
- Writes to disk, storing both the pyGSTi DataProtocol object in pyGSTi's standard
- format and saving all of the IBM Q submission information stored in this object,
- written into the subdirectory 'ibmqexperiment'.
-
- Parameters
- ----------
- dirname : str
- The *root* directory to write into. This directory will have
- an 'edesign' subdirectory, which will be created if needed and
- overwritten if present. If None, then the path this object
- was loaded from is used (if this object wasn't loaded from disk,
- an error is raised).
-
- """
- if dirname is None:
- dirname = self['edesign']._loaded_from
- if dirname is None: raise ValueError("`dirname` must be given because there's no default directory")
-
- self['data'].write(dirname)
-
- dict_to_json = {atr: self[atr] for atr in _attribute_to_json}
-
- _os.mkdir(dirname + '/ibmqexperiment')
- with open(dirname + '/ibmqexperiment/meta.json', 'w') as f:
- _json.dump(dict_to_json, f, indent=4)
-
- for atr in _attribute_to_pickle:
- with open(dirname + '/ibmqexperiment/{}.pkl'.format(atr), 'wb') as f:
- _pickle.dump(self[atr], f)
-
- @classmethod
- def from_dir(cls, dirname, provider=None):
- """
- Initialize a new IBMQExperiment object from `dirname`.
-
- Parameters
- ----------
- dirname : str
- The directory name.
-
- provider: IBMProvider
- Provider used to retrieve qjob objects from job_ids
-
- Returns
- -------
- IBMQExperiment
- """
- ret = cls.__new__(cls)
- with open(dirname + '/ibmqexperiment/meta.json', 'r') as f:
- from_json = _json.load(f)
- ret.update(from_json)
-
- for atr in _attribute_to_pickle:
- with open(dirname + '/ibmqexperiment/{}.pkl'.format(atr), 'rb') as f:
- try:
- ret[atr] = _pickle.load(f)
- except:
- _warnings.warn("Couldn't unpickle {}, so skipping this attribute.".format(atr))
- ret[atr] = None
-
- if provider is None:
- _warnings.warn("No provider specified, cannot retrieve IBM jobs")
- else:
- ret['qjob'] = []
- for i, jid in enumerate(ret['job_ids']):
- print(f"Loading job {i+1}/{len(ret['job_ids'])}...")
- ret['qjob'].append(provider.backend.retrieve_job(jid))
-
- try:
- ret['data'] = _ProtocolData.from_dir(dirname)
- except:
- pass
-
- return ret
diff --git a/pygsti/extras/ibmq/ibmqexperiment.py b/pygsti/extras/ibmq/ibmqexperiment.py
new file mode 100644
index 000000000..27996265f
--- /dev/null
+++ b/pygsti/extras/ibmq/ibmqexperiment.py
@@ -0,0 +1,636 @@
+""" Functions for sending experiments to IBMQ devices and converting the results to pyGSTi objects """
+#***************************************************************************************************
+# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
+# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
+# in this software.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+# in compliance with the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
+#***************************************************************************************************
+
+from datetime import datetime as _datetime
+from functools import partial as _partial
+import json as _json
+import numpy as _np
+import os as _os
+from pathos import multiprocessing as _mp
+import pathlib as _pathlib
+import pickle as _pickle
+import time as _time
+import tqdm as _tqdm
+import warnings as _warnings
+
+# Try to load Qiskit
+try:
+ import qiskit as _qiskit
+ from qiskit.providers import JobStatus as _JobStatus
+except ImportError:
+    _qiskit = None
+
+# Try to load IBM Runtime
+try:
+ from qiskit_ibm_runtime import SamplerV2 as _Sampler
+ from qiskit_ibm_runtime import Session as _Session
+ from qiskit_ibm_runtime import RuntimeJobV2 as _RuntimeJobV2
+ from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager as _pass_manager
+except ImportError: _Sampler = None
+
+# Most recent version of QisKit that this has been tested on:
+#qiskit.__version__ = '1.1.1'
+#qiskit_ibm_runtime.__version__ = '0.25.0'
+# Note that qiskit<1.0 is going EOL in August 2024,
+# and V1 backends are also being deprecated (we now support only V2).
+# qiskit-ibm-provider is likewise being deprecated,
+# so only qiskit-ibm-runtime is supported here
+
+try:
+ from bson import json_util as _json_util
+except ImportError:
+ _json_util = None
+
+from pygsti import data as _data, io as _io
+from pygsti.protocols import ProtocolData as _ProtocolData, HasProcessorSpec as _HasPSpec
+from pygsti.protocols.protocol import _TreeNode
+from pygsti.io import metadir as _metadir
+
+
+# Needs to be defined first for multiprocessing reasons
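+# (module-level functions can be serialized and dispatched to pool workers)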
+def _transpile_batch(circs, pass_manager, qasm_convert_kwargs):
+ batch = []
+ for circ in circs:
+ # TODO: Replace this with direct to qiskit
+ pygsti_openqasm_circ = circ.convert_to_openqasm(**qasm_convert_kwargs)
+ qiskit_qc = _qiskit.QuantumCircuit.from_qasm_str(pygsti_openqasm_circ)
+ batch.append(qiskit_qc)
+
+ # Run pass manager on batch
+ return pass_manager.run(batch)
+
+
+class IBMQExperiment(_TreeNode, _HasPSpec):
+ """
+ A object that converts pyGSTi ExperimentDesigns into jobs to be submitted to IBM Q, submits these
+ jobs to IBM Q and receives the results.
+ """
+
+ @classmethod
+ def from_dir(cls, dirname, regen_jobs=False, service=None, new_checkpoint_path=None):
+ """
+ Initialize a new IBMQExperiment object from `dirname`.
+
+ Parameters
+ ----------
+ dirname : str
+ The directory name.
+
+ regen_jobs: bool, optional
+            Whether to recreate the RuntimeJobs from IBMQ based on the job IDs.
+ Defaults to False. You should set this to True if you would like to
+ call monitor() or retrieve_results().
+
+ service: QiskitRuntimeService
+ Service used to retrieve RuntimeJobs from IBMQ based on job_ids
+ (if regen_jobs is True).
+
+ new_checkpoint_path: str, optional
+ A string for the path to use for writing intermediate checkpoint
+ files to disk. If None, this defaults to using the same checkpoint
+ as the serialized IBMQExperiment object. If provided, this will be
+ the new checkpoint path used moving forward. Note that this can be
+ the desired {dirname} for an eventual `write({dirname})` call, i.e. the
+ serialized IBMQExperiment checkpoint after a successful `retrieve_results()`
+ is equivalent to the serialized IBMQExperiment after `write()`.
+
+ Returns
+ -------
+ IBMQExperiment
+ """
+ p = _pathlib.Path(dirname)
+ edesign = _io.read_edesign_from_dir(dirname)
+
+ try:
+ exp_dir = p / 'ibmqexperiment'
+ attributes_from_meta = _io.load_meta_based_dir(exp_dir)
+
+ # Don't override checkpoint during this construction
+ ret = cls(edesign, None, disable_checkpointing=True)
+ ret.__dict__.update(attributes_from_meta)
+ ret.edesign = edesign
+ except KeyError:
+ _warnings.warn("Failed to load ibmqexperiment, falling back to old serialization format logic")
+
+ # Don't override checkpoint during this construction
+ ret = cls(edesign, None, disable_checkpointing=True)
+ with open(p / 'ibmqexperiment' / 'meta.json', 'r') as f:
+ from_json = _json.load(f)
+ ret.__dict__.update(from_json)
+
+ # Old keys to new class members
+ key_attr_map = {
+ 'pspec': ('processor_spec', None),
+ 'pygsti_circuits': ('pygsti_circuit_batches', []),
+ 'pygsti_openqasm_circuits': ('qasm_circuit_batches', []),
+ 'submit_time_calibration_data': ('submit_time_calibration_data', []),
+ 'batch_result_object': ('batch_results', [])
+ }
+
+ for key, (attr, def_val) in key_attr_map.items():
+            with open(p / 'ibmqexperiment' / f'{key}.pkl', 'rb') as f:
+ try:
+ setattr(ret, attr, _pickle.load(f))
+ except:
+ _warnings.warn(f"Couldn't unpickle {key}, so setting {attr} to {def_val}.")
+ setattr(ret, attr, def_val)
+
+ # Handle nonstandard serialization
+ try:
+ data = _ProtocolData.from_dir(p, preloaded_edesign=edesign)
+ ret.data = data
+ except:
+ pass
+
+ if ret.qiskit_isa_circuit_batches is None:
+ ret.qiskit_isa_circuit_batches = []
+
+ # Regenerate Qiskit RuntimeJobs
+ ret.qjobs = []
+ if regen_jobs:
+ assert _Sampler is not None, "Could not import qiskit-ibm-runtime, needed for regen_jobs=True"
+ assert service is not None, "No service specified, cannot retrieve IBM jobs"
+ ret._retrieve_jobs(service=service)
+
+ # Update checkpoint path if requested
+ if new_checkpoint_path is not None:
+ ret.checkpoint_path = new_checkpoint_path
+ if not ret.disable_checkpointing:
+ ret.write(ret.checkpoint_path)
+
+ return ret
+
+ def __init__(self, edesign, pspec, remove_duplicates=True, randomized_order=True, circuits_per_batch=75,
+ num_shots=1024, seed=None, checkpoint_path=None, disable_checkpointing=False, checkpoint_override=False):
+ _TreeNode.__init__(self, None, None)
+
+ self.auxfile_types = {}
+ _HasPSpec.__init__(self, pspec)
+
+ self.edesign = edesign
+ self.remove_duplicates = remove_duplicates
+ self.randomized_order = randomized_order
+ self.circuits_per_batch = circuits_per_batch
+ self.num_shots = num_shots
+ self.seed = seed
+ self.checkpoint_path = str(checkpoint_path) if checkpoint_path is not None else 'ibmqexperiment_checkpoint'
+ self.disable_checkpointing = disable_checkpointing
+ # Populated with transpiling to IBMQ with .transpile()
+ self.pygsti_circuit_batches = []
+ self.qiskit_isa_circuit_batches = []
+ # Populated when submitting to IBM Q with .submit()
+ self.qjobs = []
+ self.job_ids = []
+ self.submit_time_calibration_data = []
+ # Populated when grabbing results from IBM Q with .retrieve_results()
+ self.batch_results = []
+ self.data = None
+
+ # If not in this list, will be automatically dumped to meta.json
+ # 'none' means it will not be read in, 'reset' means it will come back in as None
+ # Several of these could be stored in the meta.json but are kept external for easy chkpts
+ # DEV NOTE: If any of these change, make sure to update the checkpointing code appropriately
+ self.auxfile_types['edesign'] = 'none'
+ self.auxfile_types['data'] = 'reset'
+ # self.processor_spec is handled by _HasPSpec base class
+ self.auxfile_types['pygsti_circuit_batches'] = 'list:text-circuit-list'
+ self.auxfile_types['qiskit_isa_circuit_batches'] = 'list:qpy'
+ self.auxfile_types['qjobs'] = 'none'
+ self.auxfile_types['job_ids'] = 'json'
+ self.auxfile_types['batch_results'] = 'none' # TODO: Fix this
+ if _json_util is not None:
+ self.auxfile_types['submit_time_calibration_data'] = 'list:json'
+ else:
+ # Fall back to pickles if we do not have bson to deal with datetime.datetime
+ self.auxfile_types['submit_time_calibration_data'] = 'pickle'
+
+ if not self.disable_checkpointing:
+ chkpath = _pathlib.Path(self.checkpoint_path)
+ if chkpath.exists() and not checkpoint_override:
+ raise RuntimeError(f"Checkpoint {self.checkpoint_path} already exists. Either "
+ + "specify a different checkpoint_path, set checkpoint_override=True to clobber the current checkpoint,"
+ + " or turn checkpointing off via disable_checkpointing=True (not recommended)."
+ )
+ self.write(chkpath)
+
+ def monitor(self):
+ """
+ Queries IBM Q for the status of the jobs.
+ """
+ assert _qiskit is not None, "Could not import qiskit, needed for monitor()"
+ assert len(self.qjobs) == len(self.job_ids), \
+ "Mismatch between jobs and job ids! If loading from file, use the regen_jobs=True option in from_dir()."
+
+ for counter, qjob in enumerate(self.qjobs):
+ status = qjob.status()
+ print(f"Batch {counter + 1}: {status}")
+ if status in [_JobStatus.QUEUED, 'QUEUED']:
+ try:
+ print(f' - Queue position is {qjob.queue_position(True)}')
+ except Exception:
+ print(' - Unable to retrieve queue position')
+ if isinstance(self.qjobs[-1], _RuntimeJobV2):
+ print(' (because queue position not available in RuntimeJobV2)')
+ try:
+ metrics = qjob.metrics()
+ start_time = _datetime.fromisoformat(metrics["estimated_start_time"])
+ local_time = start_time.astimezone()
+ print(f' - Estimated start time: {local_time.strftime("%Y-%m-%d %H:%M:%S")} (local timezone)')
+ except Exception:
+                print(' - Unable to retrieve estimated start time')
+ elif status in [_JobStatus.ERROR, 'ERROR']:
+ try:
+ print(f' - Error logs: {qjob.logs()}')
+ except Exception:
+                    print(' - Unable to access error logs')
+
+ # Print unsubmitted for any entries in qobj but not qjob
+ for counter in range(len(self.qjobs), len(self.qiskit_isa_circuit_batches)):
+ print(f"Batch {counter + 1}: NOT SUBMITTED")
+
+ def retrieve_results(self):
+ """
+ Gets the results of the completed jobs from IBM Q, and processes
+ them into a pyGSTi DataProtocol object (stored as the key 'data'),
+ which can then be used in pyGSTi data analysis routines (e.g., if this
+ was a GST experiment, it can input into a GST protocol object that will
+ analyze the data).
+ """
+ assert len(self.qjobs) == len(self.job_ids), \
+ "Mismatch between jobs and job ids! If loading from file, use the regen_jobs=True option in from_dir()."
+
+ def reverse_dict_key_bits(counts_dict):
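+            # Qiskit reports counts with qubit 0 as the rightmost (least-significant) bit;
+            # pyGSTi expects qubit 0 leftmost, so reverse each bitstring key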
+ new_dict = {}
+ for key in counts_dict.keys():
+ new_dict[key[::-1]] = counts_dict[key]
+ return new_dict
+
+ # NOTE: This is probably duplicative of some other code in pyGSTi
+ def partial_trace(ordered_target_indices, input_dict):
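+            # Marginalize counts onto the target qubit positions, summing over all other qubits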
+ output_dict = {}
+ for bitstring in input_dict.keys():
+ new_string = ''
+ for index in ordered_target_indices:
+ new_string += bitstring[index]
+ try:
+ output_dict[new_string] += input_dict[bitstring]
+ except:
+ output_dict[new_string] = input_dict[bitstring]
+ return output_dict
+
+ if len(self.batch_results):
+ print(f'Already retrieved results of {len(self.batch_results)}/{len(self.qiskit_isa_circuit_batches)} circuit batches')
+
+ #get results from backend jobs and add to dict
+ ds = _data.DataSet()
+ for exp_idx in range(len(self.batch_results), len(self.qjobs)):
+ qjob = self.qjobs[exp_idx]
+ print(f"Querying IBMQ for results objects for batch {exp_idx + 1}...")
+ batch_result = qjob.result()
+ self.batch_results.append(batch_result)
+
+ if not self.disable_checkpointing:
+ self._write_checkpoint()
+
+ for i, circ in enumerate(self.pygsti_circuit_batches[exp_idx]):
+ ordered_target_indices = [self.processor_spec.qubit_labels.index(q) for q in circ.line_labels]
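+                # Index the SamplerV2 result per circuit; 'cr' is assumed to be the classical
+                # register name produced by pyGSTi's OpenQASM conversion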
+ counts_data = partial_trace(ordered_target_indices, reverse_dict_key_bits(batch_result[i].data.cr.get_counts()))
+ ds.add_count_dict(circ, counts_data)
+
+ self.data = _ProtocolData(self.edesign, ds)
+
+ if not self.disable_checkpointing:
+ self.data.write(self.checkpoint_path, edesign_already_written=True)
+
+ def submit(self, ibmq_backend, start=None, stop=None, ignore_job_limit=True, wait_time=5, max_attempts=10):
+ """
+ Submits the jobs to IBM Q, that implements the experiment specified by the ExperimentDesign
+ used to create this object.
+
+ Parameters
+ ----------
+        ibmq_backend: qiskit.providers.BackendV2
+            The IBM Q backend to submit the jobs to. Must be a V2 backend. Should be the
+            backend corresponding to the processor that this experiment has been designed for.
+
+ start: int, optional
+ Batch index to start submission (inclusive). Defaults to None,
+ which will start submission on the first unsubmitted job.
+ Jobs can be resubmitted by manually specifying this,
+ i.e. start=0 will start resubmitting jobs from the beginning.
+
+ stop: int, optional
+ Batch index to stop submission (exclusive). Defaults to None,
+ which will submit as many jobs as possible given the backend's
+ maximum job limit.
+
+ ignore_job_limit: bool, optional
+ If True, then stop is set to submit all remaining jobs. This is set
+            as True to maintain backwards compatibility. Note that if more jobs
+ are needed than the max limit, this will enter a wait loop until all
+ jobs have been successfully submitted.
+
+        wait_time: int
+            Number of seconds to wait between status queries and retry attempts.
+
+        max_attempts: int
+            Maximum number of submission attempts per batch before giving up.
+
+ Returns
+ -------
+ None
+ """
+ assert _qiskit is not None, "Could not import qiskit, needed for submit()"
+ assert _Sampler is not None, "Could not import qiskit-ibm-runtime, needed for submit()"
+
+ assert len(self.qiskit_isa_circuit_batches) == len(self.pygsti_circuit_batches), \
+            "Transpilation missing! Either run .transpile() first, or if loading from file, " + \
+            "ensure the checkpoint contains the transpiled circuit batches."
+
+ #Get the backend version
+ backend_version = ibmq_backend.version
+ assert backend_version >= 2, "IBMQExperiment no longer supports v1 backends due to their deprecation by IBM"
+
+ total_waits = 0
+ self.qjobs = [] if self.qjobs is None else self.qjobs
+ self.job_ids = [] if self.job_ids is None else self.job_ids
+
+ # Set start and stop to submit the next unsubmitted jobs if not specified
+ if start is None:
+ start = len(self.qjobs)
+
+ stop = len(self.qiskit_isa_circuit_batches) if stop is None else min(stop, len(self.qiskit_isa_circuit_batches))
+ if not ignore_job_limit:
+ job_limit = ibmq_backend.job_limit()
+ allowed_jobs = job_limit.maximum_jobs - job_limit.active_jobs
+ if start + allowed_jobs < stop:
+ print(f'Given job limit and active jobs, only {allowed_jobs} can be submitted')
+
+ stop = min(start + allowed_jobs, stop)
+
+ ibmq_session = _Session(backend = ibmq_backend)
+ sampler = _Sampler(session=ibmq_session)
+
+ for batch_idx, batch in enumerate(self.qiskit_isa_circuit_batches):
+ if batch_idx < start or batch_idx >= stop:
+ continue
+
+ print(f"Submitting batch {batch_idx + 1}")
+ submit_status = False
+ batch_waits = 0
+ while not submit_status and batch_waits < max_attempts:
+ try:
+ #If submitting to a real device, get calibration data
+ try:
+ backend_properties = ibmq_backend.properties()
+ self.submit_time_calibration_data.append(backend_properties.to_dict())
+ except AttributeError:
+ # We can't get the properties
+ # Likely this is a fake backend/simulator, append empty submit data
+ self.submit_time_calibration_data.append({})
+
+ # Submit job
+ self.qjobs.append(sampler.run(batch, shots = self.num_shots))
+
+ status = self.qjobs[-1].status()
+ initializing = True
+ initializing_steps = 0
+ while initializing and initializing_steps < max_attempts:
+ if status in [_JobStatus.INITIALIZING, "INITIALIZING", _JobStatus.VALIDATING, "VALIDATING"]:
+ status = self.qjobs[-1].status()
+ print(f' - {status} (query {initializing_steps})')
+ _time.sleep(wait_time)
+ initializing_steps += 1
+ else:
+ initializing = False
+
+ try:
+ job_id = self.qjobs[-1].job_id()
+ print(f' - Job ID is {job_id}')
+ self.job_ids.append(job_id)
+                    except Exception:
+                        print(' - Failed to get job_id.')
+ self.job_ids.append(None)
+
+ try:
+ print(f' - Queue position is {self.qjobs[-1].queue_position()}')
+ except Exception:
+ print(f' - Failed to get queue position for batch {batch_idx + 1}')
+ if isinstance(self.qjobs[-1], _RuntimeJobV2):
+ print(' (because queue position not available in RuntimeJobV2)')
+ try:
+ metrics = self.qjobs[-1].metrics()
+ start_time = _datetime.fromisoformat(metrics["estimated_start_time"])
+ print(f' - Estimated start time: {start_time.astimezone()} (local timezone)')
+ except Exception:
+                        print(' - Unable to retrieve estimated start time')
+
+ submit_status = True
+
+ except Exception as ex:
+ template = " An exception of type {0} occurred. Arguments:\n{1!r}"
+ message = template.format(type(ex).__name__, ex.args)
+ print(message)
+ try:
+ print(' Machine status is {}.'.format(ibmq_backend.status().status_msg))
+ except Exception as ex1:
+ print(' Failed to get machine status!')
+ template = " An exception of type {0} occurred. Arguments:\n{1!r}"
+ message = template.format(type(ex).__name__, ex1.args)
+ print(message)
+ total_waits += 1
+ batch_waits += 1
+ print(f"This batch has failed {batch_waits} times and there have been {total_waits} total failures")
+ print('Waiting', end='')
+ _time.sleep(wait_time)
+ finally:
+ # Checkpoint calibration and job id data
+ if not self.disable_checkpointing:
+ chkpt_path = _pathlib.Path(self.checkpoint_path) / "ibmqexperiment"
+ with open(chkpt_path / 'meta.json', 'r') as f:
+ metadata = _json.load(f)
+
+ _metadir._write_auxfile_member(chkpt_path, 'job_ids', self.auxfile_types['job_ids'], self.job_ids)
+
+ if self.auxfile_types['submit_time_calibration_data'] == 'list:json':
+ # We only need to write the last calibration data
+ filenm = f"submit_time_calibration_data{len(self.submit_time_calibration_data)-1}"
+ _metadir._write_auxfile_member(chkpt_path, filenm, 'json', self.submit_time_calibration_data[-1])
+ metadata['submit_time_calibration_data'].append(None)
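+                        # (the None placeholder keeps meta.json's list length in sync with
+                        # the per-entry aux files on disk)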
+ else:
+ # We are pickling the whole thing, no option to do incremental
+ _metadir._write_auxfile_member(chkpt_path, 'submit_time_calibration_data', 'pickle', self.submit_time_calibration_data)
+
+ with open(chkpt_path / 'meta.json', 'w') as f:
+ _json.dump(metadata, f, indent=4)
+
+ if submit_status is False:
+ raise RuntimeError("Ran out of max attempts and job was still not submitted successfully")
+
+ def transpile(self, ibmq_backend, qiskit_pass_kwargs=None, qasm_convert_kwargs=None, num_workers=1):
+ """Transpile pyGSTi circuits into Qiskit circuits for submission to IBMQ.
+
+ Parameters
+ ----------
+ ibmq_backend:
+ IBM backend to use during Qiskit transpilation
+
+ qiskit_pass_kwargs: dict, optional
+ Additional kwargs to pass in to `generate_preset_pass_manager`.
+ If not defined, the default is {'seed_transpiler': self.seed, 'optimization_level': 0,
+ 'basis_gates': ibmq_backend.operation_names}
+ Note that "optimization_level" is a required argument to the pass manager.
+
+ qasm_convert_kwargs: dict, optional
+ Additional kwargs to pass in to `Circuit.convert_to_openqasm`.
+ If not defined, the default is {'num_qubits': self.processor_spec.num_qubits,
+ 'standard_gates_version': 'x-sx-rz'}
+
+ num_workers: int, optional
+ Number of workers to use for parallel (by batch) transpilation
+ """
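+
+        Examples
+        --------
+        A minimal sketch (assuming `exp` is an `IBMQExperiment` and `backend` is a V2
+        backend obtained from `QiskitRuntimeService`):
+
+        >>> exp.transpile(backend, num_workers=2)
+        """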
+ circuits = self.edesign.all_circuits_needing_data.copy()
+ num_batches = int(_np.ceil(len(circuits) / self.circuits_per_batch))
+
+ if qiskit_pass_kwargs is None:
+ qiskit_pass_kwargs = {}
+ qiskit_pass_kwargs['seed_transpiler'] = qiskit_pass_kwargs.get('seed_transpiler', self.seed)
+ qiskit_pass_kwargs['optimization_level'] = qiskit_pass_kwargs.get('optimization_level', 0)
+ qiskit_pass_kwargs['basis_gates'] = qiskit_pass_kwargs.get('basis_gates', ibmq_backend.operation_names)
+
+ if qasm_convert_kwargs is None:
+ qasm_convert_kwargs = {}
+ qasm_convert_kwargs['num_qubits'] = qasm_convert_kwargs.get('num_qubits', self.processor_spec.num_qubits)
+ qasm_convert_kwargs['standard_gates_version'] = qasm_convert_kwargs.get('standard_gates_version', 'x-sx-rz')
+
+ if not len(self.pygsti_circuit_batches):
+            rand_state = _np.random.RandomState(self.seed)  # TODO: Should this be a different seed than the transpiler's?
+
+ if self.randomized_order:
+ if self.remove_duplicates:
+ circuits = list(set(circuits))
+ rand_state.shuffle(circuits)
+ else:
+ assert(not self.remove_duplicates), "Can only remove duplicates if randomizing order!"
+
+ for batch_idx in range(num_batches):
+ start = batch_idx*self.circuits_per_batch
+ end = min(len(circuits), (batch_idx+1)*self.circuits_per_batch)
+ self.pygsti_circuit_batches.append(circuits[start:end])
+
+ if not self.disable_checkpointing:
+ chkpt_path = _pathlib.Path(self.checkpoint_path) / "ibmqexperiment"
+ with open(chkpt_path / 'meta.json', 'r') as f:
+ metadata = _json.load(f)
+
+ pcbdata = _metadir._write_auxfile_member(chkpt_path, 'pygsti_circuit_batches', self.auxfile_types['pygsti_circuit_batches'], self.pygsti_circuit_batches)
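+        # (each coupling_map entry is a directed pair of qubit indices)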
+ if 'pygsti_circuit_batches' in metadata:
+ metadata['pygsti_circuit_batches'] = pcbdata
+
+ with open(chkpt_path / 'meta.json', 'w') as f:
+ _json.dump(metadata, f)
+
+ if len(self.qiskit_isa_circuit_batches):
+ print(f'Already completed transpilation of {len(self.qiskit_isa_circuit_batches)}/{num_batches} circuit batches')
+ if len(self.qiskit_isa_circuit_batches) == num_batches:
+ return
+
+ pm = _pass_manager(backend=ibmq_backend, **qiskit_pass_kwargs)
+
+ # Set up parallel tasks
+ tasks = [self.pygsti_circuit_batches[i] for i in range(len(self.qiskit_isa_circuit_batches), num_batches)]
+
+        # The pass manager and conversion kwargs are the same for every batch, so partially apply them;
+        # the resulting function takes only a circuit batch (our task elements above)
+ task_fn = _partial(_transpile_batch, pass_manager=pm, qasm_convert_kwargs=qasm_convert_kwargs)
+
+        # NOTE: parallel transpilation (p.imap over num_workers workers) is disabled for now so
+        # that each batch can be checkpointed as it completes; the loop below runs serially
+        # with tqdm progress bars
+        #with _mp.Pool(num_workers) as p:
+        #    isa_circuits = list(_tqdm.tqdm(p.imap(task_fn, tasks), total=len(tasks)))
+ for task in _tqdm.tqdm(tasks):
+ self.qiskit_isa_circuit_batches.append(task_fn(task))
+
+            # Checkpoint the single batch we just transpiled
+            if not self.disable_checkpointing:
+                chkpt_path = _pathlib.Path(self.checkpoint_path) / "ibmqexperiment"
+                with open(chkpt_path / 'meta.json', 'r') as f:
+                    metadata = _json.load(f)
+
+                filenm = f"qiskit_isa_circuit_batches{len(self.qiskit_isa_circuit_batches)-1}"
+                _metadir._write_auxfile_member(chkpt_path, filenm, 'qpy', self.qiskit_isa_circuit_batches[-1])
+                if 'qiskit_isa_circuit_batches' in metadata:
+                    metadata['qiskit_isa_circuit_batches'].append(None)
+
+                with open(chkpt_path / 'meta.json', 'w') as f:
+                    _json.dump(metadata, f)
+
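
For context, the batched transpilation above reduces to the following pattern (a minimal sketch, not pyGSTi's actual `_transpile_batch`; the backend, seed value, and toy circuits are stand-ins):

```python
from functools import partial

from qiskit import QuantumCircuit
from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager
from qiskit_ibm_runtime.fake_provider import FakeSherbrooke
from tqdm import tqdm

backend = FakeSherbrooke()

# The default kwargs above correspond to a preset pass manager like this
pm = generate_preset_pass_manager(optimization_level=0, backend=backend,
                                  seed_transpiler=20240101,
                                  basis_gates=backend.operation_names)

def transpile_batch(circuits, pass_manager):
    # Stand-in for pygsti's _transpile_batch (which also converts from OpenQASM)
    return pass_manager.run(circuits)

# Toy batches of qiskit circuits
bell = QuantumCircuit(2)
bell.h(0)
bell.cx(0, 1)
batches = [[bell.copy() for _ in range(3)] for _ in range(4)]

# Partially apply the shared kwargs so each task takes only a batch,
# then loop with a tqdm progress bar, as in the serial path above
task_fn = partial(transpile_batch, pass_manager=pm)
isa_batches = [task_fn(batch) for batch in tqdm(batches)]
```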
+ def write(self, dirname=None):
+ """
+ Writes to disk, storing both the pyGSTi DataProtocol object in pyGSTi's standard
+ format and all of the IBM Q submission information stored in this object, the
+ latter written into the subdirectory 'ibmqexperiment'.
+
+ Parameters
+ ----------
+ dirname : str
+ The *root* directory to write into. This directory will have
+ an 'edesign' subdirectory, which will be created if needed and
+ overwritten if present. If None, then the path this object
+ was loaded from is used (if this object wasn't loaded from disk,
+ an error is raised).
+
+ """
+ if dirname is None:
+ dirname = self.checkpoint_path
+ if dirname is None:
+ raise ValueError("`dirname` must be given because there's no checkpoint or default edesign directory")
+
+ dirname = _pathlib.Path(dirname)
+
+ self.edesign.write(dirname)
+
+ if self.data is not None:
+ self.data.write(dirname, edesign_already_written=True)
+
+ self._write_checkpoint(dirname)
+
+ def _write_checkpoint(self, dirname=None):
+ """Write only the ibmqexperiment part of .write().
+
+ Parameters
+ ----------
+ dirname : str
+ The *root* directory to write into. This directory will have
+ an 'ibmqexperiment' subdirectory, which will be created if needed and
+ overwritten if present. If None, then the checkpoint path this
+ object was created or loaded with is used.
+ """
+ dirname = dirname if dirname is not None else self.checkpoint_path
+ exp_dir = _pathlib.Path(dirname) / 'ibmqexperiment'
+ exp_dir.mkdir(parents=True, exist_ok=True)
+ _io.metadir.write_obj_to_meta_based_dir(self, exp_dir, 'auxfile_types')
+
+ def _retrieve_jobs(self, service):
+ """Retrieves RuntimeJobs from IBMQ based on job_ids.
+
+ Parameters
+ ----------
+ service: QiskitRuntimeService
+ Runtime service used to retrieve RuntimeJobs from IBMQ based on job_ids
+ """
+ for i, jid in enumerate(self.job_ids):
+ print(f"Loading job {i+1}/{len(self.job_ids)}...")
+ self.qjobs.append(service.job(jid))
+
+
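A short usage sketch of the retrieval above (assumes an account was previously saved with `QiskitRuntimeService.save_account`; the job ids are hypothetical):

```python
from qiskit_ibm_runtime import QiskitRuntimeService

service = QiskitRuntimeService()  # loads saved credentials
job_ids = ["cqexample000", "cqexample001"]  # hypothetical ids from a prior submit

qjobs = []
for i, jid in enumerate(job_ids):
    print(f"Loading job {i+1}/{len(job_ids)}...")
    qjobs.append(service.job(jid))  # returns a RuntimeJob handle
```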
diff --git a/pygsti/io/metadir.py b/pygsti/io/metadir.py
index 849da9729..879c44702 100644
--- a/pygsti/io/metadir.py
+++ b/pygsti/io/metadir.py
@@ -10,6 +10,7 @@
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
+import datetime as _dt
import numpy as _np
import scipy.sparse as _sps
import importlib as _importlib
@@ -23,6 +24,13 @@
except ImportError:
_ObjectId = None
+try:
+ # If available, use bson's JSON converter utilities
+ # Allows us to serialize datetime objects, for example
+ from bson import json_util as _json_util
+except ImportError:
+ _json_util = None
+
from pygsti.io import readers as _load
from pygsti.io import writers as _write
from pygsti.baseobjs.nicelyserializable import NicelySerializable as _NicelySerializable
@@ -84,6 +92,7 @@ def _get_auxfile_ext(typ):
elif typ == 'pickle': ext = '.pkl'
elif typ == 'none': ext = '.NA'
elif typ == 'reset': ext = '.NA'
+ elif typ == 'qpy': ext = '.qpy'
else:
#DEPRECATED formats! REMOVE LATER
if typ == 'text-circuit-lists': ext = '.txt'
@@ -146,7 +155,10 @@ def load_meta_based_dir(root_dir, auxfile_types_member='auxfile_types',
ret = {}
with open(str(root_dir / 'meta.json'), 'r') as f:
- meta = _json.load(f)
+ if _json_util is not None:
+ meta = _json.load(f, object_hook=_json_util.object_hook)
+ else:
+ meta = _json.load(f)
#Convert lists => tuples, as we prefer immutable tuples
#for key in meta:
@@ -302,10 +314,21 @@ def should_skip_loading(path):
val = _np.load(pth)
elif typ == 'json':
with open(str(pth), 'r') as f:
- val = _json.load(f)
+ if _json_util is not None:
+ val = _json.load(f, object_hook=_json_util.object_hook)
+ else:
+ val = _json.load(f)
elif typ == 'pickle':
with open(str(pth), 'rb') as f:
val = _pickle.load(f)
+ elif typ == 'qpy':
+ try:
+ import qiskit as _qiskit
+
+ with open(str(pth), 'rb') as f:
+ val = _qiskit.qpy.load(f)
+ except Exception as e:
+ raise RuntimeError("QPY serialization format requested but failed") from e
else:
raise ValueError("Invalid aux-file type: %s" % typ)
@@ -386,7 +409,10 @@ def write_meta_based_dir(root_dir, valuedict, auxfile_types=None, init_meta=None
with open(str(root_dir / 'meta.json'), 'w') as f:
_check_jsonable(meta)
- _json.dump(meta, f)
+ if _json_util is not None:
+ _json.dump(meta, f, indent=4, default=_json_util.default)
+ else:
+ _json.dump(meta, f, indent=4)
def _write_auxfile_member(root_dir, filenm, typ, val):
@@ -451,10 +477,22 @@ def _write_auxfile_member(root_dir, filenm, typ, val):
elif typ == 'json':
with open(str(pth), 'w') as f:
_check_jsonable(val)
- _json.dump(val, f, indent=4)
+ if _json_util is not None:
+ _json.dump(val, f, indent=4, default=_json_util.default)
+ else:
+ _json.dump(val, f, indent=4)
elif typ == 'pickle':
with open(str(pth), 'wb') as f:
_pickle.dump(val, f)
+ elif typ == 'qpy':
+ try:
+ import qiskit as _qiskit
+
+ with open(str(pth), 'wb') as f:
+ _qiskit.qpy.dump(val, f)
+ except Exception as e:
+ raise RuntimeError("QPY serialization format requested but failed") from e
+
else:
raise ValueError("Invalid aux-file type: %s" % typ)
@@ -475,7 +513,10 @@ def _cls_from_meta_json(dirname):
class
"""
with open(str(_pathlib.Path(dirname) / 'meta.json'), 'r') as f:
- meta = _json.load(f)
+ if _json_util is not None:
+ meta = _json.load(f, object_hook=_json_util.object_hook)
+ else:
+ meta = _json.load(f)
return _class_for_name(meta['type']) # class of object to create
@@ -501,7 +542,10 @@ def _obj_to_meta_json(obj, dirname):
meta = {'type': _full_class_name(obj)}
with open(str(_pathlib.Path(dirname) / 'meta.json'), 'w') as f:
_check_jsonable(meta)
- _json.dump(meta, f)
+ if _json_util is not None:
+ _json.dump(meta, f, indent=4, default=_json_util.default)
+ else:
+ _json.dump(meta, f, indent=4)
def write_obj_to_meta_based_dir(obj, dirname, auxfile_types_member, omit_attributes=(),
@@ -633,7 +677,10 @@ def write_dict_to_json_or_pkl_files(d, dirname):
jsonable = _to_jsonable(val)
_check_jsonable(jsonable)
with open(dirname / (key + '.json'), 'w') as f:
- _json.dump(jsonable, f)
+ if _json_util is not None:
+ _json.dump(jsonable, f, indent=4, default=_json_util.default)
+ else:
+ _json.dump(jsonable, f, indent=4)
except Exception as e:
fn = str(dirname / (key + '.json'))
_warnings.warn("Could not write %s (falling back on pickle format):\n" % fn + str(e))
@@ -673,6 +720,8 @@ def _check_jsonable(x):
doesn't contain dicts with non-string-valued keys """
if x is None or isinstance(x, (float, int, str)):
pass # no problem
+ elif _json_util is not None and isinstance(x, _dt.datetime):
+ pass # No problem for datetime.datetime so long as we have bson.json_util
elif isinstance(x, (tuple, list)):
for i, v in enumerate(x):
try:
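To see why `bson.json_util` is pulled in: the stdlib `json` module raises `TypeError` on `datetime` objects (e.g. job submission timestamps), while `json_util.default` and `json_util.object_hook` convert them to and from BSON extended JSON. A sketch (requires the `bson` package, installable via the new `pygsti[serialization]` extra below):

```python
import datetime
import json

from bson import json_util

record = {"submitted": datetime.datetime(2024, 7, 1, 12, 0,
                                         tzinfo=datetime.timezone.utc)}

# json.dumps(record) alone would raise TypeError on the datetime
s = json.dumps(record, default=json_util.default)
roundtrip = json.loads(s, object_hook=json_util.object_hook)

assert roundtrip["submitted"] == record["submitted"]
```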
diff --git a/pygsti/io/readers.py b/pygsti/io/readers.py
index 573826bfe..30fb430c5 100644
--- a/pygsti/io/readers.py
+++ b/pygsti/io/readers.py
@@ -374,7 +374,11 @@ def _replace_strs_with_circuits(x):
if isinstance(x, dict): # this case isn't written anymore - just to read old-format files (TODO REMOVE LATER)
return {_replace_strs_with_circuits(k): _replace_strs_with_circuits(v) for k, v in x.items()}
if isinstance(x, str):
- return std.parse_circuit(x, create_subcircuits=not _Circuit.default_expand_subcircuits)
+ try:
+ return std.parse_circuit(x, create_subcircuits=not _Circuit.default_expand_subcircuits)
+ except ValueError:
+ # Failed to parse; it's possible this string is not a circuit
+ pass
return x
return _replace_strs_with_circuits(obj)
diff --git a/pygsti/protocols/protocol.py b/pygsti/protocols/protocol.py
index 28c2459c3..4523c946a 100644
--- a/pygsti/protocols/protocol.py
+++ b/pygsti/protocols/protocol.py
@@ -1012,6 +1012,102 @@ def map_qubit_labels(self, mapper):
return ExperimentDesign(mapped_circuits, mapped_qubit_labels, mapped_children, self._dirs)
+class CanCreateAllCircuitsDesign(ExperimentDesign):
+ """A type of ExperimentDesign that can create
+ all_circuits_needing_data from subdesigns or other information.
+
+ In cases where all_circuits_needing_data *can* be recreated,
+ i.e. it has not been modified by the user in some unexpected way,
+ this class will ensure that all_circuits_needing_data is skipped
+ during serialization and regenerated during deserialization.
+ """
+ def _create_all_circuits_needing_data(self):
+ """Create all_circuits_needing_data for other information.
+
+ This interface is needed to ensure that all_circuits_needing_data
+ can be regenerated consistently during construction and deserialization.
+ """
+ raise NotImplementedError("Derived classes should implement this")
+
+ @classmethod
+ def from_dir(cls, dirname, parent=None, name=None, quick_load=False):
+ """
+ Initialize a new ExperimentDesign object from `dirname`.
+
+ This is specialized to regenerate all_circuits_needing_data
+ if it was not serialized.
+
+ Parameters
+ ----------
+ dirname : str
+ The *root* directory name (under which there is a 'edesign'
+ subdirectory).
+
+ parent : ExperimentDesign, optional
+ The parent design object, if there is one. Primarily used
+ internally - if in doubt, leave this as `None`.
+
+ name : str, optional
+ The sub-name of the design object being loaded, i.e. the
+ key of this data object beneath `parent`. Only used when
+ `parent` is not None.
+
+ quick_load : bool, optional
+ Setting this to True skips the loading of the potentially long
+ circuit lists. This can be useful when loading takes a long time
+ and all the information of interest lies elsewhere, e.g. in an
+ encompassing results object.
+
+ Returns
+ -------
+ ExperimentDesign
+ """
+ ret = super().from_dir(dirname, parent=parent, name=name, quick_load=quick_load)
+
+ if ret.auxfile_types['all_circuits_needing_data'] == 'reset':
+ ret.all_circuits_needing_data = ret._create_all_circuits_needing_data()
+
+ ret.auxfile_types['all_circuits_needing_data'] = ret.old_all_circuits_type
+ del ret.old_all_circuits_type
+
+ return ret
+
+ def write(self, dirname=None, parent=None):
+ """
+ Write this experiment design to a directory.
+
+ This is specialized to skip writing all_circuits_needing_data
+ if it can be regenerated from other class information.
+
+ Parameters
+ ----------
+ dirname : str
+ The *root* directory to write into. This directory will have
+ an 'edesign' subdirectory, which will be created if needed and
+ overwritten if present. If None, then the path this object
+ was loaded from is used (if this object wasn't loaded from disk,
+ an error is raised).
+
+ parent : ExperimentDesign, optional
+ The parent experiment design, when a parent is writing this
+ design as a sub-experiment-design. Otherwise leave as None.
+
+ Returns
+ -------
+ None
+ """
+ initial_circuits = self._create_all_circuits_needing_data()
+ if self.all_circuits_needing_data == initial_circuits:
+ self.old_all_circuits_type = self.auxfile_types['all_circuits_needing_data']
+ self.auxfile_types['all_circuits_needing_data'] = 'reset'
+
+ super().write(dirname=dirname, parent=parent)
+
+ if self.auxfile_types['all_circuits_needing_data'] == 'reset':
+ self.auxfile_types['all_circuits_needing_data'] = self.old_all_circuits_type
+ del self.old_all_circuits_type
+
+
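In practice the round trip looks like this (a sketch mirroring the new `test_serialization` below; the directory path is arbitrary): `write` flags `all_circuits_needing_data` as 'reset' when it matches the regenerable list, the file is skipped on disk, and `from_dir` rebuilds it.

```python
import pygsti

sub = pygsti.protocols.ExperimentDesign(
    pygsti.circuits.to_circuits(["Gxpi2:0", "Gypi2:0"], line_labels=(0,)))
design = pygsti.protocols.CombinedExperimentDesign([sub])

design.write("example_design_dir")  # circuit list marked 'reset', not written out
loaded = pygsti.protocols.CombinedExperimentDesign.from_dir("example_design_dir")
assert loaded.all_circuits_needing_data == design.all_circuits_needing_data
```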
class CircuitListsDesign(ExperimentDesign):
"""
Experiment design specification that is comprised of multiple circuit lists.
@@ -1200,7 +1296,7 @@ def map_qubit_labels(self, mapper):
self.nested, remove_duplicates=False) # no need to remove duplicates
-class CombinedExperimentDesign(ExperimentDesign): # for multiple designs on the same dataset
+class CombinedExperimentDesign(CanCreateAllCircuitsDesign): # for multiple designs on the same dataset
"""
An experiment design that combines the specifications of one or more "sub-designs".
@@ -1236,9 +1332,42 @@ class CombinedExperimentDesign(ExperimentDesign): # for multiple designs on the
interleave : bool, optional
Whether the circuits of the `sub_designs` should be interleaved to
- form the circuit ordering of this experiment design.
+ form the circuit ordering of this experiment design. DEPRECATED
"""
+ def _create_all_circuits_needing_data(self, sub_designs=None, interleave=False):
+ """Create all_circuits_needing_data for other information.
+
+ This interface is needed to ensure that all_circuits_needing_data
+ can be regenerated consistently during construction and deserialization.
+
+ Parameters
+ ----------
+ sub_designs: dict of ExperimentDesigns, optional
+ Dictionary of subdesigns to use. If not provided, self._vals is used
+ as the subdesigns. Primarily used during initialization, when self._vals
+ is not set yet.
+
+ interleave: bool, optional
+ Whether to interleave the circuits of the subdesigns in the combined list.
+
+ Returns
+ -------
+ all_circuits: list of Circuits
+ The combined all_circuits_needing_data of all subdesigns (duplicates
+ are not removed)
+ """
+ sub_designs = self._vals if sub_designs is None else sub_designs
+ all_circuits = []
+ if interleave:
+ subdesign_circuit_lists = [sub_design.all_circuits_needing_data for sub_design in sub_designs.values()]
+ #zip_longest is like zip, but if the iterables are of different lengths it returns a specified fill value
+ #(default None) in place of the missing elements once an iterable has been exhausted.
+ for circuits in _itertools.zip_longest(*subdesign_circuit_lists):
+ for circuit in circuits:
+ if circuit is not None:
+ all_circuits.append(circuit)
+ else:
+ for des in sub_designs.values():
+ all_circuits.extend(des.all_circuits_needing_data)
+ return all_circuits
+
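To make the interleaving concrete: `zip_longest` rotates through the sub-design circuit lists, padding exhausted lists with `None`, which the inner loop filters out. A self-contained sketch with strings standing in for circuits:

```python
import itertools

list_a = ["a1", "a2", "a3"]
list_b = ["b1"]
list_c = ["c1", "c2"]

interleaved = []
for group in itertools.zip_longest(list_a, list_b, list_c):  # pads with None
    for item in group:
        if item is not None:
            interleaved.append(item)

print(interleaved)  # ['a1', 'b1', 'c1', 'a2', 'c2', 'a3']
```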
@classmethod
def from_edesign(cls, edesign, name):
"""
@@ -1310,19 +1439,7 @@ def __init__(self, sub_designs, all_circuits=None, qubit_labels=None, sub_design
sub_designs = {("**%d" % i): des for i, des in enumerate(sub_designs)}
if all_circuits is None:
- all_circuits = []
- if interleave:
- subdesign_circuit_lists = [sub_design.all_circuits_needing_data for sub_design in sub_designs.values()]
- #zip_longest is like zip, but if the iterables are of different lengths it returns a specified fill value
- #(default None) in place of the missing elements once an iterable has been exhausted.
- for circuits in _itertools.zip_longest(*subdesign_circuit_lists):
- for circuit in circuits:
- if circuit is not None:
- all_circuits.append(circuit)
- else:
- for des in sub_designs.values():
- all_circuits.extend(des.all_circuits_needing_data)
- _lt.remove_duplicates_in_place(all_circuits) # Maybe don't always do this?
+ all_circuits = self._create_all_circuits_needing_data(sub_designs, interleave)
if qubit_labels is None and len(sub_designs) > 0:
first = sub_designs[list(sub_designs.keys())[0]].qubit_labels
@@ -1617,7 +1734,7 @@ def map_qubit_labels(self, mapper):
return SimultaneousExperimentDesign(mapped_edesigns, mapped_circuits, mapped_qubit_labels)
-class FreeformDesign(ExperimentDesign):
+class FreeformDesign(CanCreateAllCircuitsDesign):
"""
Experiment design holding an arbitrary circuit list and meta data.
@@ -1630,6 +1747,19 @@ class FreeformDesign(ExperimentDesign):
The qubits that this experiment design applies to. If None, the
line labels of the first circuit is used.
"""
+
+ def _create_all_circuits_needing_data(self):
+ """Create all_circuits_needing_data for other information.
+
+ This interface is needed to ensure that all_circuits_needing_data
+ can be regenerated consistently during construction and deserialization.
+
+ Returns
+ -------
+ list of Circuits
+ Keys of self.aux_info
+ """
+ return list(self.aux_info.keys())
@classmethod
def from_dataframe(cls, df, qubit_labels=None):
@@ -1683,13 +1813,11 @@ def from_edesign(cls, edesign):
raise ValueError("Cannot convert a %s to a %s!" % (str(type(edesign)), str(cls)))
def __init__(self, circuits, qubit_labels=None):
- if isinstance(circuits, dict):
- self.aux_info = circuits.copy()
- circuits = list(circuits.keys())
- else:
- self.aux_info = {c: None for c in circuits}
- super().__init__(circuits, qubit_labels)
- self.auxfile_types['aux_info'] = 'pickle'
+ self.aux_info = circuits.copy() if isinstance(circuits, dict) else {c: None for c in circuits}
+
+ super().__init__(self._create_all_circuits_needing_data(), qubit_labels)
+
+ self.auxfile_types['aux_info'] = 'circuit-str-json'
def _truncate_to_circuits_inplace(self, circuits_to_keep):
truncated_aux_info = {k: v for k, v in self.aux_info.items() if k in circuits_to_keep}
@@ -1699,7 +1827,10 @@ def _truncate_to_circuits_inplace(self, circuits_to_keep):
def to_dataframe(self, pivot_valuename=None, pivot_value="Value", drop_columns=False):
cdict = _NamedDict('Circuit', None)
for cir, info in self.aux_info.items():
- cdict[cir.str] = _NamedDict('ValueName', 'category', items=info)
+ try:
+ cdict[cir.str] = _NamedDict('ValueName', 'category', items=info)
+ except TypeError as e:
+ raise TypeError("Failed to convert to dataframe. Ensure that aux_info values are dicts!") from e
df = cdict.to_dataframe()
return _process_dataframe(df, pivot_valuename, pivot_value, drop_columns, preserve_order=True)
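A usage sketch of the dict-based constructor and the dataframe round trip (toy circuits and aux values, mirroring the new `test_dataframe_conversion` below):

```python
import pygsti
from pygsti.protocols import FreeformDesign

circuits = pygsti.circuits.to_circuits(["Gxpi2:0", "Gypi2:0"], line_labels=(0,))
design = FreeformDesign({c: {'id': i} for i, c in enumerate(circuits)})

df = design.to_dataframe()               # requires aux_info values to be dicts
design2 = FreeformDesign.from_dataframe(df)
assert [str(c) for c in design2.aux_info] == [str(c) for c in design.aux_info]
```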
diff --git a/pygsti/tools/internalgates.py b/pygsti/tools/internalgates.py
index 5af7e0574..4b03d4eab 100644
--- a/pygsti/tools/internalgates.py
+++ b/pygsti/tools/internalgates.py
@@ -736,6 +736,7 @@ def standard_gatenames_openqasm_conversions(version='u3'):
std_gatenames_to_argmap['Gczr'] = lambda gatearg: ['crz(' + str(gatearg[0]) + ')']
std_gatenames_to_argmap['Gu3'] = lambda gatearg: ['u3(' + str(gatearg[0]) + ', '
+ str(gatearg[1]) + ', ' + str(gatearg[2]) + ')']
+ std_gatenames_to_argmap['Gdelay'] = lambda gatearg: ['delay(' + str(gatearg[0]) + ')']
elif version == 'x-sx-rz':
std_gatenames_to_qasm = {}
@@ -792,6 +793,7 @@ def standard_gatenames_openqasm_conversions(version='u3'):
std_gatenames_to_argmap['Gu3'] = lambda gatearg: ['rz(' + str(gatearg[2]) + ')', 'sx',
'rz(' + str(float(gatearg[0]) + _np.pi) + ')', 'sx',
'rz(' + str(float(gatearg[1]) + _np.pi) + ')']
+ std_gatenames_to_argmap['Gdelay'] = lambda gatearg: ['delay(' + str(gatearg[0]) + ')']
else:
raise ValueError("Unknown version!")
diff --git a/setup.py b/setup.py
index 118040267..fcc5cd2f7 100644
--- a/setup.py
+++ b/setup.py
@@ -46,6 +46,11 @@
'report_pickling': ['pandas'],
'report_pdf_figures': ['matplotlib'],
'html_reports': ['jinja2', 'MarkupSafe'],
+ 'reports': [
+ 'pygsti[report_pickling]',
+ 'pygsti[report_pdf_figures]',
+ 'pygsti[html_reports]'
+ ],
'notebooks': [
'ipython',
'notebook',
@@ -59,32 +64,37 @@
'flake8'
],
'interpygate': ['csaps'],
+ 'serialization': ['bson'],
+ 'ibmq': [
+ 'qiskit>1',
+ 'qiskit-ibm-runtime>=0.17.1',
+ 'tqdm>=4.42.0',
+ 'dill',
+ 'pathos'
+ ],
'testing': [
'pytest',
'pytest-xdist',
'pytest-cov',
+ 'cython', # Don't call this pygsti[extensions] for testing_no_cython logic below
+ 'mpi4py', # Don't call this pygsti[multiprocessor] for no_mpi logic below
'nbval',
- 'csaps',
- 'cvxopt',
- 'cvxpy',
- 'cython',
- 'matplotlib',
- 'mpi4py',
- 'msgpack',
'packaging',
- 'pandas',
'psutil',
'zmq',
- 'jinja2',
'seaborn',
'scipy',
'ply',
'qibo<=0.1.7',
'cirq-core',
- 'notebook',
- 'ipython',
- 'jupyter_server',
- 'torch'
+ 'pygsti[diamond_norm]',
+ 'pygsti[ibmq]',
+ 'pygsti[interpygate]',
+ 'pygsti[msgpack]',
+ 'pygsti[notebooks]',
+ 'pygsti[pytorch]',
+ 'pygsti[reports]',
+ 'pygsti[serialization]'
]
}
@@ -279,7 +289,7 @@ def setup_with_extensions(extensions=None):
'pandas'
],
extras_require=extras,
- python_requires='>=3.5',
+ python_requires='>=3.8',
platforms=["any"],
url='http://www.pygsti.info',
download_url='https://github.com/pyGSTio/pyGSTi/tarball/master',
diff --git a/test/performance/mpi_2D_scaling/mpi_test.py b/test/performance/mpi_2D_scaling/mpi_timings.py
similarity index 100%
rename from test/performance/mpi_2D_scaling/mpi_test.py
rename to test/performance/mpi_2D_scaling/mpi_timings.py
diff --git a/test/performance/mpi_2D_scaling/run.sh b/test/performance/mpi_2D_scaling/run.sh
index b5c77dd13..08595c1c2 100755
--- a/test/performance/mpi_2D_scaling/run.sh
+++ b/test/performance/mpi_2D_scaling/run.sh
@@ -29,4 +29,4 @@ export MKL_NUM_THREADS=1
# Note: These flags are useful on Kahuna to avoid error messages
# But the --mca flags are not necessary for performance
mpirun -np ${NUM_PROCS} --mca pml ucx --mca btl '^openib' \
- python ./mpi_test.py &> ${PREFIX}.out
+ python ./mpi_timings.py &> ${PREFIX}.out
diff --git a/test/unit/conftest.py b/test/unit/conftest.py
new file mode 100644
index 000000000..d27d21835
--- /dev/null
+++ b/test/unit/conftest.py
@@ -0,0 +1,20 @@
+# pytest configuration
+
+# https://stackoverflow.com/a/75438209: makes pytest play nicer with VSCode debugging
+import sys
+import pytest
+
+def is_debugging():
+ if 'debugpy' in sys.modules:
+ return True
+ return False
+
+# Enable stop-on-exception behavior if the debugger is running during a test
+if is_debugging():
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_exception_interact(call):
+ raise call.excinfo.value
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_internalerror(excinfo):
+ raise excinfo.value
\ No newline at end of file
diff --git a/test/unit/extras/ibmq/__init__.py b/test/unit/extras/ibmq/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/test/unit/extras/ibmq/test_ibmqexperiment.py b/test/unit/extras/ibmq/test_ibmqexperiment.py
new file mode 100644
index 000000000..badbd39ff
--- /dev/null
+++ b/test/unit/extras/ibmq/test_ibmqexperiment.py
@@ -0,0 +1,64 @@
+import pygsti
+from pygsti.extras.devices.experimentaldevice import ExperimentalDevice
+from pygsti.extras import ibmq
+from pygsti.processors import CliffordCompilationRules as CCR
+
+class IBMQExperimentTester():
+ @classmethod
+ def setup_class(cls):
+ cls.device = ExperimentalDevice.from_legacy_device('ibmq_bogota')
+ cls.pspec = cls.device.create_processor_spec(['Gc{}'.format(i) for i in range(24)] + ['Gcnot'])
+
+ compilations = {'absolute': CCR.create_standard(cls.pspec, 'absolute', ('paulis', '1Qcliffords'), verbosity=0)}
+
+ mirror_design = pygsti.protocols.MirrorRBDesign(cls.pspec, [0, 2, 4], 10, qubit_labels=('Q0', 'Q1', 'Q2'),
+ clifford_compilations=compilations, sampler='edgegrab', samplerargs=[3/8,])
+ cls.edesign = pygsti.protocols.CombinedExperimentDesign([mirror_design])
+
+
+ def test_init(self):
+ exp1 = ibmq.IBMQExperiment(self.edesign, self.pspec, circuits_per_batch=5, num_shots=1024, seed=20231201,
+ disable_checkpointing=True)
+
+ chkpt = 'test_ibmq_init_checkpoint'
+ exp2 = ibmq.IBMQExperiment(self.edesign, self.pspec, circuits_per_batch=5, num_shots=1024, seed=20231201,
+ checkpoint_path=chkpt, checkpoint_override=True)
+
+ assert exp2.pygsti_circuit_batches == exp1.pygsti_circuit_batches
+
+ exp3 = ibmq.IBMQExperiment.from_dir(chkpt)
+ assert exp3.pygsti_circuit_batches == exp1.pygsti_circuit_batches
+
+ def test_transpile(self):
+ chkpt = 'test_ibmq_transpile_checkpoint'
+ exp1 = ibmq.IBMQExperiment(self.edesign, self.pspec, circuits_per_batch=5, num_shots=1024, seed=20231201,
+ checkpoint_path=chkpt, checkpoint_override=True)
+ exp1.transpile()
+
+ # Test checkpoint load
+ exp2 = ibmq.IBMQExperiment.from_dir(chkpt, regen_circs=True)
+ assert exp2.qiskit_circuit_batches == exp1.qiskit_circuit_batches
+
+ # Test restart
+ del exp2.qiskit_circuit_batches[2:]
+ del exp2.qasm_circuit_batches[2:]
+ exp2.transpile()
+ assert exp2.qiskit_circuit_batches == exp1.qiskit_circuit_batches
+
+ def test_submit(self):
+ chkpt = 'test_ibmq_submit_checkpoint'
+ exp1 = ibmq.IBMQExperiment(self.edesign, self.pspec, circuits_per_batch=5, num_shots=1024, seed=20231201,
+ checkpoint_path=chkpt, checkpoint_override=True)
+ exp1.transpile()
+
+ from qiskit_ibm_runtime.fake_provider import FakeBogotaV2
+ backend = FakeBogotaV2()
+
+ # Submit first 3 jobs
+ exp1.submit(backend, stop=3, max_attempts=1)
+ assert len(exp1.qjobs) == 3
+
+ # Submit rest of jobs
+ exp1.submit(backend, max_attempts=1)
+ assert len(exp1.qjobs) == len(exp1.qasm_circuit_batches)
+
diff --git a/test/unit/objects/test_circuit.py b/test/unit/objects/test_circuit.py
index 49b0daa4e..ba657af41 100644
--- a/test/unit/objects/test_circuit.py
+++ b/test/unit/objects/test_circuit.py
@@ -497,7 +497,7 @@ def test_convert_to_openqasm(self):
ckt = circuit.Circuit([Label('Gxpi2',0), Label(()), Label([Label('Gh',0), Label('Gtdag',1)]),
Label('Gcnot', (0,1))], line_labels=(0,1))
- converted_qasm = ckt.convert_to_openqasm()
+ converted_qasm = ckt.convert_to_openqasm(include_delay_on_idle=True)
#this is really just doing a check if anything has changed. I.e. an integration test.
expected_qasm = 'OPENQASM 2.0;\ninclude "qelib1.inc";\n\nopaque delay(t) q;\n\nqreg q[2];'\
+'\ncreg cr[2];\n\nu3(1.570796326794897, 4.71238898038469, 1.570796326794897) q[0];\ndelay(0) q[1];'\
diff --git a/test/unit/protocols/test_protocols.py b/test/unit/protocols/test_protocols.py
index 1a5d3f4bc..756355a93 100644
--- a/test/unit/protocols/test_protocols.py
+++ b/test/unit/protocols/test_protocols.py
@@ -134,7 +134,8 @@ def test_create_edesign_fromdir_subdirs(self, root_path):
self.assertTrue(all([a == b for a,b in zip(edesign3['subdir2'].all_circuits_needing_data, self.gst_design.circuit_lists[1])]))
def test_map_edesign_sslbls(self):
- for edesign in self.edesigns:
+ edesigns = self._get_tester_edesigns()
+ for edesign in edesigns:
print("Testing edesign of type: ", str(type(edesign)))
orig_qubits = edesign.qubit_labels
for c in edesign.all_circuits_needing_data:
@@ -150,6 +151,92 @@ def test_map_edesign_sslbls(self):
self.assertEqual(mapped_edesign.qubit_labels, mapped_qubits)
for c in mapped_edesign.all_circuits_needing_data:
self.assertTrue(set(c.line_labels).issubset(mapped_qubits))
+
+ @with_temp_path
+ def test_serialization(self, root_path):
+ edesigns = self._get_tester_edesigns()
+ for i, edesign in enumerate(edesigns):
+ print("Testing edesign of type: ", str(type(edesign)))
+ root = pathlib.Path(root_path) / str(i)
+ edesign.write(root)
+ loaded_edesign = type(edesign).from_dir(root)
+ # TODO: We don't have good edesign equality
+ self.assertEqual(set(edesign.all_circuits_needing_data), set(loaded_edesign.all_circuits_needing_data))
+ self.assertEqual(edesign.auxfile_types, loaded_edesign.auxfile_types)
+ self.assertEqual(edesign._vals.keys(), loaded_edesign._vals.keys())
+
+ if isinstance(edesign, (pygsti.protocols.CanCreateAllCircuitsDesign)):
+ # We also need to test that all_circuits_needing_data is not dumped by default
+ self.assertTrue(not (root / 'edesign' / 'all_circuits_needing_data.txt').exists())
+
+ root2 = pathlib.Path(root_path) / f'{i}_2'
+ edesign.all_circuits_needing_data = []
+ edesign.write(root2)
+ loaded_edesign = type(edesign).from_dir(root2)
+ # TODO: We don't have good edesign equality
+ self.assertEqual(set(edesign.all_circuits_needing_data), set(loaded_edesign.all_circuits_needing_data))
+ self.assertEqual(edesign.auxfile_types, loaded_edesign.auxfile_types)
+ self.assertEqual(edesign._vals.keys(), loaded_edesign._vals.keys())
+ self.assertTrue((root2 / 'edesign' / 'all_circuits_needing_data.txt').exists())
+
+ def test_dataframe_conversion(self):
+ # Currently this is just FreeformDesign, but we may add dataframe support to other designs in the future
+ edesigns = self._get_tester_edesigns()
+ freeform_design = edesigns[4]
+
+ df = freeform_design.to_dataframe()
+ freeform_design2 = pygsti.protocols.FreeformDesign.from_dataframe(df)
+
+ for (c1, aux1), (c2, aux2) in zip(freeform_design.aux_info.items(), freeform_design2.aux_info.items()):
+ self.assertEqual(str(c1), str(c2))
+ self.assertEqual(aux1, aux2)
+
+ def _get_tester_edesigns(self):
+ #Create a bunch of experiment designs:
+ from pygsti.protocols import ExperimentDesign, CircuitListsDesign, CombinedExperimentDesign, \
+ SimultaneousExperimentDesign, FreeformDesign, StandardGSTDesign, GateSetTomographyDesign, \
+ CliffordRBDesign, DirectRBDesign, MirrorRBDesign
+ from pygsti.processors import CliffordCompilationRules as CCR
+
+ circuits_on0 = pygsti.circuits.to_circuits(["{}@(0)", "Gxpi2:0", "Gypi2:0"], line_labels=(0,))
+ circuits_on0b = pygsti.circuits.to_circuits(["Gxpi2:0^2", "Gypi2:0^2"], line_labels=(0,))
+ circuits_on1 = pygsti.circuits.to_circuits(["Gxpi2:1^2", "Gypi2:1^2"], line_labels=(1,))
+ circuits_on01 = pygsti.circuits.to_circuits(["Gcnot:0:1", "Gxpi2:0Gypi2:1^2Gcnot:0:1Gxpi:0"],
+ line_labels=(0,1))
+
+ #For GST edesigns
+ mdl = std.target_model()
+ gst_pspec = mdl.create_processor_spec()
+
+ #For RB edesigns
+ pspec = pygsti.processors.QubitProcessorSpec(2, ["Gxpi2", "Gypi2","Gxx"],
+ geometry='line', qubit_labels=(0,1))
+ compilations = {"absolute": CCR.create_standard(pspec, "absolute", ("paulis", "1Qcliffords"), verbosity=0),
+ "paulieq": CCR.create_standard(pspec, "paulieq", ("1Qcliffords", "allcnots"), verbosity=0),
+ }
+
+ pspec1Q = pygsti.processors.QubitProcessorSpec(1, ["Gxpi2", "Gypi2","Gxmpi2", "Gympi2"],
+ geometry='line', qubit_labels=(0,))
+ compilations1Q = {"absolute": CCR.create_standard(pspec1Q, "absolute", ("paulis", "1Qcliffords"), verbosity=0),
+ "paulieq": CCR.create_standard(pspec1Q, "paulieq", ("1Qcliffords", "allcnots"), verbosity=0),
+ }
+
+ edesigns = []
+ edesigns.append(ExperimentDesign(circuits_on0))
+ edesigns.append(CircuitListsDesign([circuits_on0, circuits_on0b]))
+ edesigns.append(CombinedExperimentDesign({'one': ExperimentDesign(circuits_on0),
+ 'two': ExperimentDesign(circuits_on1),
+ 'three': ExperimentDesign(circuits_on01)}, qubit_labels=(0,1)))
+ edesigns.append(SimultaneousExperimentDesign([ExperimentDesign(circuits_on0), ExperimentDesign(circuits_on1)]))
+ edesigns.append(FreeformDesign({c: {'id': i} for i,c in enumerate(circuits_on01)}))
+ edesigns.append(std.create_gst_experiment_design(2))
+ edesigns.append(GateSetTomographyDesign(gst_pspec, [circuits_on0, circuits_on0b]))
+ edesigns.append(CliffordRBDesign(pspec, compilations, depths=[0,2,5], circuits_per_depth=4))
+ edesigns.append(DirectRBDesign(pspec, compilations, depths=[0,2,5], circuits_per_depth=4))
+ edesigns.append(MirrorRBDesign(pspec1Q, depths=[0,2,4], circuits_per_depth=4,
+ clifford_compilations=compilations1Q))
+
+ return edesigns
def test_truncation(self):
from pygsti.protocols import BenchmarkingDesign