
Commit 622554d

[WIP] measurement level 1 for T1 experiment (#857)
* Added check for the metadata as it can be either 'int' or 'tuple' * Added test to t1 to test level_1_measurment analysis * fixed test_t1 * Added Kerneled data analysis to t1_analysis * Added the kerneled analysis to the __init__ files * Fixed bug in parallel experiments were the key can either be 'qibit' or 'qubits' * Fixed test for kerneled T1 experiment to use new analysis. * Added to T1 tutorial section about T1 experiment with kermeled measurement * fixed indents to match * fixed lint * fixed bug in name of the helper * updated documentation. * updated per @yaelbh review * Added extreme delay so the fitting quality will rise * T1 test kerneled measurement - changed number of shots and delays * Added to the quality check for the case where a=-1 and b=1 * updated tutorial * added to the analysis '_format_data' * changed tests * fixed normalization bug fixed normalization calculation changed the criteria to re-normalization to be average slope greater than 0. * black * updated analysis * added release notes * fixed releasenotes
1 parent 1b8bc6d commit 622554d

7 files changed, +333 -5 lines changed

docs/tutorials/t1.rst

Lines changed: 63 additions & 1 deletion
@@ -31,13 +31,19 @@ for qubit 0.
 .. jupyter-execute::
 
     import numpy as np
+    from qiskit.qobj.utils import MeasLevel
     from qiskit_experiments.framework import ParallelExperiment
     from qiskit_experiments.library import T1
-
+    from qiskit_experiments.library.characterization.analysis.t1_analysis import T1KerneledAnalysis
+
     # A T1 simulator
     from qiskit.providers.fake_provider import FakeVigo
     from qiskit.providers.aer import AerSimulator
     from qiskit.providers.aer.noise import NoiseModel
+
+    # A kerneled data simulator
+    from qiskit_experiments.test.mock_iq_backend import MockIQBackend
+    from qiskit_experiments.test.mock_iq_helpers import MockIQT1Helper
 
     # Create a pure relaxation noise model for AerSimulator
     noise_model = NoiseModel.from_backend(
@@ -102,6 +108,62 @@ using ``child_data``
         for result in sub_data.analysis_results():
             print(result)
 
+:math:`T_1` experiments with kerneled measurement
+---------------------------------------------------
+:math:`T_1` experiments can also be run with kerneled measurements.
+If we set the run option ``meas_level=MeasLevel.KERNELED``, the job does not
+discriminate and label the data. In the T1 experiment,
+since we know that :math:`P(1|t=0)=1`, we add a circuit with delay=0
+and another circuit with a very long delay. In this configuration we know that the data starts at
+an [I, Q] point that is close to the logical value '1' and ends at an [I, Q] point
+that is close to the logical value '0'.
+
+
+.. jupyter-execute::
+
+    # Experiment
+    ns = 1e-9
+    mu = 1e-6
+
+    # Qubit properties
+    t1 = [45 * mu, 45 * mu]
+    t2 = [value / 2 for value in t1]
+
+    # We assume our estimate is 10% off the exact value of t1 for qubit 0.
+    t1_estimated_shift = t1[0] / 10
+
+    # We use log-spaced delays because of the noise properties.
+    delays = np.logspace(1, 11, num=23, base=np.exp(1))
+    delays *= ns
+
+    # Add circuits with delay=0 and a long delay so the centers in the IQ plane won't be misplaced.
+    # Without this, the fitting can give wrong results.
+    delays = np.insert(delays, 0, 0)
+    delays = np.append(delays, [t1[0] * 3])
+
+    num_qubits = 2
+    num_shots = 2048
+
+    backend = MockIQBackend(
+        MockIQT1Helper(t1=t1, iq_cluster_centers=[((-5.0, -4.0), (-5.0, 4.0)), ((3.0, 1.0), (5.0, -3.0))],
+                       iq_cluster_width=[1.0, 2.0])
+    )
+
+    # Create a T1 experiment
+    expT1_kerneled = T1(0, delays)
+    expT1_kerneled.analysis = T1KerneledAnalysis()
+    expT1_kerneled.analysis.set_options(p0={"amp": 1, "tau": t1[0] + t1_estimated_shift, "base": 0})
+
+    # Run the experiment
+    expdataT1_kerneled = expT1_kerneled.run(backend=backend, meas_return="avg",
+                                            meas_level=MeasLevel.KERNELED,
+                                            shots=num_shots).block_for_results()
+
+    # Display the results
+    display(expdataT1_kerneled.figure(0))
+    for result in expdataT1_kerneled.analysis_results():
+        print(result)
+
 .. jupyter-execute::
 
     import qiskit.tools.jupyter

qiskit_experiments/library/characterization/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -57,6 +57,7 @@
     :template: autosummary/analysis.rst
 
     T1Analysis
+    T1KerneledAnalysis
     T2RamseyAnalysis
     T2HahnAnalysis
     TphiAnalysis
@@ -81,6 +82,7 @@
     RamseyXYAnalysis,
     T2RamseyAnalysis,
     T1Analysis,
+    T1KerneledAnalysis,
     T2HahnAnalysis,
     TphiAnalysis,
     CrossResonanceHamiltonianAnalysis,
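With the re-export added above, the new analysis should also be importable directly from the characterization package. A minimal sketch (the short import path is inferred from this __init__ change; the deep path is the one used in the tutorial diff):

    # Both import paths should resolve to the same class after this change.
    from qiskit_experiments.library.characterization import T1KerneledAnalysis
    from qiskit_experiments.library.characterization.analysis.t1_analysis import (
        T1KerneledAnalysis as T1KerneledAnalysisDeepPath,
    )

    assert T1KerneledAnalysis is T1KerneledAnalysisDeepPath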

qiskit_experiments/library/characterization/analysis/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -21,6 +21,7 @@
 from .t2ramsey_analysis import T2RamseyAnalysis
 from .t2hahn_analysis import T2HahnAnalysis
 from .t1_analysis import T1Analysis
+from .t1_analysis import T1KerneledAnalysis
 from .tphi_analysis import TphiAnalysis
 from .cr_hamiltonian_analysis import CrossResonanceHamiltonianAnalysis
 from .readout_angle_analysis import ReadoutAngleAnalysis

qiskit_experiments/library/characterization/analysis/t1_analysis.py

Lines changed: 85 additions & 0 deletions
@@ -14,8 +14,12 @@
 """
 from typing import Union
 
+import numpy as np
+from uncertainties import unumpy as unp
+
 import qiskit_experiments.curve_analysis as curve
 from qiskit_experiments.framework import Options
+from qiskit_experiments.curve_analysis.curve_data import CurveData
 
 
 class T1Analysis(curve.DecayAnalysis):
@@ -67,3 +71,84 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]:
             return "good"
 
         return "bad"
+
+
+class T1KerneledAnalysis(curve.DecayAnalysis):
+    r"""A class to analyze T1 experiments with kerneled data.
+
+    # section: see_also
+        qiskit_experiments.curve_analysis.standard_analysis.decay.DecayAnalysis
+
+    """
+
+    @classmethod
+    def _default_options(cls) -> Options:
+        """Default analysis options."""
+        options = super()._default_options()
+        options.curve_drawer.set_options(
+            xlabel="Delay",
+            ylabel="Normalized Projection on the Main Axis",
+            xval_unit="s",
+        )
+        options.result_parameters = [curve.ParameterRepr("tau", "T1", "s")]
+        options.normalization = True
+
+        return options
+
+    def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]:
+        """Algorithmic criteria for whether the fit is good or bad.
+
+        A good fit has:
+            - a reduced chi-squared lower than three,
+            - an absolute amp within [0.9, 1.1],
+            - an absolute base smaller than 0.1,
+            - an amp error smaller than 0.1,
+            - a tau error smaller than its value,
+            - a base error smaller than 0.1.
+        """
+        amp = fit_data.ufloat_params["amp"]
+        tau = fit_data.ufloat_params["tau"]
+        base = fit_data.ufloat_params["base"]
+
+        criteria = [
+            fit_data.reduced_chisq < 3,
+            abs(amp.nominal_value - 1.0) < 0.1,
+            abs(base.nominal_value) < 0.1,
+            curve.utils.is_error_not_significant(amp, absolute=0.1),
+            curve.utils.is_error_not_significant(tau),
+            curve.utils.is_error_not_significant(base, absolute=0.1),
+        ]
+
+        if all(criteria):
+            return "good"
+
+        return "bad"
+
+    def _format_data(
+        self,
+        curve_data: curve.CurveData,
+    ) -> curve.CurveData:
+        """Postprocessing for the processed dataset.
+
+        Args:
+            curve_data: Processed dataset created from experiment results.
+
+        Returns:
+            Formatted data.
+        """
+        # Check whether the SVD decomposition categorized 0 as 1 by computing the average slope.
+        diff_y = np.diff(unp.nominal_values(curve_data.y), axis=0)
+        avg_slope = sum(diff_y) / len(diff_y)
+        if avg_slope[0] > 0:
+            new_y_data = 1 - curve_data.y
+            new_curve_data = CurveData(
+                x=curve_data.x,
+                y=new_y_data,
+                y_err=curve_data.y_err,
+                shots=curve_data.shots,
+                data_allocation=curve_data.data_allocation,
+                labels=curve_data.labels,
+            )
+
+            return super()._format_data(new_curve_data)
+        return super()._format_data(curve_data)
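To make the orientation check in `_format_data` concrete, here is a small standalone sketch of the average-slope criterion using plain numpy arrays (synthetic values only, not the library's CurveData container):

    import numpy as np

    # Synthetic projection values that rise with delay, as happens when the
    # SVD projection maps the excited state near 0 instead of near 1.
    y = np.array([0.05, 0.20, 0.45, 0.70, 0.95])

    # Average slope of consecutive points; a positive value means the curve
    # rises with delay, so the orientation is flipped before fitting the decay.
    avg_slope = np.diff(y).mean()
    if avg_slope > 0:
        y = 1 - y

    print(y)  # now starts near 1 and decays toward 0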

qiskit_experiments/test/mock_iq_helpers.py

Lines changed: 49 additions & 2 deletions
@@ -378,6 +378,7 @@ def _parallel_exp_circ_splitter(self, qc_list: List[QuantumCircuit]):
         Raises:
             QiskitError: If an instruction is applied with qubits that don't belong to the same
                 experiment.
+            TypeError: If the qubit metadata doesn't match the expected type (`tuple` or `int`).
         """
         # exp_idx_map connects an experiment to its circuit in the output.
         exp_idx_map = {exp: exp_idx for exp_idx, exp in enumerate(self.exp_list)}
@@ -401,8 +402,22 @@ def _parallel_exp_circ_splitter(self, qc_list: List[QuantumCircuit]):
 
             # fixing metadata
             for exp_metadata in qc.metadata["composite_metadata"]:
-                # getting a qubit of one of the experiments that we ran in parallel
-                exp = qubit_exp_map[exp_metadata["qubits"][0]]
+                # Getting a qubit of one of the experiments that we ran in parallel. The key in
+                # the metadata ("qubit" or "qubits") differs between experiments.
+                qubit_metadata = (
+                    exp_metadata.get("qubit")
+                    if exp_metadata.get("qubit") is not None
+                    else exp_metadata.get("qubits")
+                )
+                if isinstance(qubit_metadata, tuple):
+                    exp = qubit_exp_map[qubit_metadata[0]]
+                elif isinstance(qubit_metadata, int):
+                    exp = qubit_exp_map[qubit_metadata]
+                else:
+                    raise TypeError(
+                        f"The qubit information in the metadata is of type {type(qubit_metadata)}."
+                        f" Supported formats are `tuple` and `int`."
+                    )
                 # using the qubit to access the experiment. Then, we go to the last circuit in
                 # `exp_circuit` of the corresponding experiment, and we overwrite the metadata.
                 exp_circuits_list[exp_idx_map[exp]][-1].metadata = exp_metadata.copy()
@@ -823,3 +838,35 @@ def compute_probabilities(self, circuits: List[QuantumCircuit]) -> List[Dict[str
             output_dict_list.append(probability_output_dict)
 
         return output_dict_list
+
+
+class MockIQT1Helper(MockIQExperimentHelper):
+    """Functions needed for a T1 experiment on a mock IQ backend."""
+
+    def __init__(
+        self,
+        t1: List[float] = None,
+        iq_cluster_centers: Optional[List[Tuple[IQPoint, IQPoint]]] = None,
+        iq_cluster_width: Optional[List[float]] = None,
+    ):
+        super().__init__(iq_cluster_centers, iq_cluster_width)
+        self._t1 = t1 or [90e-6]
+
+    def compute_probabilities(self, circuits: List[QuantumCircuit]) -> List[Dict[str, float]]:
+        """Return the probability of being in the excited state."""
+        output_dict_list = []
+        for circuit in circuits:
+            probability_output_dict = {}
+
+            # extracting information from the circuit
+            qubit_idx = circuit.metadata["qubit"]
+            delay = circuit.metadata["xval"]
+
+            # creating a probability dict
+            if qubit_idx >= len(self._t1):
+                raise QiskitError(f"There is no 'T1' value for qubit index {qubit_idx}.")
+            probability_output_dict["1"] = np.exp(-delay / self._t1[qubit_idx])
+            probability_output_dict["0"] = 1 - probability_output_dict["1"]
+            output_dict_list.append(probability_output_dict)
+
+        return output_dict_list
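As a quick sanity check of the probability model the helper implements, here is a standalone sketch with illustrative numbers (not taken from the PR's tests):

    import numpy as np

    t1 = 45e-6  # assumed qubit T1 in seconds
    delays = np.array([0.0, 22.5e-6, 45e-6, 135e-6])

    # Same model as MockIQT1Helper.compute_probabilities: P(1) = exp(-delay / T1).
    p1 = np.exp(-delays / t1)
    p0 = 1 - p1

    for delay, prob in zip(delays, p1):
        print(f"delay = {delay * 1e6:6.1f} us -> P(1) = {prob:.3f}")
    # At delay = T1, P(1) is 1/e ~ 0.368; at 3 * T1 it has dropped to ~ 0.05.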
Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Added the new class :class:`.T1KerneledAnalysis`. This class is used for a T1
+    experiment with the run option ``meas_level=MeasLevel.KERNELED``. The analysis
+    normalizes the data and fixes its orientation.
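A minimal usage sketch of the new class, mirroring the tutorial added in this commit (``backend`` is a placeholder for any backend that returns kerneled, level-1 data):

    import numpy as np
    from qiskit.qobj.utils import MeasLevel
    from qiskit_experiments.library import T1
    from qiskit_experiments.library.characterization.analysis.t1_analysis import T1KerneledAnalysis

    delays = np.linspace(0.0, 300e-6, 25)  # delay sweep in seconds, starting at delay=0

    exp = T1(0, delays)                    # T1 experiment on qubit 0
    exp.analysis = T1KerneledAnalysis()    # replace the default analysis with the kerneled one

    # `backend` is assumed to support kerneled (level-1) measurement results.
    # exp_data = exp.run(backend, meas_level=MeasLevel.KERNELED, meas_return="avg").block_for_results()
    # print(exp_data.analysis_results("T1"))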
