# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the continuous-variable operations in :mod:`pennylane.ops.cv`.
"""
# pylint: disable=protected-access,cell-var-from-loop
import numpy.testing as np_testing
import pytest
from pennylane import numpy as np
from pennylane.operation import AnyWires
from pennylane.ops import cv
from pennylane.wires import Wires
s_vals = np.linspace(-3, 3, 13)
phis = np.linspace(-2 * np.pi, 2 * np.pi, 11)
mags = np.linspace(0.0, 1.0, 7)
class TestCV:
"""Tests the continuous variable based operations."""
@pytest.mark.parametrize("phi", phis)
def test_rotation_heisenberg(self, phi):
"""ops: Tests the Heisenberg representation of the Rotation gate."""
matrix = cv.Rotation._heisenberg_rep([phi])
true_matrix = np.array(
[[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]
)
assert np.allclose(matrix, true_matrix)
@pytest.mark.parametrize(
"op,size",
[
(cv.Squeezing(0.123, -0.456, wires=1), 3),
(cv.Squeezing(0.668, 10.0, wires=0), 3), # phi > 2pi
(cv.Squeezing(1.992, -9.782, wires=0), 3), # phi < -2pi
(cv.Rotation(2.005, wires=1), 3),
(cv.Rotation(-1.365, wires=1), 3),
(cv.Displacement(2.841, 0.456, wires=0), 3),
(cv.Displacement(3.142, -7.221, wires=0), 3), # phi < -2pi
(cv.Displacement(2.004, 8.673, wires=0), 3), # phi > 2pi
(cv.Beamsplitter(0.456, -0.789, wires=[0, 2]), 5),
(cv.TwoModeSqueezing(2.532, 1.778, wires=[1, 2]), 5),
(
cv.InterferometerUnitary(
np.array([[1, 1], [1, -1]]) * -1.0j / np.sqrt(2.0), wires=1
),
5,
),
(cv.ControlledAddition(2.551, wires=[0, 2]), 5),
(cv.ControlledPhase(2.189, wires=[3, 1]), 5),
],
)
def test_adjoint_cv_ops(self, op, size, tol):
op_d = op.adjoint()
op_heis = op._heisenberg_rep(op.parameters)
op_d_heis = op_d._heisenberg_rep(op_d.parameters)
res1 = np.dot(op_heis, op_d_heis)
res2 = np.dot(op_d_heis, op_heis)
np_testing.assert_allclose(res1, np.eye(size), atol=tol)
np_testing.assert_allclose(res2, np.eye(size), atol=tol)
assert op.wires == op_d.wires
@pytest.mark.parametrize(
"op",
[
cv.CrossKerr(-1.724, wires=[2, 0]),
cv.CubicPhase(0.997, wires=2),
cv.Kerr(2.568, wires=2),
],
)
def test_adjoint_no_heisenberg_rep_defined(self, op):
op_d = op.adjoint()
assert op.parameters[0] == -op_d.parameters[0]
@pytest.mark.parametrize("phi", phis)
@pytest.mark.parametrize("mag", mags)
def test_squeezing_heisenberg(self, phi, mag):
"""ops: Tests the Heisenberg representation of the Squeezing gate."""
r = mag
matrix = cv.Squeezing._heisenberg_rep([r, phi])
true_matrix = np.array(
[
[1, 0, 0],
[0, np.cosh(r) - np.cos(phi) * np.sinh(r), -np.sin(phi) * np.sinh(r)],
[0, -np.sin(phi) * np.sinh(r), np.cosh(r) + np.cos(phi) * np.sinh(r)],
]
)
assert np.allclose(matrix, true_matrix)
@pytest.mark.parametrize("phi", phis)
@pytest.mark.parametrize("mag", mags)
def test_displacement_heisenberg(self, phi, mag):
"""ops: Tests the Heisenberg representation of the Displacement gate."""
r = mag
hbar = 2
matrix = cv.Displacement._heisenberg_rep([r, phi])
true_matrix = np.array(
[
[1, 0, 0],
[np.sqrt(2 * hbar) * r * np.cos(phi), 1, 0],
[np.sqrt(2 * hbar) * r * np.sin(phi), 0, 1],
]
)
assert np.allclose(matrix, true_matrix)
@pytest.mark.parametrize("phi", phis)
@pytest.mark.parametrize("theta", phis)
def test_beamsplitter_heisenberg(self, phi, theta):
"""ops: Tests the Heisenberg representation of the Beamsplitter gate."""
matrix = cv.Beamsplitter._heisenberg_rep([theta, phi])
true_matrix = np.array(
[
[1, 0, 0, 0, 0],
[0, np.cos(theta), 0, -np.cos(phi) * np.sin(theta), -np.sin(phi) * np.sin(theta)],
[0, 0, np.cos(theta), np.sin(phi) * np.sin(theta), -np.cos(phi) * np.sin(theta)],
[0, np.cos(phi) * np.sin(theta), -np.sin(phi) * np.sin(theta), np.cos(theta), 0],
[0, np.sin(phi) * np.sin(theta), np.cos(phi) * np.sin(theta), 0, np.cos(theta)],
]
)
assert np.allclose(matrix, true_matrix)
@pytest.mark.parametrize("phi", phis)
@pytest.mark.parametrize("mag", mags)
def test_two_mode_squeezing_heisenberg(self, phi, mag):
"""ops: Tests the Heisenberg representation of the Beamsplitter gate."""
r = mag
matrix = cv.TwoModeSqueezing._heisenberg_rep([r, phi])
true_matrix = np.array(
[
[1, 0, 0, 0, 0],
[0, np.cosh(r), 0, np.cos(phi) * np.sinh(r), np.sin(phi) * np.sinh(r)],
[0, 0, np.cosh(r), np.sin(phi) * np.sinh(r), -np.cos(phi) * np.sinh(r)],
[0, np.cos(phi) * np.sinh(r), np.sin(phi) * np.sinh(r), np.cosh(r), 0],
[0, np.sin(phi) * np.sinh(r), -np.cos(phi) * np.sinh(r), 0, np.cosh(r)],
]
)
assert np.allclose(matrix, true_matrix)
@pytest.mark.parametrize("s", s_vals)
def test_quadratic_phase_heisenberg(self, s):
"""ops: Tests the Heisenberg representation of the QuadraticPhase gate."""
matrix = cv.QuadraticPhase._heisenberg_rep([s])
true_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, s, 1]])
assert np.allclose(matrix, true_matrix)
@pytest.mark.parametrize("s", s_vals)
def test_controlled_addition_heisenberg(self, s):
"""ops: Tests the Heisenberg representation of ControlledAddition gate."""
matrix = cv.ControlledAddition._heisenberg_rep([s])
true_matrix = np.array(
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, -s], [0, s, 0, 1, 0], [0, 0, 0, 0, 1]]
)
assert np.allclose(matrix, true_matrix)
@pytest.mark.parametrize("s", s_vals)
def test_controlled_phase_heisenberg(self, s):
"""Tests the Heisenberg representation of the ControlledPhase gate."""
matrix = cv.ControlledPhase._heisenberg_rep([s])
true_matrix = np.array(
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, s, 0], [0, 0, 0, 1, 0], [0, s, 0, 0, 1]]
)
assert np.allclose(matrix, true_matrix)
@pytest.mark.parametrize("phi", phis)
def test_quadoperator_heisenberg(self, phi):
"""ops: Tests the Heisenberg representation of the QuadOperator gate."""
matrix = cv.QuadOperator._heisenberg_rep([phi])
true_matrix = np.array([0, np.cos(phi), np.sin(phi)])
assert np.allclose(matrix, true_matrix)
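# Added illustration (not part of the original test suite): the Heisenberg-representation
# tests above check that a Gaussian gate acts linearly on the extended quadrature vector
# (1, x, p). A minimal sketch for Rotation(phi), which leaves the constant entry untouched
# and rotates (x, p):
def _rotation_heisenberg_demo(phi, x=0.3, p=-0.7):
    """Apply the 3x3 Heisenberg matrix of Rotation(phi) to the vector (1, x, p)."""
    mat = cv.Rotation._heisenberg_rep([phi])
    return np.dot(mat, np.array([1.0, x, p]))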
class TestNonGaussian:
"""Tests that non-Gaussian gates are properly handled."""
@pytest.mark.parametrize(
"gate",
[cv.Kerr(0.1, wires=0), cv.CrossKerr(0.1, wires=[0, 1]), cv.CubicPhase(0.1, wires=0)],
)
def test_heisenberg_rep_nongaussian(self, gate):
"""ops: Tests that `_heisenberg_rep` returns None for non-Gaussian gates."""
assert gate._heisenberg_rep(0.1) is None
def test_heisenberg_transformation_nongaussian(self):
"""ops: Tests that proper exceptions are raised if we try to call the
Heisenberg transformation of non-Gaussian gates."""
op = cv.Kerr(0.1, wires=0)
with pytest.raises(RuntimeError, match=r"not a Gaussian operation"):
op.heisenberg_tr(Wires(range(op.num_wires)))
op = cv.CrossKerr(0.1, wires=[0, 1])
with pytest.raises(RuntimeError):
op.heisenberg_tr(Wires(range(op.num_wires)))
op = cv.CubicPhase(0.1, wires=0)
with pytest.raises(RuntimeError):
op.heisenberg_tr(Wires(range(op.num_wires)))
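# Added illustration (not from the original file): non-Gaussian gates such as Kerr have no
# linear action on (1, x, p), which is why ``_heisenberg_rep`` returns None and
# ``heisenberg_tr`` raises a RuntimeError in the tests above.
def _heisenberg_tr_failure_demo():
    """Return the error message produced when requesting the Heisenberg transform of Kerr."""
    op = cv.Kerr(0.1, wires=0)
    try:
        op.heisenberg_tr(Wires(range(op.num_wires)))
    except RuntimeError as exc:
        return str(exc)
    return None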
state_prep_data = [
(cv.CoherentState(0.1, 0.2, wires=0), 2, 1, "F"),
(cv.SqueezedState(0.1, 0.2, wires=0), 2, 1, "F"),
(cv.DisplacedSqueezedState(0.1, 0.2, 0.3, 0.4, wires=0), 4, 1, "F"),
(cv.ThermalState(0.1, wires=0), 1, 1, "F"),
(cv.GaussianState(0.1, 0.2, wires=(0, 1, 2, 3, 4)), 2, AnyWires, "F"),
(cv.FockState(1, wires=0), 1, 1, None),
(cv.FockStateVector([0, 0, 1, 0], wires=0), 1, AnyWires, "F"),
(cv.FockDensityMatrix(np.eye(2), wires=0), 1, AnyWires, "F"),
(cv.CatState(0.1, 0.2, 0.3, wires=0), 3, 1, "F"),
]
@pytest.mark.parametrize("op, num_params, num_wires, grad_method", state_prep_data)
def test_state_prep_operations(op, num_params, num_wires, grad_method):
"""Test initialization of state preperation operations."""
assert op.num_params == num_params
assert op.num_wires == num_wires
assert op.grad_method == grad_method
label_data = [
(cv.Rotation(1.2345, wires=0), "R", "R\n(1.23)"),
(cv.Squeezing(1.234, 2.345, wires=0), "S", "S\n(1.23,\n2.35)"),
(cv.Displacement(1.234, 2.345, wires=0), "D", "D\n(1.23,\n2.35)"),
(cv.Beamsplitter(1.234, 2.345, wires=(0, 1)), "BS", "BS\n(1.23,\n2.35)"),
(cv.TwoModeSqueezing(1.2345, 2.3456, wires=(0, 1)), "S", "S\n(1.23,\n2.35)"),
(cv.QuadraticPhase(1.2345, wires=0), "P", "P\n(1.23)"),
(cv.ControlledAddition(1.234, wires=(0, 1)), "X", "X\n(1.23)"),
(cv.ControlledPhase(1.2345, wires=(0, 1)), "Z", "Z\n(1.23)"),
(cv.Kerr(1.234, wires=0), "Kerr", "Kerr\n(1.23)"),
(cv.CrossKerr(1.234, wires=(0, 1)), "CrossKerr", "CrossKerr\n(1.23)"),
(cv.CubicPhase(1.234, wires=0), "V", "V\n(1.23)"),
(cv.InterferometerUnitary(np.eye(2), wires=0), "U", "U"),
(cv.ThermalState(1.234, wires=0), "Thermal", "Thermal\n(1.23)"),
(
cv.GaussianState(np.array([[2, 0], [0, 2]]), np.array([1, 2]), wires=[1]),
"Gaussian",
"Gaussian",
),
(cv.FockState(7, wires=0), "|7⟩", "|7⟩"),
(cv.FockStateVector([1, 2, 3], wires=(0, 1, 2)), "|123⟩", "|123⟩"),
(cv.NumberOperator(wires=0), "n", "n"),
(cv.TensorN(wires=(0, 1, 2)), "n⊗n⊗n", "n⊗n⊗n"),
(cv.QuadOperator(1.234, wires=0), "cos(φ)x\n+sin(φ)p", "cos(1.23)x\n+sin(1.23)p"),
(cv.FockStateProjector([1, 2, 3], wires=(0, 1, 2)), "|123⟩⟨123|", "|123⟩⟨123|"),
]
label_data_base_name = [
(cv.FockState(7, wires=0), "name", "name\n(7)"),
(cv.FockStateVector([1, 2, 3], wires=(0, 1, 2)), "name", "name"),
(cv.TensorN(wires=(0, 1, 2)), "name", "name"),
(cv.QuadOperator(1.234, wires=0), "name", "name\n(1.23)"),
(cv.FockStateProjector([1, 2, 3], wires=(0, 1, 2)), "name", "name"),
]
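# Added usage sketch (hypothetical helper, not from the original file): the ``label`` method
# drives how operations are rendered in circuit drawings; ``decimals`` controls how parameters
# are formatted and ``base_label`` overrides the default symbol, as exercised by the two
# tables above.
def _label_demo():
    op = cv.Squeezing(1.234, 2.345, wires=0)
    return op.label(), op.label(decimals=2), op.label(base_label="Sq", decimals=2)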
class TestLabel:
@pytest.mark.parametrize("op, label1, label2", label_data)
def test_label_method(self, op, label1, label2):
"""Tests the label method for formatting in drawings"""
assert op.label() == label1
assert op.label(decimals=2) == label2
@pytest.mark.parametrize("op, label1, label2", label_data_base_name)
def test_label_base_name(self, op, label1, label2):
"""Test label method with custom base label."""
assert op.label(base_label="name") == label1
assert op.label(base_label="name", decimals=2) == label2
id: pennylane/tests/ops/test_cv_ops.py/0
metadata: {
    "file_path": "pennylane/tests/ops/test_cv_ops.py",
    "repo_id": "pennylane",
    "token_count": 5978
}
__index_level_0__: 82
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the ``RotosolveOptimizer``.
"""
# pylint: disable=too-many-arguments,too-few-public-methods
import functools
import pytest
from scipy.optimize import shgo
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import RotosolveOptimizer
from pennylane.utils import _flatten, unflatten
def expand_num_freq(num_freq, param):
if np.isscalar(num_freq):
num_freq = [num_freq] * len(param)
expanded = []
for _num_freq, par in zip(num_freq, param):
if np.isscalar(_num_freq) and np.isscalar(par):
expanded.append(_num_freq)
elif np.isscalar(_num_freq):
expanded.append(np.ones_like(par) * _num_freq)
elif np.isscalar(par):
raise ValueError(f"{num_freq}\n{param}\n{_num_freq}\n{par}")
elif len(_num_freq) == len(par):
expanded.append(_num_freq)
else:
raise ValueError()
return expanded
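# Added usage sketch (not part of the original tests): ``expand_num_freq`` broadcasts a scalar
# frequency specification over a (possibly nested) parameter structure.
def _expand_num_freq_demo():
    # A scalar num_freq combined with (scalar, array) parameters yields [2, array([2., 2.])].
    return expand_num_freq(2, (0.1, np.array([0.3, 0.4])))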
def successive_params(par1, par2):
"""Return a list of parameter configurations, successively walking from
par1 to par2 coordinate-wise."""
par1_flat = np.fromiter(_flatten(par1), dtype=float)
par2_flat = np.fromiter(_flatten(par2), dtype=float)
walking_param = []
for i in range(len(par1_flat) + 1):
walking_param.append(unflatten(np.append(par2_flat[:i], par1_flat[i:]), par1))
return walking_param
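# Added illustration (not part of the original tests): walking from ``par1`` to ``par2``
# coordinate by coordinate yields len(flat) + 1 configurations, starting at par1 and ending
# at par2, mirroring how Rotosolve updates one scalar parameter per substep.
def _successive_params_demo():
    walk = successive_params(np.array([0.0, 0.0]), np.array([1.0, 2.0]))
    # walk contains [0., 0.], then [1., 0.], then [1., 2.] (up to array wrapping)
    return walk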
def test_error_missing_frequency_info():
"""Test that an error is raised if neither nums_frequency nor spectra is given."""
opt = RotosolveOptimizer()
fun = lambda x: x
x = np.array(0.5, requires_grad=True)
with pytest.raises(ValueError, match="Neither the number of frequencies nor the"):
opt.step(fun, x)
def test_no_error_missing_frequency_info_untrainable():
"""Test that no error is raised if neither nums_frequency nor spectra
is given for a parameter not marked as trainable."""
opt = RotosolveOptimizer()
fun = lambda x, y: x
x = np.array(0.5, requires_grad=True)
y = np.array(0.1, requires_grad=False)
nums_frequency = {"x": {(): 1}}
opt.step(fun, x, y, nums_frequency=nums_frequency)
def test_error_missing_frequency_info_single_par():
"""Test that an error is raised if neither nums_frequency nor spectra is given
for one of the function arguments."""
opt = RotosolveOptimizer()
def sum_named_arg(x):
return qml.math.sum(x)
x = np.arange(4, requires_grad=True)
nums_frequency = {"x": {(0,): 1, (1,): 1}}
spectra = {"x": {(0,): [0.0, 1.0], (2,): [0.0, 1.0]}}
# For the first three entries either nums_frequency or spectra is provided
with pytest.raises(ValueError, match=r"was provided for the entry \(3,\)"):
opt.step(sum_named_arg, x, nums_frequency=nums_frequency, spectra=spectra)
def test_error_no_trainable_args():
"""Test that an error is raised if none of the arguments is trainable."""
opt = RotosolveOptimizer()
fun = lambda x, y, z: 1.0
x = np.arange(4, requires_grad=False)
with pytest.raises(ValueError, match="Found no parameters to optimize."):
opt.step(fun, x, nums_frequency=None, spectra=None)
classical_functions = [
lambda x: np.sin(x + 0.124) * 2.5123,
lambda x: -np.cos(x[0] + 0.12) * 0.872 + np.sin(x[1] - 2.01) - np.cos(x[2] - 1.35) * 0.111,
lambda x, y: -np.cos(x + 0.12) * 0.872 + np.sin(y[0] - 2.01) - np.cos(y[1] - 1.35) * 0.111,
lambda x, y: (
-np.cos(x + 0.12) * 0.872
+ np.sin(2 * y[0] - 2.01)
+ np.sin(y[0] - 2.01 / 2 - np.pi / 4) * 0.1
- np.cos(y[1] - 1.35 / 2) * 0.2
- np.cos(2 * y[1] - 1.35) * 0.111
),
lambda x, y, z: -np.cos(x + 0.12) * 0.872 + np.sin(y - 2.01) - np.cos(z - 1.35) * 0.111,
lambda x, y, z: (
-np.cos(x + 0.06)
- np.cos(2 * x + 0.12) * 0.872
+ np.sin(y - 2.01 / 3 - np.pi / 3)
+ np.sin(3 * y - 2.01)
- np.cos(z - 1.35) * 0.111
),
]
classical_minima = [
(-np.pi / 2 - 0.124,),
([-0.12, -np.pi / 2 + 2.01, 1.35],),
(-0.12, [-np.pi / 2 + 2.01, 1.35]),
(-0.12, [(-np.pi / 2 + 2.01) / 2, 1.35 / 2]),
(-0.12, -np.pi / 2 + 2.01, 1.35),
(-0.12 / 2, (-np.pi / 2 + 2.01) / 3, 1.35),
]
classical_params = [
(0.24,),
([0.2, -0.3, 0.1],),
(0.3, [0.8, 0.1]),
(0.2, [0.3, 0.5]),
(0.1, 0.2, 0.5),
(0.9, 0.7, 0.2),
]
classical_nums_frequency = [
{"x": {(): 1}},
{"x": {(0,): 1, (1,): 1, (2,): 1}},
{"x": {(): 1}, "y": {(0,): 1, (1,): 1}},
{"x": {(): 1}, "y": {(0,): 2, (1,): 2}},
{"x": {(): 1}, "y": {(): 1}, "z": {(): 1}},
{"x": {(): 2}, "y": {(): 3}, "z": {(): 1}},
]
classical_expected_num_calls = [3, 9, 9, 13, 9, 15]
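# Added worked check (sketch): Rotosolve evaluates each univariate restriction at 2R + 1
# points, so the call counts above are sum(2 * R + 1) over all scalar parameters; e.g. the
# last entry gives (2*2 + 1) + (2*3 + 1) + (2*1 + 1) = 15.
def _expected_calls(nums_frequency):
    return sum(2 * R + 1 for arg in nums_frequency.values() for R in arg.values())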
def custom_optimizer(fun, **kwargs):
r"""Wrapper for ``scipy.optimize.shgo`` that does not return y_min."""
opt_res = shgo(fun, **kwargs)
return opt_res.x[0], None
substep_optimizers = ["brute", "shgo", custom_optimizer]
all_substep_kwargs = [
{"Ns": 93, "num_steps": 3},
{"bounds": ((-1.0, 1.0),), "n": 512},
{"bounds": ((-1.1, 1.4),)},
]
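# Added sketch of the substep-optimizer interface assumed by the tests (hypothetical helper,
# not from the original file): a callable ``fun, **kwargs -> (x_min, y_min)`` minimizing a
# univariate function; ``y_min`` may be None, as in ``custom_optimizer`` above.
def _grid_substep_optimizer(fun, bounds=((-np.pi, np.pi),), num_steps=51, **_):
    """Brute-force 1D grid search returning the minimizing point and the minimal value."""
    xs = np.linspace(bounds[0][0], bounds[0][1], num_steps)
    ys = np.array([fun(x) for x in xs])
    return xs[np.argmin(ys)], ys.min()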
@pytest.mark.parametrize(
"fun, x_min, param, nums_freq, exp_num_calls",
list(
zip(
classical_functions,
classical_minima,
classical_params,
classical_nums_frequency,
classical_expected_num_calls,
)
),
)
@pytest.mark.parametrize(
"substep_optimizer, substep_kwargs",
list(zip(substep_optimizers, all_substep_kwargs)),
)
class TestWithClassicalFunction:
# pylint: disable=unused-argument
def test_number_of_function_calls(
self, fun, x_min, param, nums_freq, exp_num_calls, substep_optimizer, substep_kwargs
):
"""Tests that per parameter 2R+1 function calls are used for an update step."""
# pylint: disable=too-many-arguments
num_calls = 0
@functools.wraps(fun)
def _fun(*args, **kwargs):
nonlocal num_calls
num_calls += 1
return fun(*args, **kwargs)
opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)
# Make only the first argument trainable
param = (np.array(param[0], requires_grad=True),) + param[1:]
# Only one argument is marked as trainable -> Expect only the executions for that arg
opt.step(_fun, *param, nums_frequency=nums_freq)
exp_num_calls_single_trainable = sum(2 * num + 1 for num in nums_freq["x"].values())
assert num_calls == exp_num_calls_single_trainable
num_calls = 0
# Parameters are now marked as trainable -> Expect full number of executions
param = tuple(np.array(p, requires_grad=True) for p in param)
opt.step(_fun, *param, nums_frequency=nums_freq)
assert num_calls == exp_num_calls
def test_single_step_convergence(
self, fun, x_min, param, nums_freq, exp_num_calls, substep_optimizer, substep_kwargs
):
"""Tests convergence for easy classical functions in a single Rotosolve step.
Includes testing of the parameter output shape and the old cost when using step_and_cost."""
opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)
# Make only the first argument trainable
param = (np.array(param[0], requires_grad=True),) + param[1:]
# Only one argument is marked as trainable -> All other arguments have to stay fixed
new_param_step = opt.step(
fun,
*param,
nums_frequency=nums_freq,
)
# The following accounts for the unpacking functionality for length-1 param
if len(param) == 1:
new_param_step = (new_param_step,)
assert all(np.allclose(p, new_p) for p, new_p in zip(param[1:], new_param_step[1:]))
# With trainable parameters, training should happen
param = tuple(np.array(p, requires_grad=True) for p in param)
new_param_step = opt.step(
fun,
*param,
nums_frequency=nums_freq,
)
# The following accounts for the unpacking functionality for length-1 param
if len(param) == 1:
new_param_step = (new_param_step,)
assert len(x_min) == len(new_param_step)
assert np.allclose(
np.fromiter(_flatten(x_min), dtype=float),
np.fromiter(_flatten(new_param_step), dtype=float),
atol=1e-5,
)
# Now with step_and_cost and trainable params
# pylint:disable=unbalanced-tuple-unpacking
new_param_step_and_cost, old_cost = opt.step_and_cost(
fun,
*param,
nums_frequency=nums_freq,
)
# The following accounts for the unpacking functionality for length-1 param
if len(param) == 1:
new_param_step_and_cost = (new_param_step_and_cost,)
assert len(x_min) == len(new_param_step_and_cost)
assert np.allclose(
np.fromiter(_flatten(new_param_step_and_cost), dtype=float),
np.fromiter(_flatten(new_param_step), dtype=float),
atol=1e-5,
)
assert np.isclose(old_cost, fun(*param))
def test_full_output(
self, fun, x_min, param, nums_freq, exp_num_calls, substep_optimizer, substep_kwargs
):
"""Tests the ``full_output`` feature of Rotosolve, delivering intermediate cost
function values at the univariate optimization substeps."""
param = tuple(np.array(p, requires_grad=True) for p in param)
opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)
_, y_output_step = opt.step(
fun,
*param,
nums_frequency=nums_freq,
full_output=True,
)
new_param, old_cost, y_output_step_and_cost = opt.step_and_cost(
fun,
*param,
nums_frequency=nums_freq,
full_output=True,
)
# The following accounts for the unpacking functionality for length-1 param
if len(param) == 1:
new_param = (new_param,)
expected_intermediate_x = successive_params(param, new_param)
expected_y_output = [fun(*par) for par in expected_intermediate_x[1:]]
assert np.allclose(y_output_step, expected_y_output)
assert np.allclose(y_output_step_and_cost, expected_y_output)
assert np.isclose(old_cost, fun(*expected_intermediate_x[0]))
@pytest.mark.parametrize(
"fun, x_min, param, num_freq",
list(zip(classical_functions, classical_minima, classical_params, classical_nums_frequency)),
)
def test_multiple_steps(fun, x_min, param, num_freq):
"""Tests that repeated steps execute as expected."""
param = tuple(np.array(p, requires_grad=True) for p in param)
substep_optimizer = "brute"
substep_kwargs = None
opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)
for _ in range(3):
param = opt.step(
fun,
*param,
nums_frequency=num_freq,
)
# The following accounts for the unpacking functionality for length-one param
if len(x_min) == 1:
param = (param,)
assert (np.isscalar(x_min) and np.isscalar(param)) or len(x_min) == len(param)
assert np.allclose(
np.fromiter(_flatten(x_min), dtype=float),
np.fromiter(_flatten(param), dtype=float),
atol=1e-5,
)
classical_functions_deact = [
lambda x, y: -np.cos(x + 0.12) * 0.872 + np.sin(y[0] - 2.01) - np.cos(y[1] - 1.35) * 0.111,
lambda x, y, z: -np.cos(x + 0.12) * 0.872 + np.sin(y - 2.01) - np.cos(z - 1.35) * 0.111,
]
classical_minima_deact = [
(-0.12, [0.8, 0.1]),
(-0.12, 0.2, 1.35),
]
classical_params_deact = [
(np.array(0.3, requires_grad=True), np.array([0.8, 0.1], requires_grad=False)),
(
np.array(0.1, requires_grad=True),
np.array(0.2, requires_grad=False),
np.array(0.5, requires_grad=True),
),
]
classical_nums_frequency_deact = [
{"x": {(): 1}, "y": {(0,): 1, (1,): 1}},
{"x": {(): 1}, "y": {(): 1}, "z": {(): 1}},
]
@pytest.mark.parametrize(
"fun, x_min, param, num_freq",
list(
zip(
classical_functions_deact,
classical_minima_deact,
classical_params_deact,
classical_nums_frequency_deact,
)
),
)
class TestDeactivatedTrainingWithClassicalFunctions:
def test_single_step(self, fun, x_min, param, num_freq):
"""Tests convergence for easy classical functions in a single Rotosolve step
with some arguments deactivated for training.
Includes testing of the parameter output shape and the old cost when using step_and_cost."""
substep_optimizer = "brute"
substep_kwargs = None
opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)
new_param_step = opt.step(
fun,
*param,
nums_frequency=num_freq,
)
# The following accounts for the unpacking functionality for length-1 param
if len(param) == 1:
new_param_step = (new_param_step,)
assert len(x_min) == len(new_param_step)
assert np.allclose(
np.fromiter(_flatten(x_min), dtype=float),
np.fromiter(_flatten(new_param_step), dtype=float),
atol=1e-5,
)
# pylint:disable=unbalanced-tuple-unpacking
new_param_step_and_cost, old_cost = opt.step_and_cost(
fun,
*param,
nums_frequency=num_freq,
)
# The following accounts for the unpacking functionality for length-1 param
if len(param) == 1:
new_param_step_and_cost = (new_param_step_and_cost,)
assert len(x_min) == len(new_param_step_and_cost)
assert np.allclose(
np.fromiter(_flatten(new_param_step_and_cost), dtype=float),
np.fromiter(_flatten(new_param_step), dtype=float),
atol=1e-5,
)
assert np.isclose(old_cost, fun(*param))
num_wires = 3
dev = qml.device("default.qubit", wires=num_wires)
@qml.qnode(dev)
def scalar_qnode(x):
for w in dev.wires:
qml.RX(x, wires=w)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1) @ qml.PauliZ(2))
@qml.qnode(dev)
def array_qnode(x, y, z):
for _x, w in zip(x, dev.wires):
qml.RX(_x, wires=w)
for i in range(num_wires):
qml.CRY(y, wires=[i, (i + 1) % num_wires])
qml.RZ(z[0], wires=0)
qml.RZ(z[1], wires=1)
qml.RZ(z[1], wires=2) # z[1] is used twice on purpose
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1) @ qml.PauliZ(2))
@qml.qnode(dev)
def _postprocessing_qnode(x, y, z):
for w in dev.wires:
qml.RX(x, wires=w)
for w in dev.wires:
qml.RY(y, wires=w)
for w in dev.wires:
qml.RZ(z, wires=w)
return [qml.expval(qml.PauliZ(w)) for w in dev.wires]
def postprocessing_qnode(x, y, z):
return np.sum(_postprocessing_qnode(x, y, z))
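# Added note (sketch, not from the original file): Rotosolve requires a scalar cost, which is
# why the list-valued ``_postprocessing_qnode`` is wrapped by ``postprocessing_qnode`` above;
# the wrapper sums the three expectation values into a single number.
def _scalar_cost_demo():
    return postprocessing_qnode(0.1, 0.2, 0.3)  # a single scalar, suitable for optimization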
qnodes = [scalar_qnode, array_qnode, postprocessing_qnode]
qnode_params = [
(0.2,),
(np.array([0.1, -0.3, 2.9]), 1.3, [0.2, 0.1]),
(1.2, -2.3, -0.2),
]
qnode_nums_frequency = [
{"x": {(): num_wires}},
{"x": {(0,): 1, (1,): 1, (2,): 1}, "y": {(): 2 * num_wires}, "z": {(0,): 1, (1,): 2}},
None,
]
qnode_spectra = [
None,
None,
{
"x": {(): list(range(num_wires + 1))},
"y": {(): list(range(num_wires + 1))},
"z": {(): list(range(num_wires + 1))},
},
]
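# Added illustration of the keyword format mirrored by the data above (sketch, not an official
# schema): both ``nums_frequency`` and ``spectra`` map a QNode argument name to a dict from
# the parameter index (a tuple, ``()`` for scalars) to the number of frequencies or to the
# explicit frequency spectrum, respectively.
_example_nums_frequency = {"x": {(): 1}, "y": {(0,): 1, (1,): 2}}
_example_spectra = {"z": {(): [0.0, 1.0, 2.0]}}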
@pytest.mark.parametrize(
"qnode, param, nums_frequency, spectra",
list(zip(qnodes, qnode_params, qnode_nums_frequency, qnode_spectra)),
)
@pytest.mark.parametrize(
"substep_optimizer, substep_kwargs",
list(zip(substep_optimizers, all_substep_kwargs)),
)
class TestWithQNodes:
def test_single_step(
self, qnode, param, nums_frequency, spectra, substep_optimizer, substep_kwargs
):
"""Test executing a single step of the RotosolveOptimizer on a QNode."""
param = tuple(np.array(p, requires_grad=True) for p in param)
opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)
repack_param = len(param) == 1
new_param_step = opt.step(
qnode,
*param,
nums_frequency=nums_frequency,
spectra=spectra,
)
if repack_param:
new_param_step = (new_param_step,)
assert (np.isscalar(new_param_step) and np.isscalar(param)) or len(new_param_step) == len(
param
)
# pylint:disable=unbalanced-tuple-unpacking
new_param_step_and_cost, old_cost = opt.step_and_cost(
qnode,
*param,
nums_frequency=nums_frequency,
spectra=spectra,
)
if repack_param:
new_param_step_and_cost = (new_param_step_and_cost,)
assert np.allclose(
np.fromiter(_flatten(new_param_step_and_cost), dtype=float),
np.fromiter(_flatten(new_param_step), dtype=float),
)
assert np.isclose(qnode(*param), old_cost)
def test_multiple_steps(
self, qnode, param, nums_frequency, spectra, substep_optimizer, substep_kwargs
):
"""Test executing multiple steps of the RotosolveOptimizer on a QNode."""
param = tuple(np.array(p, requires_grad=True) for p in param)
# For the following 1D substep_optimizer, the bounds need to be expanded for these QNodes
if substep_optimizer in ["shgo", custom_optimizer]:
substep_kwargs["bounds"] = ((-2.0, 2.0),)
opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)
repack_param = len(param) == 1
initial_cost = qnode(*param)
for _ in range(3):
param = opt.step(
qnode,
*param,
nums_frequency=nums_frequency,
spectra=spectra,
)
# The following accounts for the unpacking functionality for length-1 param
if repack_param:
param = (param,)
assert qnode(*param) < initial_cost
id: pennylane/tests/optimize/test_rotosolve.py/0
metadata: {
    "file_path": "pennylane/tests/optimize/test_rotosolve.py",
    "repo_id": "pennylane",
    "token_count": 8745
}
__index_level_0__: 83
# Copyright 2018-2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the ParametrizedEvolution class
"""
# pylint: disable=unused-argument,too-few-public-methods,import-outside-toplevel,comparison-with-itself,protected-access,possibly-unused-variable
from functools import reduce
import numpy as np
import pytest
import pennylane as qml
from pennylane.devices import DefaultQubit, DefaultQubitLegacy
from pennylane.operation import AnyWires
from pennylane.ops import QubitUnitary
from pennylane.pulse import ParametrizedEvolution, ParametrizedHamiltonian
from pennylane.tape import QuantumTape
class MyOp(qml.RX):
"""Variant of qml.RX that claims to not have `adjoint` or a matrix defined."""
has_matrix = False
has_adjoint = False
has_decomposition = False
has_diagonalizing_gates = False
def amp0(p, t):
return p * t
def amp1(p, t):
return p[0] * t + p[1]
H0 = qml.PauliX(1) + amp0 * qml.PauliZ(0) + amp0 * qml.PauliY(1)
params0_ = [0.5, 0.5]
H1 = qml.PauliX(1) + amp0 * qml.PauliZ(0) + amp1 * qml.PauliY(1)
params1_ = (0.5, [0.5, 0.5])
example_pytree_evolutions = [
qml.pulse.ParametrizedEvolution(H0),
qml.pulse.ParametrizedEvolution(H0, params0_),
qml.pulse.ParametrizedEvolution(H0, t=0.3),
qml.pulse.ParametrizedEvolution(H0, params0_, t=0.5),
qml.pulse.ParametrizedEvolution(H0, params0_, t=[0.5, 1.0]),
qml.pulse.ParametrizedEvolution(H0, params0_, t=0.5, return_intermediate=True),
qml.pulse.ParametrizedEvolution(
H0, params0_, t=0.5, return_intermediate=True, complementary=True
),
qml.pulse.ParametrizedEvolution(
H0, params0_, t=0.5, return_intermediate=True, complementary=True, atol=1e-4, rtol=1e-4
),
qml.pulse.ParametrizedEvolution(
H0,
params0_,
t=0.5,
return_intermediate=True,
complementary=True,
atol=1e-4,
rtol=1e-4,
dense=True,
),
qml.pulse.ParametrizedEvolution(H1, params1_, t=0.5),
qml.pulse.ParametrizedEvolution(H1, params1_, t=0.5, return_intermediate=True),
]
@pytest.mark.jax
class TestPytree:
"""Testing pytree related functionality"""
@pytest.mark.parametrize("evol", example_pytree_evolutions)
def test_flatten_unflatten_identity(self, evol):
"""Test that flattening and unflattening is yielding the same parametrized evolution"""
assert evol._unflatten(*evol._flatten()) == evol
@pytest.mark.xfail
@pytest.mark.jax
def test_standard_validity():
"""Run standard validity checks on the parametrized evolution."""
def f1(p, t):
return p * t
H = f1 * qml.PauliY(0)
params = (0.5,)
ev = qml.pulse.ParametrizedEvolution(H, params, 0.5)
qml.ops.functions.assert_valid(ev, skip_pickle=True)
def time_independent_hamiltonian():
"""Create a time-independent Hamiltonian on two qubits."""
ops = [qml.PauliX(0), qml.PauliZ(1), qml.PauliY(0), qml.PauliX(1)]
def f1(params, t):
return params # constant
def f2(params, t):
return params # constant
coeffs = [f1, f2, 4, 9]
return ParametrizedHamiltonian(coeffs, ops)
def time_dependent_hamiltonian():
"""Create a time-dependent two-qubit Hamiltonian that takes two scalar parameters."""
import jax.numpy as jnp
ops = [qml.PauliX(0), qml.PauliZ(1), qml.PauliY(0), qml.PauliX(1)]
def f1(params, t):
return params * t
def f2(params, t):
return params * jnp.cos(t)
coeffs = [f1, f2, 4, 9]
return ParametrizedHamiltonian(coeffs, ops)
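# Added background sketch (assumption spelled out, not taken from the original file): a
# ParametrizedEvolution represents the solution U(t1, t0) of the Schrodinger equation
#     dU/dt = -i H(params, t) U,    U(t0, t0) = 1,
# which for a time-independent H reduces to U = expm(-i H (t1 - t0)); the matrix and
# integration tests below use exactly this closed form as a reference.
def _time_independent_evolution_reference(H_mat, t0, t1):
    """Closed-form propagator for a constant Hamiltonian matrix."""
    from scipy.linalg import expm  # local import to keep the sketch self-contained

    return expm(-1j * H_mat * (t1 - t0))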
@pytest.mark.jax
class TestInitialization:
"""Unit tests for the ParametrizedEvolution class."""
@pytest.mark.parametrize(
"coeffs, params", [([1, 2], []), ([1, 2], None), ([qml.pulse.constant] * 2, [1, 2])]
)
def test_init(self, params, coeffs):
"""Test the initialization."""
ops = [qml.PauliX(0), qml.PauliY(1)]
H = ParametrizedHamiltonian(coeffs, ops)
ev = ParametrizedEvolution(H=H, params=params, t=2, dense=True)
assert ev.H is H
assert qml.math.allequal(ev.t, [0, 2])
assert ev.wires == H.wires
assert ev.num_wires == AnyWires
assert ev.name == "ParametrizedEvolution"
assert ev.id is None
exp_params = [] if params is None else params
assert qml.math.allequal(ev.data, exp_params)
assert qml.math.allequal(ev.parameters, exp_params)
assert ev.num_params == len(exp_params)
assert ev.dense is True
def test_set_dense(self):
"""Test that flag dense is set correctly"""
ops = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]
coeffs = [1, 2, 3]
H = ParametrizedHamiltonian(coeffs, ops)
ev = ParametrizedEvolution(H=H, params=None, t=2)
assert ev.dense is False
assert ev(params=[], t=2).dense is False # Test that calling inherits the dense keyword
ev2 = ParametrizedEvolution(H=H, params=None, t=2, dense=True)
assert ev2.dense is True
assert ev2(params=[], t=2).dense is True # Test that calling inherits the dense keyword
ev3 = ParametrizedEvolution(H=H, params=None, t=2, dense=False)
assert ev3.dense is False
assert ev3(params=[], t=2).dense is False # Test that calling inherits the dense keyword
@pytest.mark.parametrize("dense_bool", [True, False])
def test_updating_dense_in_call(self, dense_bool):
"""Test that the flag dense updated correctly if set when calling ParametrizedEvolution"""
ops = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]
coeffs = [1, 2, 3]
H = ParametrizedHamiltonian(coeffs, ops)
ev = ParametrizedEvolution(H=H, params=None, t=2)
assert ev.dense is False
assert ev(params=[], t=2, dense=dense_bool).dense is dense_bool
ev2 = ParametrizedEvolution(H=H, params=None, t=2, dense=True)
assert ev2.dense is True
assert ev2(params=[], t=2, dense=dense_bool).dense is dense_bool
ev3 = ParametrizedEvolution(H=H, params=None, t=2, dense=False)
assert ev3.dense is False
assert ev3(params=[], t=2, dense=dense_bool).dense is dense_bool
@pytest.mark.parametrize("ret_intmdt, comp", ([False, False], [True, False], [True, True]))
def test_return_intermediate_and_complementary(self, ret_intmdt, comp):
"""Test that the keyword arguments return_intermediate and complementary are taken into
account correctly at initialization and when calling. This includes testing
inheritance when calling without explicitly providing these kwargs."""
ops = [qml.PauliX(0), qml.PauliY(1)]
coeffs = [1, 2]
t = [0.1, 0.2, 0.9] # avoid warning because of simple time argument+return_intermediate
H = ParametrizedHamiltonian(coeffs, ops)
ev = ParametrizedEvolution(
H=H, params=[], t=t, return_intermediate=ret_intmdt, complementary=comp
)
assert ev.hyperparameters["return_intermediate"] is ret_intmdt
assert ev.hyperparameters["complementary"] is comp
new_ev = ev([], t=t)
assert new_ev.hyperparameters["return_intermediate"] is ret_intmdt
assert new_ev.hyperparameters["complementary"] is comp
for new_ret_intmdt, new_comp in ([False, False], [True, False], [True, True]):
new_ev = ev([], t=t, return_intermediate=new_ret_intmdt, complementary=new_comp)
assert new_ev.hyperparameters["return_intermediate"] is new_ret_intmdt
assert new_ev.hyperparameters["complementary"] is new_comp
@pytest.mark.parametrize("len_t", [3, 8])
def test_batch_size_with_return_intermediate(self, len_t):
"""Test that the batch size is correctly set for intermediate time values."""
ops = [qml.PauliX(0), qml.PauliY(1)]
coeffs = [1, 2]
t = np.linspace(0, 1, len_t)
H = ParametrizedHamiltonian(coeffs, ops)
ev = ParametrizedEvolution(H=H, params=[], t=t)
assert ev.batch_size is None
ev = ParametrizedEvolution(H=H, params=[], t=t, return_intermediate=True)
assert ev.batch_size == len_t
ev = ParametrizedEvolution(
H=H, params=[], t=t, return_intermediate=True, complementary=True
)
assert ev.batch_size == len_t
def test_warns_with_complementary_without_ret_intermediate(self):
"""Test that a warning is raised if the keyword argument complementary is activated
without return_intermediate being activated."""
ops = [qml.PauliX(0), qml.PauliY(1)]
coeffs = [1, 2]
H = ParametrizedHamiltonian(coeffs, ops)
with pytest.warns(UserWarning, match="The keyword argument complementary"):
ev = ParametrizedEvolution(
H=H, params=[], t=2, return_intermediate=False, complementary=True
)
assert ev.hyperparameters["return_intermediate"] is False
assert ev.hyperparameters["complementary"] is True
def test_odeint_kwargs(self):
"""Test the initialization with odeint kwargs."""
ops = [qml.PauliX(0), qml.PauliY(1)]
coeffs = [1, 2]
H = ParametrizedHamiltonian(coeffs, ops)
ev = ParametrizedEvolution(H=H, params=[], t=2, mxstep=10, hmax=1, atol=1e-3, rtol=1e-6)
assert ev.odeint_kwargs == {"mxstep": 10, "hmax": 1, "atol": 1e-3, "rtol": 1e-6}
def test_update_attributes(self):
"""Test that the ``ParametrizedEvolution`` attributes can be updated
using the ``__call__`` method."""
ops = [qml.PauliX(0), qml.PauliY(1)]
coeffs = [1, 2]
H = ParametrizedHamiltonian(coeffs, ops)
ev = ParametrizedEvolution(H=H, mxstep=10)
# pylint:disable = use-implicit-booleaness-not-comparison
assert ev.parameters == []
assert ev.num_params == 0
assert ev.t is None
assert ev.odeint_kwargs == {"mxstep": 10}
params = []
t = 6
new_ev = ev(params, t, atol=1e-6, rtol=1e-4)
assert new_ev is not ev
assert qml.math.allequal(new_ev.parameters, params)
assert new_ev.num_params == 0
assert qml.math.allequal(new_ev.t, [0, 6])
assert new_ev.odeint_kwargs == {"mxstep": 10, "atol": 1e-6, "rtol": 1e-4}
def test_update_attributes_inside_queuing_context(self):
"""Make sure that updating a ``ParametrizedEvolution`` inside a queuing context, the initial
operator is removed from the queue."""
ops = [qml.PauliX(0), qml.PauliY(1)]
coeffs = [1, 2]
H = ParametrizedHamiltonian(coeffs, ops)
with QuantumTape() as tape:
op = qml.evolve(H)
op2 = op(params=[], t=6)
assert len(tape) == 1
assert tape[0] is op2
@pytest.mark.parametrize("time_interface", ["jax", "python", "numpy"])
def test_list_of_times(self, time_interface):
"""Test the initialization."""
import jax.numpy as jnp
ops = [qml.PauliX(0), qml.PauliY(1)]
coeffs = [1, 2]
H = ParametrizedHamiltonian(coeffs, ops)
t = {
"jax": jnp.arange(0, 10, 0.01),
"python": list(np.arange(0, 10, 0.01)),
"numpy": np.arange(0, 10, 0.01),
}[time_interface]
ev = ParametrizedEvolution(H=H, params=[], t=t)
exp_time_type = {"jax": jnp.ndarray, "python": qml.numpy.ndarray, "numpy": np.ndarray}
assert isinstance(ev.t, exp_time_type[time_interface])
assert qml.math.allclose(ev.t, t)
def test_has_matrix(self):
"""Test that a parametrized evolution has ``has_matrix=True`` only when `t` and `params` are
defined."""
ops = [qml.PauliX(0), qml.PauliY(1)]
coeffs = [1, 2]
H = ParametrizedHamiltonian(coeffs, ops)
ev = ParametrizedEvolution(H=H)
assert ev.has_matrix is False
new_ev = ev(params=[], t=3)
assert new_ev.has_matrix is True
def test_evolve_with_operator_without_matrix_raises_error(self):
"""Test that an error is raised when an ``ParametrizedEvolution`` operator is initialized with a
``ParametrizedHamiltonian`` that contains an operator without a matrix defined."""
ops = [qml.PauliX(0), MyOp(phi=0, wires=0)]
coeffs = [1, 2]
H = ParametrizedHamiltonian(coeffs, ops)
with pytest.raises(
ValueError,
match="All operators inside the parametrized hamiltonian must have a matrix defined",
):
_ = ParametrizedEvolution(H=H, params=[], t=2)
def test_hash_with_data(self):
"""Test that the hash of a ParametrizedEvolution takes all attributes into account."""
H_0 = 0.2 * qml.PauliZ(0) + qml.pulse.constant * (qml.PauliX(0) @ qml.PauliY(1))
H_1 = 0.2 * qml.PauliX(0) + qml.pulse.constant * (qml.PauliX(0) @ qml.PauliY(1))
params_0 = [np.array(0.4)]
params_1 = [np.array(0.43)]
t_0 = (0.3, 0.4)
t_1 = (0.3, 0.5)
atol_0 = 1e-8
atol_1 = 1e-7
compare_to = ParametrizedEvolution(H_0, params_0, t_0, False, False, atol=atol_0)
equal = ParametrizedEvolution(H_0, params_0, t_0, False, False, atol=atol_0)
diff_H = ParametrizedEvolution(H_1, params_0, t_0, False, False, atol=atol_0)
diff_params = ParametrizedEvolution(H_0, params_1, t_0, False, False, atol=atol_0)
diff_t = ParametrizedEvolution(H_0, params_0, t_1, False, False, atol=atol_0)
diff_atol = ParametrizedEvolution(H_0, params_0, t_0, False, False, atol=atol_1)
diff_ret_intmdt = ParametrizedEvolution(H_0, params_0, t_0, True, False, atol=atol_0)
diff_complementary = ParametrizedEvolution(H_0, params_0, t_0, False, True, atol=atol_0)
assert compare_to.hash == equal.hash
assert compare_to.hash != diff_H.hash
assert compare_to.hash != diff_params.hash
assert compare_to.hash != diff_t.hash
assert compare_to.hash != diff_atol.hash
assert compare_to.hash != diff_ret_intmdt.hash
assert compare_to.hash != diff_complementary.hash
@pytest.mark.parametrize(
"params",
[
[0.2, [1, 2, 3], [4, 5, 6, 7]],
[0.2, np.array([1, 2, 3]), np.array([4, 5, 6, 7])],
[0.2, (1, 2, 3), (4, 5, 6, 7)],
],
)
def test_label(self, params):
"""Test that the label displays correctly with and without decimal and base_label"""
H = (
qml.PauliX(1)
+ qml.pulse.constant * qml.PauliY(0)
+ np.polyval * qml.PauliY(1)
+ np.polyval * qml.PauliY(1)
)
op = qml.evolve(H)(params, 2)
cache = {"matrices": []}
assert op.label() == "Parametrized\nEvolution"
assert op.label(decimals=2) == "Parametrized\nEvolution"
assert (
op.label(decimals=2, cache=cache)
== "Parametrized\nEvolution\n(p=[0.20,M0,M1], t=[0. 2.])"
)
assert op.label(base_label="my_label") == "my_label"
assert (
op.label(base_label="my_label", decimals=2, cache=cache)
== "my_label\n(p=[0.20,M0,M1], t=[0. 2.])"
)
def test_label_no_params(self):
"""Test that the label displays correctly with and without decimal and base_label"""
H = (
qml.PauliX(1)
+ qml.pulse.constant * qml.PauliY(0)
+ np.polyval * qml.PauliY(1)
+ np.polyval * qml.PauliY(1)
)
op = qml.evolve(H)
cache = {"matrices": []}
assert op.label() == "Parametrized\nEvolution"
assert op.label(decimals=2) == "Parametrized\nEvolution"
assert op.label(decimals=2, cache=cache) == "Parametrized\nEvolution"
assert op.label(base_label="my_label") == "my_label"
assert op.label(base_label="my_label", decimals=2, cache=cache) == "my_label"
def test_label_reuses_cached_matrices(self):
"""Test that the matrix is reused if it already exists in the cache, instead
of being added to the cache a second time"""
H = (
qml.PauliX(1)
+ qml.pulse.constant * qml.PauliY(0)
+ np.polyval * qml.PauliY(1)
+ np.polyval * qml.PauliY(2)
)
cache = {"matrices": []}
params1 = [3, np.array([0.23, 0.47, 5]), np.array([3.4, 6.8])]
params2 = [5.67, np.array([0.23, 0.47, 5]), np.array([[3.7, 6.2], [1.2, 4.6]])]
op1 = qml.evolve(H)(params1, 2)
op2 = qml.evolve(H)(params2, 2)
assert (
op1.label(decimals=2, cache=cache)
== "Parametrized\nEvolution\n(p=[3.00,M0,M1], t=[0. 2.])"
)
assert len(cache["matrices"]) == 2
assert np.all(cache["matrices"][0] == params1[1])
assert np.all(cache["matrices"][1] == params1[2])
assert (
op2.label(decimals=2, cache=cache)
== "Parametrized\nEvolution\n(p=[5.67,M0,M2], t=[0. 2.])"
)
assert len(cache["matrices"]) == 3
assert np.all(cache["matrices"][0] == params2[1])
assert np.all(cache["matrices"][2] == params2[2])
def test_raises_wrong_number_of_params(self):
"""Test that an error is raised when instantiating (or calling) a
ParametrizedEvolution with the wrong number of parameters."""
# This Hamiltonian expects two scalar parameters
H = time_dependent_hamiltonian()
# Too few parameters
params = (np.array(0.2),)
with pytest.raises(ValueError, match="The length of the params argument and the number"):
# Instantiating
qml.pulse.ParametrizedEvolution(H, params=params, t=0.2)
op = qml.evolve(H)
with pytest.raises(ValueError, match="The length of the params argument and the number"):
# Calling
op(params, 0.2)
# Too many parameters
params = (np.array(0.2), np.array(2.1), np.array(0.4))
with pytest.raises(ValueError, match="The length of the params argument and the number"):
# Calling
op(params, 0.2)
@pytest.mark.jax
class TestMatrix:
"""Test matrix method."""
# pylint: disable=unused-argument
def test_time_independent_hamiltonian(self):
"""Test matrix method for a time independent hamiltonian."""
H = time_independent_hamiltonian()
t = np.arange(0, 4, 0.001)
params = [1, 2]
ev = ParametrizedEvolution(H=H, params=params, t=t, hmax=1, mxstep=1e4)
true_mat = qml.math.expm(-1j * qml.matrix(H(params, t=max(t))) * max(t))
assert qml.math.allclose(ev.matrix(), true_mat, atol=1e-3)
@pytest.mark.slow
def test_time_dependent_hamiltonian(self):
"""Test matrix method for a time dependent hamiltonian. This test approximates the
time-ordered exponential with a product of exponentials using small time steps.
For more information, see https://en.wikipedia.org/wiki/Ordered_exponential."""
import jax
import jax.numpy as jnp
H = time_dependent_hamiltonian()
t = jnp.arange(0, jnp.pi / 4, 0.001)
params = [1, 2]
ev = ParametrizedEvolution(H=H, params=params, t=t, atol=1e-6, rtol=1e-6)
def generator(params):
for ti in t:
yield jax.scipy.linalg.expm(-1j * 0.001 * qml.matrix(H(params, t=ti)))
true_mat = reduce(lambda x, y: y @ x, generator(params))
assert qml.math.allclose(ev.matrix(), true_mat, atol=1e-2)
@pytest.mark.parametrize("comp", [False, True])
@pytest.mark.parametrize("len_t", [2, 6])
def test_return_intermediate_and_complementary(self, comp, len_t):
"""Test that intermediate time evolution matrices are returned."""
import jax
from jax import numpy as jnp
jax.config.update("jax_enable_x64", True)
H = time_independent_hamiltonian()
t = np.linspace(0.4, 0.7, len_t)
params = [1, 2]
ev = ParametrizedEvolution(
H=H, params=params, t=t, return_intermediate=True, complementary=comp, rtol=1e-10
)
matrices = ev.matrix()
assert isinstance(matrices, jnp.ndarray)
assert matrices.shape == (len_t, 4, 4)
H_mat = qml.matrix(H(params, t=t[-1]))
if comp:
true_matrices = [qml.math.expm(-1j * H_mat * (t[-1] - _t)) for _t in t]
else:
true_matrices = [qml.math.expm(-1j * H_mat * (_t - t[0])) for _t in t]
assert qml.math.allclose(matrices, true_matrices, atol=1e-6, rtol=0.0)
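# Added numerical sketch (not from the original file) of the approximation used in the
# time-dependent tests above and below: the time-ordered exponential is approximated by a
# product of short-time propagators applied right-to-left (earliest time first),
#     U(T) ~ expm(-i H(t_{N-1}) dt) @ ... @ expm(-i H(t_1) dt) @ expm(-i H(t_0) dt).
def _ordered_exponential(H_of_t, times):
    """Approximate the time-ordered exponential of -i * H(t) on a uniform time grid."""
    from scipy.linalg import expm

    dt = times[1] - times[0]
    mats = (expm(-1j * dt * H_of_t(t)) for t in times)
    return reduce(lambda acc, u: u @ acc, mats)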
@pytest.mark.jax
class TestIntegration:
"""Integration tests for the ParametrizedEvolution class."""
@pytest.mark.parametrize("device_class", ["DefaultQubit", "DefaultQubitJax"])
@pytest.mark.parametrize("time", [0.3, 1, [0, 2], [0.4, 2], (3, 3.1)])
@pytest.mark.parametrize("time_interface", ["python", "numpy", "jax"])
@pytest.mark.parametrize("use_jit", [False, True])
def test_time_input_formats(self, device_class, time, time_interface, use_jit):
import jax
import jax.numpy as jnp
from pennylane.devices.default_qubit_jax import DefaultQubitJax
if time_interface == "jax":
time = jnp.array(time)
elif time_interface == "numpy":
time = np.array(time)
H = qml.pulse.ParametrizedHamiltonian([2], [qml.PauliX(0)])
# This weird-looking code is a temporary solution to be able
# to access both DefaultQubit and DefaultQubitJax without
# having to break the parametrization of the test.
# Once DefaultQubitJax is removed, the 'device_class'
# parameter will be redundant and dev will always be
# default qubit.
dev = {**globals(), **locals()}[device_class](wires=1)
@qml.qnode(dev, interface="jax")
def circuit(t):
qml.evolve(H)([], t)
return qml.expval(qml.PauliZ(0))
if use_jit:
circuit = jax.jit(circuit)
res = circuit(time)
duration = time if qml.math.ndim(time) == 0 else time[1] - time[0]
assert qml.math.isclose(res, qml.math.cos(4 * duration))
@pytest.mark.parametrize("device_class", [DefaultQubit, DefaultQubitLegacy])
# pylint: disable=unused-argument
def test_time_independent_hamiltonian(self, device_class):
"""Test the execution of a time independent hamiltonian."""
import jax
import jax.numpy as jnp
H = time_independent_hamiltonian()
dev = device_class(wires=2)
t = 4
@qml.qnode(dev)
def circuit(params):
ParametrizedEvolution(H=H, params=params, t=t)
return qml.expval(qml.PauliX(0) @ qml.PauliX(1))
@jax.jit
@qml.qnode(dev)
def jitted_circuit(params):
ParametrizedEvolution(H=H, params=params, t=t)
return qml.expval(qml.PauliX(0) @ qml.PauliX(1))
@qml.qnode(dev)
def true_circuit(params):
true_mat = qml.math.expm(-1j * qml.matrix(H(params, t=t)) * t)
QubitUnitary(U=true_mat, wires=[0, 1])
return qml.expval(qml.PauliX(0) @ qml.PauliX(1))
params = jnp.array([1.0, 2.0])
assert qml.math.allclose(circuit(params), true_circuit(params), atol=1e-3)
assert qml.math.allclose(jitted_circuit(params), true_circuit(params), atol=1e-3)
assert qml.math.allclose(
jax.grad(circuit)(params), jax.grad(true_circuit)(params), atol=1e-3
)
assert qml.math.allclose(
jax.grad(jitted_circuit)(params), jax.grad(true_circuit)(params), atol=1e-3
)
@pytest.mark.parametrize("device_class", [DefaultQubit, DefaultQubitLegacy])
@pytest.mark.slow
def test_time_dependent_hamiltonian(self, device_class):
"""Test the execution of a time dependent hamiltonian. This test approximates the
time-ordered exponential with a product of exponentials using small time steps.
For more information, see https://en.wikipedia.org/wiki/Ordered_exponential."""
import jax
import jax.numpy as jnp
H = time_dependent_hamiltonian()
dev = device_class(wires=2)
t = 0.1
def generator(params):
time_step = 1e-3
times = jnp.arange(0, t, step=time_step)
for ti in times:
yield jax.scipy.linalg.expm(-1j * time_step * qml.matrix(H(params, t=ti)))
@qml.qnode(dev)
def circuit(params):
ParametrizedEvolution(H=H, params=params, t=t)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
@jax.jit
@qml.qnode(dev)
def jitted_circuit(params):
ParametrizedEvolution(H=H, params=params, t=t)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
@qml.qnode(dev)
def true_circuit(params):
true_mat = reduce(lambda x, y: y @ x, generator(params))
QubitUnitary(U=true_mat, wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
params = jnp.array([1.0, 2.0])
assert qml.math.allclose(circuit(params), true_circuit(params), atol=5e-3)
assert qml.math.allclose(jitted_circuit(params), true_circuit(params), atol=5e-3)
assert qml.math.allclose(
jax.grad(circuit)(params), jax.grad(true_circuit)(params), atol=5e-3
)
assert qml.math.allclose(
jax.grad(jitted_circuit)(params), jax.grad(true_circuit)(params), atol=5e-3
)
@pytest.mark.slow
def test_map_wires_with_time_independent_hamiltonian(self):
"""Test the wire mapping for custom wire labels works as expected with DefaultQubit"""
import jax
from jax import numpy as jnp
def f1(params, t):
return params # constant
def f2(params, t):
return params # constant
ops = [qml.PauliX("a"), qml.PauliZ("b"), qml.PauliY("a"), qml.PauliX("b")]
coeffs = [f1, f2, 4, 9]
H = ParametrizedHamiltonian(coeffs, ops)
dev = DefaultQubit()
t = 4
@qml.qnode(dev)
def circuit(params):
ParametrizedEvolution(H=H, params=params, t=t)
return qml.expval(qml.PauliX("a") @ qml.PauliX("b"))
@jax.jit
@qml.qnode(dev)
def jitted_circuit(params):
ParametrizedEvolution(H=H, params=params, t=t)
return qml.expval(qml.PauliX("a") @ qml.PauliX("b"))
@qml.qnode(dev)
def true_circuit(params):
true_mat = qml.math.expm(-1j * qml.matrix(H(params, t=t)) * t)
QubitUnitary(U=true_mat, wires=[0, 1])
return qml.expval(qml.PauliX(0) @ qml.PauliX(1))
params = jnp.array([1.0, 2.0])
assert qml.math.allclose(circuit(params), true_circuit(params), atol=1e-3)
assert qml.math.allclose(jitted_circuit(params), true_circuit(params), atol=1e-3)
assert qml.math.allclose(
jax.grad(circuit)(params), jax.grad(true_circuit)(params), atol=1e-3
)
assert qml.math.allclose(
jax.grad(jitted_circuit)(params), jax.grad(true_circuit)(params), atol=1e-3
)
@pytest.mark.parametrize("device_class", [DefaultQubit, DefaultQubitLegacy])
def test_two_commuting_parametrized_hamiltonians(self, device_class):
"""Test that the evolution of two parametrized hamiltonians that commute with each other
is equal to evolve the two hamiltonians simultaneously. This test uses 8 wires for the device
to test the case where 2 * n < N (the matrix is evolved instead of the state)."""
import jax
import jax.numpy as jnp
def f1(p, t):
return p * t
def f2(p, t):
return jnp.sin(t) * (p - 1)
coeffs = [1, f1, f2]
ops = [qml.PauliX(0), qml.PauliY(1), qml.PauliX(2)]
H1_ = qml.dot(coeffs, ops)
def f3(p, t):
return jnp.cos(t) * (p + 1)
coeffs = [7, f3]
ops = [qml.PauliX(0), qml.PauliX(2)]
H2_ = qml.dot(coeffs, ops)
dev = device_class(wires=8)
@jax.jit
@qml.qnode(dev, interface="jax")
def circuit1(params):
qml.evolve(H1_)(params[0], t=2)
qml.evolve(H2_)(params[1], t=2)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1) @ qml.PauliZ(2))
@jax.jit
@qml.qnode(dev, interface="jax")
def circuit2(params):
qml.evolve(H1_ + H2_)(params, t=2)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1) @ qml.PauliZ(2))
params1 = [(1.0, 2.0), (3.0,)]
params2 = [1.0, 2.0, 3.0]
assert qml.math.allclose(circuit1(params1), circuit2(params2), atol=5e-4)
assert qml.math.allclose(
qml.math.concatenate(jax.grad(circuit1)(params1)),
jax.grad(circuit2)(params2),
atol=5e-4,
)
def test_mixed_device(self):
"""Test mixed device integration matches that of default qubit"""
import jax
import jax.numpy as jnp
jax.config.update("jax_enable_x64", True)
mixed = qml.device("default.mixed", wires=range(3))
default = qml.device("default.qubit", wires=range(3))
coeff = [qml.pulse.pwc(5.0), qml.pulse.pwc(5.0)]
ops = [qml.PauliX(0) @ qml.PauliX(1), qml.PauliY(1) @ qml.PauliY(2)]
H_pulse = qml.dot(coeff, ops)
def circuit(x):
qml.pulse.ParametrizedEvolution(H_pulse, x, 5.0)
return qml.expval(qml.PauliZ(0))
qnode_def = qml.QNode(circuit, default, interface="jax")
qnode_mix = qml.QNode(circuit, mixed, interface="jax")
x = [jnp.arange(3, dtype=float)] * 2
res_def = qnode_def(x)
grad_def = jax.grad(qnode_def)(x)
res_mix = qnode_mix(x)
grad_mix = jax.grad(qnode_mix)(x)
assert qml.math.isclose(res_def, res_mix, atol=1e-4)
assert qml.math.allclose(grad_def, grad_mix, atol=1e-4)
def test_jitted_unitary_differentiation_sparse(self):
"""Test that the unitary can be differentiated with and without jitting using sparse matrices"""
import jax
import jax.numpy as jnp
jax.config.update("jax_enable_x64", True)
def U(params):
H = jnp.polyval * qml.PauliZ(0)
Um = qml.evolve(H, dense=False)(params, t=10.0)
return qml.matrix(Um)
params = jnp.array([[0.5]], dtype=complex)
jac = jax.jacobian(U, holomorphic=True)(params)
jac_jit = jax.jacobian(jax.jit(U), holomorphic=True)(params)
assert qml.math.allclose(jac, jac_jit)
def test_jitted_unitary_differentiation_dense(self):
"""Test that the unitary can be differentiated with and without jitting using dense matrices"""
import jax
import jax.numpy as jnp
jax.config.update("jax_enable_x64", True)
def U(params):
H = jnp.polyval * qml.PauliZ(0)
Um = qml.evolve(H, dense=True)(params, t=10.0)
return qml.matrix(Um)
params = jnp.array([[0.5]], dtype=complex)
jac = jax.jacobian(U, holomorphic=True)(params)
jac_jit = jax.jacobian(jax.jit(U), holomorphic=True)(params)
assert qml.math.allclose(jac, jac_jit)
@pytest.mark.jax
def test_map_wires():
"""Test that map wires returns a new ParametrizedEvolution, with wires updated on
both the operator and the corresponding Hamiltonian"""
def f1(p, t):
return p * t
coeffs = [2, 4, f1]
ops = [qml.PauliX("a"), qml.PauliX("b"), qml.PauliX("c")]
H = qml.dot(coeffs, ops)
op = qml.evolve(H)([3], 2)
wire_map = {"a": 3, "b": 5, "c": 7}
new_op = op.map_wires(wire_map)
assert op.wires == qml.wires.Wires(["a", "b", "c"])
assert op.H.wires == qml.wires.Wires(["a", "b", "c"])
assert new_op.wires == qml.wires.Wires([3, 5, 7])
assert new_op.H.wires == qml.wires.Wires([3, 5, 7])
id: pennylane/tests/pulse/test_parametrized_evolution.py/0
metadata: {
    "file_path": "pennylane/tests/pulse/test_parametrized_evolution.py",
    "repo_id": "pennylane",
    "token_count": 15468
}
__index_level_0__: 84
# Copyright 2018-2023 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for molecular Hamiltonians.
"""
# pylint: disable=too-many-arguments, protected-access
import pytest
import pennylane as qml
from pennylane import I, X, Y, Z
from pennylane import numpy as np
from pennylane import qchem
from pennylane.operation import active_new_opmath
test_symbols = ["C", "C", "N", "H", "H", "H", "H", "H"]
test_coordinates = np.array(
[
0.68219113,
-0.85415621,
-1.04123909,
-1.34926445,
0.23621577,
0.61794044,
1.29068294,
0.25133357,
1.40784596,
0.83525895,
-2.88939124,
-1.16974047,
1.26989596,
0.19275206,
-2.69852891,
-2.57758643,
-1.05824663,
1.61949529,
-2.17129532,
2.04090421,
0.11338357,
2.06547065,
2.00877887,
1.20186581,
]
)
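# Added note (sketch, not from the original file): the flat coordinate array stores x, y, z
# per atom in the same order as ``test_symbols``, so it can be viewed as an (n_atoms, 3) array.
_test_coordinates_by_atom = test_coordinates.reshape(len(test_symbols), 3)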
@pytest.mark.parametrize(
(
"charge",
"mult",
"package",
"nact_els",
"nact_orbs",
"mapping",
),
[
(0, 1, "pyscf", 2, 2, "jordan_WIGNER"),
(1, 2, "openfermion", 3, 4, "BRAVYI_kitaev"),
(-1, 2, "openfermion", 1, 2, "jordan_WIGNER"),
(2, 1, "pyscf", 2, 2, "BRAVYI_kitaev"),
],
)
@pytest.mark.usefixtures("skip_if_no_openfermion_support", "use_legacy_and_new_opmath")
def test_building_hamiltonian(
charge,
mult,
package,
nact_els,
nact_orbs,
mapping,
tmpdir,
):
r"""Test that the generated Hamiltonian `built_hamiltonian` is an instance of the PennyLane
Hamiltonian class and the correctness of the total number of qubits required to run the
quantum simulation. The latter is tested for different values of the molecule's charge and
for active spaces with different size"""
args = (test_symbols, test_coordinates)
kwargs = {
"charge": charge,
"mult": mult,
"method": package,
"active_electrons": nact_els,
"active_orbitals": nact_orbs,
"mapping": mapping,
"outpath": tmpdir.strpath,
}
built_hamiltonian, qubits = qchem.molecular_hamiltonian(*args, **kwargs)
if active_new_opmath():
assert not isinstance(built_hamiltonian, qml.Hamiltonian)
else:
assert isinstance(built_hamiltonian, qml.Hamiltonian)
assert qubits == 2 * nact_orbs
@pytest.mark.parametrize(
(
"charge",
"mult",
"package",
"nact_els",
"nact_orbs",
"mapping",
),
[
(0, 1, "pyscf", 2, 2, "jordan_WIGNER"),
(1, 2, "openfermion", 3, 4, "BRAVYI_kitaev"),
(-1, 2, "openfermion", 1, 2, "jordan_WIGNER"),
(2, 1, "pyscf", 2, 2, "BRAVYI_kitaev"),
],
)
@pytest.mark.usefixtures("skip_if_no_openfermion_support", "use_legacy_and_new_opmath")
def test_building_hamiltonian_molecule_class(
charge,
mult,
package,
nact_els,
nact_orbs,
mapping,
tmpdir,
):
r"""Test that the generated Hamiltonian `built_hamiltonian` using the molecule class, is an
instance of the PennyLane Hamiltonian class and the correctness of the total number of qubits
required to run the quantum simulation. The latter is tested for different values of the
molecule's charge and for active spaces with different size"""
args = qchem.Molecule(test_symbols, test_coordinates, charge=charge, mult=mult)
kwargs = {
"method": package,
"active_electrons": nact_els,
"active_orbitals": nact_orbs,
"mapping": mapping,
"outpath": tmpdir.strpath,
}
built_hamiltonian, qubits = qchem.molecular_hamiltonian(args, **kwargs)
if active_new_opmath():
assert not isinstance(built_hamiltonian, qml.Hamiltonian)
else:
assert isinstance(built_hamiltonian, qml.Hamiltonian)
assert qubits == 2 * nact_orbs
@pytest.mark.parametrize(
("symbols", "geometry", "mapping", "h_ref_data"),
[
(
["H", "H"],
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
"jordan_wigner",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# h_q = openfermion.transforms.jordan_wigner(h_f)
(
np.array(
[
0.2981788017,
0.2081336485,
0.2081336485,
0.1786097698,
0.042560361,
-0.042560361,
-0.042560361,
0.042560361,
-0.3472487379,
0.1329029281,
-0.3472487379,
0.175463289,
0.175463289,
0.1329029281,
0.1847091733,
]
),
[
I(0),
Z(0),
Z(1),
Z(0) @ Z(1),
Y(0) @ X(1) @ X(2) @ Y(3),
Y(0) @ Y(1) @ X(2) @ X(3),
X(0) @ X(1) @ Y(2) @ Y(3),
X(0) @ Y(1) @ Y(2) @ X(3),
Z(2),
Z(0) @ Z(2),
Z(3),
Z(0) @ Z(3),
Z(1) @ Z(2),
Z(1) @ Z(3),
Z(2) @ Z(3),
],
),
),
(
["H", "H"],
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
"parity",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# binary_code = openfermion.parity_code(molecule.n_qubits)
# h_q = openfermion.transforms.binary_code_transform(h_f, binary_code)
(
np.array(
[
0.2981787007221673,
0.04256036141425139,
0.04256036141425139,
0.04256036141425139,
0.04256036141425139,
0.20813364101195764,
0.20813364101195767,
0.13290292584331462,
0.13290292584331462,
0.175463287257566,
0.175463287257566,
0.17860976802544348,
-0.34724871015550757,
0.18470917137696227,
-0.3472487101555076,
]
),
[
I(0),
X(0) @ Z(1) @ X(2),
X(0) @ Z(1) @ X(2) @ Z(3),
Y(0) @ Y(2),
Y(0) @ Y(2) @ Z(3),
Z(0),
Z(0) @ Z(1),
Z(0) @ Z(1) @ Z(2),
Z(0) @ Z(1) @ Z(2) @ Z(3),
Z(0) @ Z(2),
Z(0) @ Z(2) @ Z(3),
Z(1),
Z(1) @ Z(2),
Z(1) @ Z(3),
Z(2) @ Z(3),
],
),
),
(
["H", "H"],
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
"bravyi_kitaev",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# h_q = openfermion.transforms.bravyi_kitaev(h_f)
(
np.array(
[
0.2981787007221673,
0.04256036141425139,
0.04256036141425139,
0.04256036141425139,
0.04256036141425139,
0.20813364101195764,
0.20813364101195767,
0.175463287257566,
0.175463287257566,
0.13290292584331462,
0.13290292584331462,
0.17860976802544348,
-0.3472487101555076,
0.18470917137696227,
-0.34724871015550757,
]
),
[
I(0),
X(0) @ Z(1) @ X(2),
X(0) @ Z(1) @ X(2) @ Z(3),
Y(0) @ Z(1) @ Y(2),
Y(0) @ Z(1) @ Y(2) @ Z(3),
Z(0),
Z(0) @ Z(1),
Z(0) @ Z(1) @ Z(2),
Z(0) @ Z(1) @ Z(2) @ Z(3),
Z(0) @ Z(2),
Z(0) @ Z(2) @ Z(3),
Z(1),
Z(1) @ Z(2) @ Z(3),
Z(1) @ Z(3),
Z(2),
],
),
),
(
["H", "H"],
np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]),
"jordan_wigner",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# h_q = openfermion.transforms.jordan_wigner(h_f)
(
np.array(
[
0.2981788017,
0.2081336485,
0.2081336485,
0.1786097698,
0.042560361,
-0.042560361,
-0.042560361,
0.042560361,
-0.3472487379,
0.1329029281,
-0.3472487379,
0.175463289,
0.175463289,
0.1329029281,
0.1847091733,
]
),
[
I(0),
Z(0),
Z(1),
Z(0) @ Z(1),
Y(0) @ X(1) @ X(2) @ Y(3),
Y(0) @ Y(1) @ X(2) @ X(3),
X(0) @ X(1) @ Y(2) @ Y(3),
X(0) @ Y(1) @ Y(2) @ X(3),
Z(2),
Z(0) @ Z(2),
Z(3),
Z(0) @ Z(3),
Z(1) @ Z(2),
Z(1) @ Z(3),
Z(2) @ Z(3),
],
),
),
],
)
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
def test_differentiable_hamiltonian(symbols, geometry, mapping, h_ref_data):
r"""Test that molecular_hamiltonian returns the correct Hamiltonian with the differentiable
backend."""
geometry.requires_grad = True
args = [geometry.reshape(2, 3)]
h_args = qchem.molecular_hamiltonian(
symbols, geometry, method="dhf", args=args, mapping=mapping
)[0]
geometry.requires_grad = False
h_noargs = qchem.molecular_hamiltonian(symbols, geometry, method="dhf", mapping=mapping)[0]
ops = [
qml.operation.Tensor(*op) if isinstance(op, qml.ops.Prod) else op
for op in map(qml.simplify, h_ref_data[1])
]
h_ref = qml.Hamiltonian(h_ref_data[0], ops)
h_ref_coeffs, h_ref_ops = h_ref.terms()
h_args_coeffs, h_args_ops = h_args.terms()
h_noargs_coeffs, h_noargs_ops = h_noargs.terms()
assert all(coeff.requires_grad is True for coeff in h_args_coeffs)
assert all(coeff.requires_grad is False for coeff in h_noargs_coeffs)
assert np.allclose(np.sort(h_args_coeffs), np.sort(h_ref_coeffs))
assert qml.Hamiltonian(np.ones(len(h_args_coeffs)), h_args_ops).compare(
qml.Hamiltonian(np.ones(len(h_ref_coeffs)), h_ref_ops)
)
assert np.allclose(np.sort(h_noargs_coeffs), np.sort(h_ref_coeffs))
assert qml.Hamiltonian(np.ones(len(h_noargs_coeffs)), h_noargs_ops).compare(
qml.Hamiltonian(np.ones(len(h_ref_coeffs)), h_ref_ops)
)
@pytest.mark.parametrize(
("symbols", "geometry", "mapping", "h_ref_data"),
[
(
["H", "H"],
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
"jordan_wigner",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# h_q = openfermion.transforms.jordan_wigner(h_f)
(
np.array(
[
0.2981788017,
0.2081336485,
0.2081336485,
0.1786097698,
0.042560361,
-0.042560361,
-0.042560361,
0.042560361,
-0.3472487379,
0.1329029281,
-0.3472487379,
0.175463289,
0.175463289,
0.1329029281,
0.1847091733,
]
),
[
I(0),
Z(0),
Z(1),
Z(0) @ Z(1),
Y(0) @ X(1) @ X(2) @ Y(3),
Y(0) @ Y(1) @ X(2) @ X(3),
X(0) @ X(1) @ Y(2) @ Y(3),
X(0) @ Y(1) @ Y(2) @ X(3),
Z(2),
Z(0) @ Z(2),
Z(3),
Z(0) @ Z(3),
Z(1) @ Z(2),
Z(1) @ Z(3),
Z(2) @ Z(3),
],
),
),
(
["H", "H"],
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
"parity",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# binary_code = openfermion.parity_code(molecule.n_qubits)
# h_q = openfermion.transforms.binary_code_transform(h_f, binary_code)
(
np.array(
[
0.2981787007221673,
0.04256036141425139,
0.04256036141425139,
0.04256036141425139,
0.04256036141425139,
0.20813364101195764,
0.20813364101195767,
0.13290292584331462,
0.13290292584331462,
0.175463287257566,
0.175463287257566,
0.17860976802544348,
-0.34724871015550757,
0.18470917137696227,
-0.3472487101555076,
]
),
[
I(0),
X(0) @ Z(1) @ X(2),
X(0) @ Z(1) @ X(2) @ Z(3),
Y(0) @ Y(2),
Y(0) @ Y(2) @ Z(3),
Z(0),
Z(0) @ Z(1),
Z(0) @ Z(1) @ Z(2),
Z(0) @ Z(1) @ Z(2) @ Z(3),
Z(0) @ Z(2),
Z(0) @ Z(2) @ Z(3),
Z(1),
Z(1) @ Z(2),
Z(1) @ Z(3),
Z(2) @ Z(3),
],
),
),
(
["H", "H"],
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
"bravyi_kitaev",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# h_q = openfermion.transforms.bravyi_kitaev(h_f)
(
np.array(
[
0.2981787007221673,
0.04256036141425139,
0.04256036141425139,
0.04256036141425139,
0.04256036141425139,
0.20813364101195764,
0.20813364101195767,
0.175463287257566,
0.175463287257566,
0.13290292584331462,
0.13290292584331462,
0.17860976802544348,
-0.3472487101555076,
0.18470917137696227,
-0.34724871015550757,
]
),
[
I(0),
X(0) @ Z(1) @ X(2),
X(0) @ Z(1) @ X(2) @ Z(3),
Y(0) @ Z(1) @ Y(2),
Y(0) @ Z(1) @ Y(2) @ Z(3),
Z(0),
Z(0) @ Z(1),
Z(0) @ Z(1) @ Z(2),
Z(0) @ Z(1) @ Z(2) @ Z(3),
Z(0) @ Z(2),
Z(0) @ Z(2) @ Z(3),
Z(1),
Z(1) @ Z(2) @ Z(3),
Z(1) @ Z(3),
Z(2),
],
),
),
(
["H", "H"],
np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]),
"jordan_wigner",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# h_q = openfermion.transforms.jordan_wigner(h_f)
(
np.array(
[
0.2981788017,
0.2081336485,
0.2081336485,
0.1786097698,
0.042560361,
-0.042560361,
-0.042560361,
0.042560361,
-0.3472487379,
0.1329029281,
-0.3472487379,
0.175463289,
0.175463289,
0.1329029281,
0.1847091733,
]
),
[
I(0),
Z(0),
Z(1),
Z(0) @ Z(1),
Y(0) @ X(1) @ X(2) @ Y(3),
Y(0) @ Y(1) @ X(2) @ X(3),
X(0) @ X(1) @ Y(2) @ Y(3),
X(0) @ Y(1) @ Y(2) @ X(3),
Z(2),
Z(0) @ Z(2),
Z(3),
Z(0) @ Z(3),
Z(1) @ Z(2),
Z(1) @ Z(3),
Z(2) @ Z(3),
],
),
),
],
)
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
def test_differentiable_hamiltonian_molecule_class(symbols, geometry, mapping, h_ref_data):
r"""Test that molecular_hamiltonian generated using the molecule class
returns the correct Hamiltonian with the differentiable backend."""
geometry.requires_grad = True
args = [geometry.reshape(2, 3)]
molecule = qchem.Molecule(symbols, geometry)
h_args = qchem.molecular_hamiltonian(molecule, method="dhf", args=args, mapping=mapping)[0]
geometry.requires_grad = False
molecule = qchem.Molecule(symbols, geometry)
h_noargs = qchem.molecular_hamiltonian(molecule, method="dhf", mapping=mapping)[0]
ops = [
qml.operation.Tensor(*op) if isinstance(op, qml.ops.Prod) else op
for op in map(qml.simplify, h_ref_data[1])
]
h_ref = qml.Hamiltonian(h_ref_data[0], ops)
h_ref_coeffs, h_ref_ops = h_ref.terms()
h_args_coeffs, h_args_ops = h_args.terms()
h_noargs_coeffs, h_noargs_ops = h_noargs.terms()
assert all(coeff.requires_grad is True for coeff in h_args_coeffs)
assert all(coeff.requires_grad is False for coeff in h_noargs_coeffs)
assert np.allclose(np.sort(h_args_coeffs), np.sort(h_ref_coeffs))
assert qml.Hamiltonian(np.ones(len(h_args_coeffs)), h_args_ops).compare(
qml.Hamiltonian(np.ones(len(h_ref_coeffs)), h_ref_ops)
)
assert np.allclose(np.sort(h_noargs_coeffs), np.sort(h_ref_coeffs))
assert qml.Hamiltonian(np.ones(len(h_noargs_coeffs)), h_noargs_ops).compare(
qml.Hamiltonian(np.ones(len(h_ref_coeffs)), h_ref_ops)
)
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
@pytest.mark.parametrize(
("wiremap"),
[
["a", "b", "c", "d"],
[0, "z", 3, "ancilla"],
],
)
@pytest.mark.usefixtures("skip_if_no_openfermion_support")
def test_custom_wiremap_hamiltonian_pyscf(wiremap, tmpdir):
r"""Test that the generated Hamiltonian has the correct wire labels given by a custom wiremap."""
symbols = ["H", "H"]
geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])
method = "pyscf"
hamiltonian, _ = qchem.molecular_hamiltonian(
symbols=symbols,
coordinates=geometry,
method=method,
wires=wiremap,
outpath=tmpdir.strpath,
)
assert set(hamiltonian.wires) == set(wiremap)
@pytest.mark.parametrize(
("wiremap"),
[
["a", "b", "c", "d"],
[0, "z", 3, "ancilla"],
],
)
@pytest.mark.usefixtures("skip_if_no_openfermion_support")
def test_custom_wiremap_hamiltonian_pyscf_molecule_class(wiremap, tmpdir):
r"""Test that the generated Hamiltonian has the correct wire labels given by a custom wiremap."""
symbols = ["H", "H"]
geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])
method = "pyscf"
molecule = qchem.Molecule(symbols, geometry)
hamiltonian, _ = qchem.molecular_hamiltonian(
molecule,
method=method,
wires=wiremap,
outpath=tmpdir.strpath,
)
assert set(hamiltonian.wires) == set(wiremap)
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
@pytest.mark.parametrize(
("wiremap", "args"),
[
(
[0, "z", 3, "ancilla"],
None,
),
(
[0, "z", 3, "ancilla"],
[np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])],
),
],
)
def test_custom_wiremap_hamiltonian_dhf(wiremap, args, tmpdir):
r"""Test that the generated Hamiltonian has the correct wire labels given by a custom wiremap."""
symbols = ["H", "H"]
geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])
wiremap_dict = dict(zip(range(len(wiremap)), wiremap))
hamiltonian_ref, _ = qchem.molecular_hamiltonian(
symbols=symbols,
coordinates=geometry,
args=args,
outpath=tmpdir.strpath,
)
hamiltonian, _ = qchem.molecular_hamiltonian(
symbols=symbols,
coordinates=geometry,
wires=wiremap,
args=args,
outpath=tmpdir.strpath,
)
wiremap_calc = dict(zip(list(hamiltonian_ref.wires), list(hamiltonian.wires)))
assert wiremap_calc == wiremap_dict
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
@pytest.mark.parametrize(
("wiremap", "args"),
[
(
[0, "z", 3, "ancilla"],
None,
),
(
[0, "z", 3, "ancilla"],
[np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])],
),
],
)
def test_custom_wiremap_hamiltonian_dhf_molecule_class(wiremap, args, tmpdir):
r"""Test that the generated Hamiltonian has the correct wire labels given by a custom wiremap."""
symbols = ["H", "H"]
geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])
wiremap_dict = dict(zip(range(len(wiremap)), wiremap))
molecule = qchem.Molecule(symbols, geometry)
hamiltonian_ref, _ = qchem.molecular_hamiltonian(
molecule,
args=args,
outpath=tmpdir.strpath,
)
hamiltonian, _ = qchem.molecular_hamiltonian(
molecule,
wires=wiremap,
args=args,
outpath=tmpdir.strpath,
)
wiremap_calc = dict(zip(list(hamiltonian_ref.wires), list(hamiltonian.wires)))
assert wiremap_calc == wiremap_dict
file_content = """\
2
in Angstrom
H 0.00000 0.00000 -0.35000
H 0.00000 0.00000 0.35000
"""
def test_mol_hamiltonian_with_read_structure(tmpdir):
"""Test that the pipeline of using molecular_hamiltonian with
read_structure executes without errors."""
f_name = "h2.xyz"
filename = tmpdir.join(f_name)
with open(filename, "w") as f:
f.write(file_content)
symbols, coordinates = qchem.read_structure(str(filename), outpath=tmpdir)
H, num_qubits = qchem.molecular_hamiltonian(symbols, coordinates)
assert len(H.terms()) == 2
assert num_qubits == 4
def test_mol_hamiltonian_with_read_structure_molecule_class(tmpdir):
"""Test that the pipeline of using molecular_hamiltonian with
read_structure executes without errors."""
f_name = "h2.xyz"
filename = tmpdir.join(f_name)
with open(filename, "w") as f:
f.write(file_content)
symbols, coordinates = qchem.read_structure(str(filename), outpath=tmpdir)
molecule = qchem.Molecule(symbols, coordinates)
H, num_qubits = qchem.molecular_hamiltonian(molecule)
assert len(H.terms()) == 2
assert num_qubits == 4
def test_diff_hamiltonian_error():
r"""Test that molecular_hamiltonian raises an error with unsupported mapping."""
symbols = ["H", "H"]
geometry = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
with pytest.raises(
ValueError, match="Only 'dhf', 'pyscf' and 'openfermion' backends are supported"
):
qchem.molecular_hamiltonian(symbols, geometry, method="psi4")
with pytest.raises(ValueError, match="Open-shell systems are not supported"):
qchem.molecular_hamiltonian(symbols, geometry, mult=3)
def test_pyscf_hamiltonian_error():
r"""Test that molecular_hamiltonian raises an error for open-shell systems."""
symbols = ["H", "H"]
geometry = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
with pytest.raises(ValueError, match="Open-shell systems are not supported"):
qchem.molecular_hamiltonian(symbols, geometry, mult=3, method="pyscf")
molecule = qchem.Molecule(symbols, geometry, mult=3)
with pytest.raises(ValueError, match="Open-shell systems are not supported"):
qchem.molecular_hamiltonian(molecule, method="pyscf")
def test_diff_hamiltonian_error_molecule_class():
r"""Test that molecular_hamiltonian raises an error with unsupported mapping."""
symbols = ["H", "H"]
geometry = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
molecule = qchem.Molecule(symbols, geometry)
with pytest.raises(
ValueError, match="Only 'dhf', 'pyscf' and 'openfermion' backends are supported"
):
qchem.molecular_hamiltonian(molecule, method="psi4")
with pytest.raises(ValueError, match="'bksf' is not supported."):
qchem.molecular_hamiltonian(molecule, mapping="bksf")
@pytest.mark.parametrize(
("method", "args"),
[
(
"pyscf",
None,
),
(
"dhf",
None,
),
(
"dhf",
[np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])],
),
],
)
@pytest.mark.usefixtures("skip_if_no_openfermion_support", "use_legacy_and_new_opmath")
def test_real_hamiltonian(method, args, tmpdir):
r"""Test that the generated Hamiltonian has real coefficients."""
symbols = ["H", "H"]
geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])
hamiltonian, _ = qchem.molecular_hamiltonian(
symbols=symbols,
coordinates=geometry,
method=method,
args=args,
outpath=tmpdir.strpath,
)
assert np.isrealobj(hamiltonian.terms()[0])
@pytest.mark.parametrize(
("method", "args"),
[
(
"pyscf",
None,
),
(
"dhf",
None,
),
(
"dhf",
[np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])],
),
],
)
@pytest.mark.usefixtures("skip_if_no_openfermion_support", "use_legacy_and_new_opmath")
def test_real_hamiltonian_molecule_class(method, args, tmpdir):
r"""Test that the generated Hamiltonian has real coefficients."""
symbols = ["H", "H"]
geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])
molecule = qchem.Molecule(symbols, geometry)
hamiltonian, _ = qchem.molecular_hamiltonian(
molecule,
method=method,
args=args,
outpath=tmpdir.strpath,
)
assert np.isrealobj(hamiltonian.terms()[0])
@pytest.mark.parametrize(
("symbols", "geometry", "core_ref", "one_ref", "two_ref"),
[
(
["H", "H"],
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 2.0]),
np.array([0.5]),
np.array([[-1.08269537e00, 1.88626892e-13], [1.88848936e-13, -6.04947784e-01]]),
np.array(
[
[
[[6.16219836e-01, -1.93289829e-13], [-1.93373095e-13, 2.00522469e-01]],
[[-1.93345340e-13, 2.00522469e-01], [6.13198399e-01, -1.86684002e-13]],
],
[
[[-1.93289829e-13, 6.13198399e-01], [2.00522469e-01, -1.86572979e-13]],
[[2.00522469e-01, -1.86961557e-13], [-1.86684002e-13, 6.43874664e-01]],
],
]
),
),
],
)
@pytest.mark.usefixtures("skip_if_no_openfermion_support")
def test_pyscf_integrals(symbols, geometry, core_ref, one_ref, two_ref):
r"""Test that _pyscf_integrals returns correct integrals."""
core, one, two = qchem.openfermion_pyscf._pyscf_integrals(symbols, geometry)
assert np.allclose(core, core_ref)
assert np.allclose(one, one_ref)
assert np.allclose(two, two_ref)
@pytest.mark.usefixtures("skip_if_no_openfermion_support", "use_legacy_and_new_opmath")
def test_molecule_as_kwargs(tmpdir):
r"""Test that molecular_hamiltonian function works with molecule as
keyword argument
"""
molecule = qchem.Molecule(
test_symbols,
test_coordinates,
)
built_hamiltonian, qubits = qchem.molecular_hamiltonian(
molecule=molecule,
method="pyscf",
active_electrons=2,
active_orbitals=2,
outpath=tmpdir.strpath,
)
if active_new_opmath():
assert not isinstance(built_hamiltonian, qml.Hamiltonian)
else:
assert isinstance(built_hamiltonian, qml.Hamiltonian)
assert qubits == 4
def test_error_raised_for_incompatible_type():
r"""Test that molecular_hamiltonian raises an error when input is not
a list or molecule object.
"""
with pytest.raises(
NotImplementedError,
match="molecular_hamiltonian supports only list or molecule object types.",
):
qchem.molecular_hamiltonian(symbols=1, coordinates=test_coordinates, method="dhf")
def test_error_raised_for_missing_molecule_information():
r"""Test that molecular_hamiltonian raises an error when symbols, and coordinates
information is not provided.
"""
with pytest.raises(
NotImplementedError,
match="The provided arguments do not contain information about symbols in the molecule.",
):
qchem.molecular_hamiltonian(charge=0, mult=1, method="dhf")
@pytest.mark.parametrize(
("symbols", "geometry", "charge", "mapping", "h_ref_data"),
[
(
["H", "H", "H"],
np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 2.0]]),
1,
"jordan_wigner",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# h_q = openfermion.transforms.jordan_wigner(h_f)
(
np.array(
[
1.3657458030310135,
-0.03586487568545097,
-0.03201092703651771,
0.03586487568545097,
0.03201092703651771,
-0.031492075818254375,
-0.031492075818254375,
0.037654484403957786,
-0.032229274210852504,
-0.0022222066814484463,
-0.03371428249970282,
-0.030511339285364605,
0.03586487568545097,
0.03201092703651771,
-0.03586487568545097,
-0.03201092703651771,
-0.031492075818254375,
-0.031492075818254375,
0.037654484403957786,
-0.032229274210852504,
-0.0022222066814484463,
-0.03371428249970282,
-0.030511339285364605,
0.27235785388149386,
-0.03051133928536461,
-0.03051133928536461,
0.17448913735995256,
0.11784682872956924,
0.15371170441502022,
0.1487316290904712,
0.18074255612698886,
0.031492075818254375,
-0.031492075818254375,
0.037654484403957786,
-0.032229274210852504,
-0.03371428249970282,
-0.0022222066814484463,
-0.031492075818254375,
0.031492075818254375,
0.037654484403957786,
-0.032229274210852504,
-0.03371428249970282,
-0.0022222066814484463,
0.27235785388149386,
0.15371170441502022,
0.11784682872956924,
0.18074255612698886,
0.1487316290904712,
-0.03583418633226662,
0.03583418633226662,
0.03583418633226662,
-0.03583418633226662,
-0.06458411201474276,
0.16096866344343394,
0.1288375790750158,
0.16467176540728246,
-0.06458411201474279,
0.16467176540728246,
0.1288375790750158,
-0.8044935587718376,
0.20315172438516313,
-0.8044935587718377,
]
),
[
I(0),
X(0) @ X(1) @ Y(2) @ Y(3),
X(0) @ X(1) @ Y(4) @ Y(5),
X(0) @ Y(1) @ Y(2) @ X(3),
X(0) @ Y(1) @ Y(4) @ X(5),
X(0) @ Z(1) @ X(2) @ X(3) @ Z(4) @ X(5),
X(0) @ Z(1) @ X(2) @ Y(3) @ Z(4) @ Y(5),
X(0) @ Z(1) @ Z(2) @ Z(3) @ X(4),
X(0) @ Z(1) @ Z(2) @ Z(3) @ X(4) @ Z(5),
X(0) @ Z(1) @ Z(2) @ X(4),
X(0) @ Z(1) @ Z(3) @ X(4),
X(0) @ Z(2) @ Z(3) @ X(4),
Y(0) @ X(1) @ X(2) @ Y(3),
Y(0) @ X(1) @ X(4) @ Y(5),
Y(0) @ Y(1) @ X(2) @ X(3),
Y(0) @ Y(1) @ X(4) @ X(5),
Y(0) @ Z(1) @ Y(2) @ X(3) @ Z(4) @ X(5),
Y(0) @ Z(1) @ Y(2) @ Y(3) @ Z(4) @ Y(5),
Y(0) @ Z(1) @ Z(2) @ Z(3) @ Y(4),
Y(0) @ Z(1) @ Z(2) @ Z(3) @ Y(4) @ Z(5),
Y(0) @ Z(1) @ Z(2) @ Y(4),
Y(0) @ Z(1) @ Z(3) @ Y(4),
Y(0) @ Z(2) @ Z(3) @ Y(4),
Z(0),
Z(0) @ X(1) @ Z(2) @ Z(3) @ Z(4) @ X(5),
Z(0) @ Y(1) @ Z(2) @ Z(3) @ Z(4) @ Y(5),
Z(0) @ Z(1),
Z(0) @ Z(2),
Z(0) @ Z(3),
Z(0) @ Z(4),
Z(0) @ Z(5),
X(1) @ X(2) @ Y(3) @ Y(4),
X(1) @ Y(2) @ Y(3) @ X(4),
X(1) @ Z(2) @ Z(3) @ Z(4) @ X(5),
X(1) @ Z(2) @ Z(3) @ X(5),
X(1) @ Z(2) @ Z(4) @ X(5),
X(1) @ Z(3) @ Z(4) @ X(5),
Y(1) @ X(2) @ X(3) @ Y(4),
Y(1) @ Y(2) @ X(3) @ X(4),
Y(1) @ Z(2) @ Z(3) @ Z(4) @ Y(5),
Y(1) @ Z(2) @ Z(3) @ Y(5),
Y(1) @ Z(2) @ Z(4) @ Y(5),
Y(1) @ Z(3) @ Z(4) @ Y(5),
Z(1),
Z(1) @ Z(2),
Z(1) @ Z(3),
Z(1) @ Z(4),
Z(1) @ Z(5),
X(2) @ X(3) @ Y(4) @ Y(5),
X(2) @ Y(3) @ Y(4) @ X(5),
Y(2) @ X(3) @ X(4) @ Y(5),
Y(2) @ Y(3) @ X(4) @ X(5),
Z(2),
Z(2) @ Z(3),
Z(2) @ Z(4),
Z(2) @ Z(5),
Z(3),
Z(3) @ Z(4),
Z(3) @ Z(5),
Z(4),
Z(4) @ Z(5),
Z(5),
],
),
),
(
["H", "H"],
np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]]),
0,
"parity",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# binary_code = openfermion.parity_code(molecule.n_qubits)
# h_q = openfermion.transforms.binary_code_transform(h_f, binary_code)
(
np.array(
[
-0.3596823978788041,
0.050130618654510024,
0.050130618654510024,
0.050130618654510024,
0.050130618654510024,
0.13082413502487947,
0.13082413502487947,
0.1031689785681825,
0.1031689785681825,
0.15329959722269254,
0.15329959722269254,
0.15405495529252655,
-0.11496333923452409,
0.16096866344343408,
-0.11496333923452409,
]
),
[
I(0),
X(0) @ Z(1) @ X(2),
X(0) @ Z(1) @ X(2) @ Z(3),
Y(0) @ Y(2),
Y(0) @ Y(2) @ Z(3),
Z(0),
Z(0) @ Z(1),
Z(0) @ Z(1) @ Z(2),
Z(0) @ Z(1) @ Z(2) @ Z(3),
Z(0) @ Z(2),
Z(0) @ Z(2) @ Z(3),
Z(1),
Z(1) @ Z(2),
Z(1) @ Z(3),
Z(2) @ Z(3),
],
),
),
(
["H", "H"],
np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]]),
0,
"bravyi_kitaev",
# computed with OpenFermion; data reordered
# h_mol = molecule.get_molecular_hamiltonian()
# h_f = openfermion.transforms.get_fermion_operator(h_mol)
# h_q = openfermion.transforms.bravyi_kitaev(h_f)
(
np.array(
[
-0.3596823978788041,
0.050130618654510024,
0.050130618654510024,
0.050130618654510024,
0.050130618654510024,
0.13082413502487947,
0.13082413502487947,
0.15329959722269254,
0.15329959722269254,
0.1031689785681825,
0.1031689785681825,
0.15405495529252655,
-0.11496333923452409,
0.16096866344343408,
-0.11496333923452409,
]
),
[
I(0),
X(0) @ Z(1) @ X(2),
X(0) @ Z(1) @ X(2) @ Z(3),
Y(0) @ Z(1) @ Y(2),
Y(0) @ Z(1) @ Y(2) @ Z(3),
Z(0),
Z(0) @ Z(1),
Z(0) @ Z(1) @ Z(2),
Z(0) @ Z(1) @ Z(2) @ Z(3),
Z(0) @ Z(2),
Z(0) @ Z(2) @ Z(3),
Z(1),
Z(1) @ Z(2) @ Z(3),
Z(1) @ Z(3),
Z(2),
],
),
),
],
)
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
def test_mapped_hamiltonian_pyscf_openfermion(
symbols, geometry, charge, mapping, h_ref_data, tmpdir
):
r"""Test that molecular_hamiltonian returns the correct qubit Hamiltonian with the pyscf and openfermion
    backends."""
methods = ["openfermion", "pyscf"]
for method in methods:
geometry.requires_grad = False
molecule = qchem.Molecule(symbols, geometry, charge=charge)
h = qchem.molecular_hamiltonian(
molecule, method=method, mapping=mapping, outpath=tmpdir.strpath
)[0]
ops = [
qml.operation.Tensor(*op) if isinstance(op, qml.ops.Prod) else op
for op in map(qml.simplify, h_ref_data[1])
]
h_ref = qml.Hamiltonian(h_ref_data[0], ops)
h_ref_coeffs, h_ref_ops = h_ref.terms()
h_coeffs, h_ops = h.terms()
assert np.allclose(np.sort(h_coeffs), np.sort(h_ref_coeffs))
assert qml.Hamiltonian(np.ones(len(h_coeffs)), h_ops).compare(
qml.Hamiltonian(np.ones(len(h_ref_coeffs)), h_ref_ops)
)
@pytest.mark.parametrize(
("method"),
[
"pyscf",
"dhf",
"openfermion",
],
)
def test_coordinate_units_for_molecular_hamiltonian(method, tmpdir):
r"""Test that molecular_hamiltonian generates the Hamiltonian for both Bohr and Angstrom units."""
symbols = ["H", "H"]
geometry_bohr = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
geometry_ang = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.529177210903]])
hamiltonian_bohr, _ = qchem.molecular_hamiltonian(
symbols,
geometry_bohr,
unit="bohr",
method=method,
outpath=tmpdir.strpath,
)
hamiltonian_ang, _ = qchem.molecular_hamiltonian(
symbols,
geometry_ang,
unit="angstrom",
method=method,
outpath=tmpdir.strpath,
)
qml.assert_equal(hamiltonian_ang, hamiltonian_bohr)
@pytest.mark.parametrize(
("method"),
[
"pyscf",
"dhf",
"openfermion",
],
)
def test_coordinate_units_for_molecular_hamiltonian_molecule_class(method, tmpdir):
r"""Test that molecular_hamiltonian generates the Hamiltonian for both Bohr and Angstrom units."""
symbols = ["H", "H"]
geometry_bohr = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
geometry_ang = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.529177210903]])
molecule_bohr = qchem.Molecule(symbols, geometry_bohr, unit="bohr")
hamiltonian_bohr, _ = qchem.molecular_hamiltonian(
molecule_bohr,
method=method,
outpath=tmpdir.strpath,
)
molecule_ang = qchem.Molecule(symbols, geometry_ang, unit="angstrom")
hamiltonian_ang, _ = qchem.molecular_hamiltonian(
molecule_ang,
method=method,
outpath=tmpdir.strpath,
)
qml.assert_equal(hamiltonian_ang, hamiltonian_bohr)
def test_unit_error_molecular_hamiltonian():
r"""Test that an error is raised if a wrong/not-supported unit for coordinates is entered."""
symbols = ["H", "H"]
geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
with pytest.raises(ValueError, match="The provided unit 'degrees' is not supported."):
qchem.molecular_hamiltonian(symbols, geometry, unit="degrees")
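# Hedged note (added for illustration; the helper name is an assumption and it is not
# collected by pytest): the Bohr and Angstrom geometries used in the unit tests above
# describe the same molecule because 1 bohr is approximately 0.529177210903 angstrom,
# the conversion factor appearing in those geometries.
def _example_bohr_to_angstrom(length_in_bohr=1.0):
    bohr_in_angstrom = 0.529177210903
    return length_in_bohr * bohr_in_angstrom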
|
pennylane/tests/qchem/openfermion_pyscf_tests/test_molecular_hamiltonian.py/0
|
{
"file_path": "pennylane/tests/qchem/openfermion_pyscf_tests/test_molecular_hamiltonian.py",
"repo_id": "pennylane",
"token_count": 28386
}
| 85 |
# Copyright 2018-2023 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for functions needed for performing givens decomposition of a unitary.
"""
import pytest
from scipy.stats import unitary_group
from pennylane import numpy as np
from pennylane.qchem.givens_decomposition import _givens_matrix, givens_decomposition
@pytest.mark.parametrize("left", [True, False])
@pytest.mark.parametrize(
("a", "b"),
[
(1.2, 2.3),
(1.2j, 2.3j),
(1.5 + 2.3j, 2.1 - 3.7j),
(1.0, 0.0),
(0.0, 1.0),
(1.2, 2.3j),
],
)
def test_givens_matrix(a, b, left):
r"""Test that `_givens_matrix` builds the correct Givens rotation matrices."""
grot_mat = _givens_matrix(a, b, left)
assert np.isreal(grot_mat[0, 1]) and np.isreal(grot_mat[1, 1])
rotated_vector = grot_mat @ np.array([a, b]).T
result_element = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0
rvec = np.array([0.0, result_element]).T if left else np.array([result_element, 0.0]).T
assert np.allclose(rotated_vector, rvec)
res1 = np.round(grot_mat @ grot_mat.conj().T, 5)
res2 = np.round(grot_mat.conj().T @ grot_mat, 5)
assert np.all(res1 == res2) and np.all(res1 == np.eye(2))
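# Hedged worked example (added for illustration; the helper name is an assumption and it
# is not collected by pytest): for the real pair (a, b) = (3.0, 4.0) a left-acting Givens
# matrix is expected to rotate the vector onto its second component with magnitude
# hypot(3, 4) = 5, i.e. approximately [0.0, 5.0].
def _example_givens_rotation_real_pair():
    a, b = 3.0, 4.0
    grot_mat = _givens_matrix(a, b, True)
    return grot_mat @ np.array([a, b])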
@pytest.mark.parametrize("left", [True, False])
@pytest.mark.parametrize("row", [True, False])
@pytest.mark.parametrize("indices", [[0, 1], [2, 3], [1, 4], [0, 3]])
@pytest.mark.parametrize("shape", [(5, 5), (6, 6)])
def test_givens_rotate(shape, indices, row, left):
r"""Test that Givens rotation is performed correctly for matrices built via `_givens_matrix`."""
matrix = np.random.rand(*shape) * 1j + np.random.rand(*shape)
unitary, (i, j) = matrix.copy(), indices
if row:
a, b = matrix[indices, j - 1]
grot_mat = _givens_matrix(a, b, left)
unitary[indices] = grot_mat @ unitary[indices]
res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0
if left:
assert np.isclose(unitary[i, j - 1], 0.0) and np.isclose(unitary[j, j - 1], res)
else:
assert np.isclose(unitary[i, j - 1], res) and np.isclose(unitary[j, j - 1], 0.0)
else:
a, b = matrix[j - 1, indices].T
grot_mat = _givens_matrix(a, b, left)
unitary[:, indices] = unitary[:, indices] @ grot_mat.T
res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0
if left:
assert np.isclose(unitary[j - 1, i], 0.0) and np.isclose(unitary[j - 1, j], res)
else:
assert np.isclose(unitary[j - 1, indices[0]], res) and np.isclose(
unitary[j - 1, indices[1]], 0.0
)
@pytest.mark.parametrize("shape", [2, 3, 7, 8, 15, 16])
def test_givens_decomposition(shape):
r"""Test that `givens_decomposition` perform correct Givens decomposition."""
matrix = unitary_group.rvs(shape)
phase_mat, ordered_rotations = givens_decomposition(matrix)
decomposed_matrix = np.diag(phase_mat)
for grot_mat, (i, j) in ordered_rotations:
rotation_matrix = np.eye(shape, dtype=complex)
rotation_matrix[i, i], rotation_matrix[j, j] = grot_mat[0, 0], grot_mat[1, 1]
rotation_matrix[i, j], rotation_matrix[j, i] = grot_mat[0, 1], grot_mat[1, 0]
decomposed_matrix = decomposed_matrix @ rotation_matrix
    # check if U = D x Π T_{m, n}
assert np.allclose(matrix, decomposed_matrix)
@pytest.mark.parametrize(
("unitary_matrix", "msg_match"),
[
(
np.array(
[
[0.51378719 + 0.0j, 0.0546265 + 0.79145487j, -0.2051466 + 0.2540723j],
[0.62651582 + 0.0j, -0.00828925 - 0.60570321j, -0.36704948 + 0.32528067j],
]
),
"The unitary matrix should be of shape NxN",
),
(
np.array(
[
[0.51378719 + 0.0j, 0.0546265 + 0.79145487j, -0.2051466 + 0.2540723j],
[0.62651582 + 0.0j, -0.00828925 - 0.60570321j, -0.36704948 + 0.32528067j],
]
).T,
"The unitary matrix should be of shape NxN",
),
],
)
def test_givens_decomposition_exceptions(unitary_matrix, msg_match):
"""Test that givens_decomposition throws an exception if the parameters have illegal shapes."""
with pytest.raises(ValueError, match=msg_match):
givens_decomposition(unitary_matrix)
|
pennylane/tests/qchem/test_givens_rotations.py/0
|
{
"file_path": "pennylane/tests/qchem/test_givens_rotations.py",
"repo_id": "pennylane",
"token_count": 2329
}
| 86 |
# Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the (reduced) density matrix transform."""
# pylint: disable=too-many-arguments
import pytest
import pennylane as qml
from pennylane import numpy as np
pytestmark = pytest.mark.all_interfaces
tf = pytest.importorskip("tensorflow", minversion="2.1")
torch = pytest.importorskip("torch")
jax = pytest.importorskip("jax")
angle_values = [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4, np.pi]
devices = [
"default.qubit",
"default.mixed",
]
interfaces = [
"autograd",
"torch",
"tensorflow",
"jax",
]
wires_list = [[0], [1], [0, 1], [1, 0]]
class TestDensityMatrixQNode:
"""Tests for the (reduced) density matrix for QNodes returning states."""
def test_reduced_dm_cannot_specify_device(self):
"""Test that an error is raised if a device or device wires are given
to the reduced_dm transform manually."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(params):
qml.RY(params, wires=0)
qml.CNOT(wires=[0, 1])
return qml.state()
with pytest.raises(ValueError, match="Cannot provide a 'device' value"):
_ = qml.qinfo.reduced_dm(circuit, wires=[0], device=dev)
with pytest.raises(ValueError, match="Cannot provide a 'device_wires' value"):
_ = qml.qinfo.reduced_dm(circuit, wires=[0], device_wires=dev.wires)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("interface", interfaces)
@pytest.mark.parametrize("angle", angle_values)
@pytest.mark.parametrize("wires", wires_list)
def test_density_matrix_from_qnode(self, device, wires, angle, interface, tol):
"""Test the density matrix from matrix for single wires."""
dev = qml.device(device, wires=2)
@qml.qnode(dev, interface=interface)
def circuit(x):
qml.PauliX(0)
qml.IsingXX(x, wires=[0, 1])
return qml.state()
density_matrix = qml.qinfo.reduced_dm(circuit, wires=wires)(angle)
def expected_density_matrix(x, wires):
if wires == [0]:
return [[np.sin(x / 2) ** 2, 0], [0, np.cos(x / 2) ** 2]]
if wires == [1]:
return [[np.cos(x / 2) ** 2, 0], [0, np.sin(x / 2) ** 2]]
if wires == [0, 1]:
return [
[0, 0, 0, 0],
[0, np.sin(x / 2) ** 2, 0.0 - np.cos(x / 2) * np.sin(x / 2) * 1j, 0],
[0, 0.0 + np.cos(x / 2) * np.sin(x / 2) * 1j, np.cos(x / 2) ** 2, 0],
[0, 0, 0, 0],
]
if wires == [1, 0]:
return [
[0, 0, 0, 0],
[0, np.cos(x / 2) ** 2, 0.0 + np.cos(x / 2) * np.sin(x / 2) * 1j, 0],
[0, 0.0 - np.cos(x / 2) * np.sin(x / 2) * 1j, np.sin(x / 2) ** 2, 0],
[0, 0, 0, 0],
]
return None
assert np.allclose(expected_density_matrix(angle, wires), density_matrix, atol=tol, rtol=0)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("angle", angle_values)
def test_density_matrix_wire_labels(self, device, angle, tol):
"""Test that density matrix is correct with custom wire labels"""
wires = ["a", 8]
dev = qml.device(device, wires=wires)
@qml.qnode(dev)
def circuit(x):
qml.PauliX(wires=wires[0])
qml.IsingXX(x, wires=wires)
return qml.state()
dm0 = qml.qinfo.reduced_dm(circuit, wires=[wires[0]])(angle)
dm1 = qml.qinfo.reduced_dm(circuit, wires=[wires[1]])(angle)
exp0 = np.array([[np.sin(angle / 2) ** 2, 0], [0, np.cos(angle / 2) ** 2]])
exp1 = np.array([[np.cos(angle / 2) ** 2, 0], [0, np.sin(angle / 2) ** 2]])
assert np.allclose(exp0, dm0, atol=tol)
assert np.allclose(exp1, dm1, atol=tol)
def test_qnode_not_returning_state(self):
"""Test that the QNode of reduced_dm function must return state."""
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev)
def circuit():
qml.RZ(0, wires=[0])
return qml.expval(qml.PauliX(wires=0))
with pytest.raises(ValueError, match="The qfunc measurement needs to be State"):
qml.qinfo.reduced_dm(circuit, wires=[0])()
def test_density_matrix_qnode_jax_jit(self, tol):
"""Test reduced_dm jitting for QNode."""
import jax.numpy as jnp
from jax import jit
angle = jnp.array(0.1)
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev, interface="jax-jit")
def circuit(x):
qml.IsingXX(x, wires=[0, 1])
return qml.state()
density_matrix = jit(qml.qinfo.reduced_dm(circuit, wires=[0]))(angle)
expected_density_matrix = [[np.cos(angle / 2) ** 2, 0], [0, np.sin(angle / 2) ** 2]]
assert np.allclose(density_matrix, expected_density_matrix, atol=tol, rtol=0)
def test_density_matrix_qnode_tf_jit(self):
"""Test jitting the density matrix from state vector function with Tf."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev, interface="tf")
def circuit(x):
qml.IsingXX(x, wires=[0, 1])
return qml.state()
density_matrix = tf.function(
qml.qinfo.reduced_dm(circuit, wires=[0]),
jit_compile=True,
input_signature=(tf.TensorSpec(shape=(), dtype=tf.float32),),
)
density_matrix = density_matrix(tf.Variable(0.0, dtype=tf.float32))
assert np.allclose(density_matrix, [[1, 0], [0, 0]])
c_dtypes = [np.complex64, np.complex128]
@pytest.mark.parametrize("c_dtype", c_dtypes)
@pytest.mark.parametrize("wires", wires_list)
def test_density_matrix_c_dtype(self, wires, c_dtype):
"""Test different complex dtype."""
dev = qml.device("default.qubit.legacy", wires=2, c_dtype=c_dtype)
@qml.qnode(dev, diff_method=None)
def circuit(x):
qml.IsingXX(x, wires=[0, 1])
return qml.state()
density_matrix = qml.qinfo.reduced_dm(circuit, wires=wires)(0.5)
assert density_matrix.dtype == c_dtype
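# Hedged usage sketch (added for illustration; the helper name is an assumption and it
# is not collected by pytest): tracing out one wire of a Bell state with reduced_dm is
# expected to give the maximally mixed single-qubit state [[0.5, 0], [0, 0.5]].
def _example_bell_state_reduced_dm():
    dev = qml.device("default.qubit", wires=2)
    @qml.qnode(dev)
    def bell_circuit():
        qml.Hadamard(wires=0)
        qml.CNOT(wires=[0, 1])
        return qml.state()
    return qml.qinfo.reduced_dm(bell_circuit, wires=[0])()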
class TestBroadcasting:
"""Test that the reduced_dm transform supports broadcasting"""
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("interface", interfaces)
def test_sv_broadcast(self, device, interface, tol):
"""Test that broadcasting works for circuits returning state vectors"""
dev = qml.device(device, wires=2)
@qml.qnode(dev, interface=interface)
def circuit(x):
qml.PauliX(0)
qml.IsingXX(x, wires=[0, 1])
return qml.state()
x = qml.math.asarray([0.4, 0.6, 0.8], like=interface)
density_matrix = qml.qinfo.reduced_dm(circuit, wires=[0])(x)
expected = np.zeros((3, 2, 2))
expected[:, 0, 0] = np.sin(x / 2) ** 2
expected[:, 1, 1] = np.cos(x / 2) ** 2
assert qml.math.allclose(expected, density_matrix, atol=tol)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("interface", interfaces)
def test_dm_broadcast(self, device, interface, tol):
"""Test that broadcasting works for circuits returning density matrices"""
dev = qml.device(device, wires=2)
@qml.qnode(dev, interface=interface)
def circuit(x):
qml.PauliX(0)
qml.IsingXX(x, wires=[0, 1])
return qml.density_matrix(wires=[0, 1])
x = qml.math.asarray([0.4, 0.6, 0.8], like=interface)
density_matrix = qml.qinfo.reduced_dm(circuit, wires=[0])(x)
expected = np.zeros((3, 2, 2))
expected[:, 0, 0] = np.sin(x / 2) ** 2
expected[:, 1, 1] = np.cos(x / 2) ** 2
assert qml.math.allclose(expected, density_matrix, atol=tol)
|
pennylane/tests/qinfo/test_reduced_dm.py/0
|
{
"file_path": "pennylane/tests/qinfo/test_reduced_dm.py",
"repo_id": "pennylane",
"token_count": 4055
}
| 87 |
# Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the classical shadows transforms"""
# pylint: disable=too-few-public-methods
from functools import partial
import pytest
import pennylane as qml
from pennylane import numpy as np
from pennylane.shadows.transforms import _replace_obs
def hadamard_circuit(wires, shots=10000, interface="autograd"):
"""Hadamard circuit to put all qubits in equal superposition (locally)"""
dev = qml.device("default.qubit", wires=wires, shots=shots)
@qml.qnode(dev, interface=interface)
def circuit():
for i in range(wires):
qml.Hadamard(wires=i)
return qml.classical_shadow(wires=range(wires))
return circuit
def max_entangled_circuit(wires, shots=10000, interface="autograd"):
"""maximally entangled state preparation circuit"""
dev = qml.device("default.qubit", wires=wires, shots=shots)
@qml.qnode(dev, interface=interface)
def circuit():
qml.Hadamard(wires=0)
for i in range(1, wires):
qml.CNOT(wires=[0, i])
return qml.classical_shadow(wires=range(wires))
return circuit
def qft_circuit(wires, shots=10000, interface="autograd"):
"""Quantum Fourier Transform circuit"""
dev = qml.device("default.qubit", wires=wires, shots=shots)
one_state = np.zeros(wires)
one_state[-1] = 1
@qml.qnode(dev, interface=interface)
def circuit():
qml.BasisState(one_state, wires=range(wires))
qml.QFT(wires=range(wires))
return qml.classical_shadow(wires=range(wires))
return circuit
def basic_entangler_circuit(n_wires, shots=10000, interface="autograd"):
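    """BasicEntanglerLayers circuit returning classical shadow measurements"""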
dev = qml.device("default.qubit", wires=n_wires, shots=shots)
@qml.qnode(dev, interface=interface)
def circuit(x):
qml.BasicEntanglerLayers(weights=x, wires=range(n_wires))
return qml.classical_shadow(wires=range(n_wires))
return circuit
def basic_entangler_circuit_exact_state(n_wires, sub_wires, interface="autograd"):
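    """BasicEntanglerLayers circuit returning the exact reduced density matrix of sub_wires"""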
dev = qml.device("default.qubit", wires=n_wires)
@qml.qnode(dev, interface=interface)
def circuit(x):
qml.BasicEntanglerLayers(weights=x, wires=range(n_wires))
return qml.density_matrix(sub_wires)
return circuit
def basic_entangler_circuit_exact_expval(n_wires, interface="autograd"):
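    """BasicEntanglerLayers circuit returning exact expectation values of the given observables"""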
dev = qml.device("default.qubit", wires=n_wires)
@qml.qnode(dev, interface=interface)
def circuit(x, obs):
qml.BasicEntanglerLayers(weights=x, wires=range(n_wires))
return [qml.expval(ob) for ob in obs]
return circuit
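# Hedged usage sketch (added for illustration; the helper name is an assumption and it
# is not collected by pytest): shadow_expval post-processes the classical-shadow samples
# returned by the circuits above; for the local |+> states prepared by hadamard_circuit
# the PauliX expectation value is expected to be close to 1.
def _example_shadow_expval_plus_state():
    circuit = hadamard_circuit(2, shots=1000)
    circuit = qml.shadows.shadow_expval(circuit, [qml.PauliX(0)])
    return circuit()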
class TestReplaceObs:
"""Test that the _replace_obs transform works as expected"""
def test_replace_tape(self):
"""Test that the transform works for tapes"""
tape = qml.tape.QuantumScript([], [qml.classical_shadow(wires=0)])
new_tapes, _ = _replace_obs(tape, qml.probs, wires=0)
assert len(new_tapes) == 1
assert new_tapes[0].operations == []
assert len(new_tapes[0].observables) == 1
assert isinstance(new_tapes[0].observables[0], qml.measurements.ProbabilityMP)
def test_replace_qnode(self):
"""Test that the transform works for QNodes"""
circuit = hadamard_circuit(2, shots=1000)
circuit = _replace_obs(circuit, qml.probs, wires=[0, 1])
res = circuit()
assert isinstance(res, np.ndarray)
assert res.shape == (4,)
@pytest.mark.autograd
class TestStateForward:
"""Test that the state reconstruction is correct for a variety of states"""
@pytest.mark.parametrize("wires", [1, 3])
@pytest.mark.parametrize("diffable", [True, False])
def test_hadamard_state(self, wires, diffable):
"""Test that the state reconstruction is correct for a uniform
superposition of qubits"""
circuit = hadamard_circuit(wires)
circuit = qml.shadows.shadow_state(circuit, wires=range(wires), diffable=diffable)
actual = circuit()
expected = np.ones((2**wires, 2**wires)) / (2**wires)
assert qml.math.allclose(actual, expected, atol=1e-1)
@pytest.mark.parametrize("wires", [1, 3])
@pytest.mark.parametrize("diffable", [True, False])
def test_max_entangled_state(self, wires, diffable):
"""Test that the state reconstruction is correct for a maximally entangled state"""
circuit = max_entangled_circuit(wires)
circuit = qml.shadows.shadow_state(circuit, wires=range(wires), diffable=diffable)
actual = circuit()
expected = np.zeros((2**wires, 2**wires))
expected[np.array([0, 0, -1, -1]), np.array([0, -1, 0, -1])] = 0.5
assert qml.math.allclose(actual, expected, atol=1e-1)
@pytest.mark.parametrize("diffable", [True, False])
def test_partial_state(self, diffable):
"""Test that the state reconstruction is correct for a subset
of the qubits"""
wires_list = [[0], [0, 1]]
circuit = max_entangled_circuit(3)
circuit = qml.shadows.shadow_state(circuit, wires=wires_list, diffable=diffable)
actual = circuit()
expected = [
np.array([[0.5, 0], [0, 0.5]]),
np.array([[0.5, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0.5]]),
]
assert qml.math.allclose(actual[0], expected[0], atol=1e-1)
assert qml.math.allclose(actual[1], expected[1], atol=1e-1)
def test_large_state_warning(self):
"""Test that a warning is raised when the system to get the state
of is large"""
circuit = hadamard_circuit(8, shots=1)
circuit.construct([], {})
msg = "Differentiable state reconstruction for more than 8 qubits is not recommended"
with pytest.warns(UserWarning, match=msg):
qml.shadows.shadow_state(circuit.qtape, wires=[0, 1, 2, 3, 4, 5, 6, 7], diffable=True)
def test_multi_measurement_error(self):
"""Test that an error is raised when classical shadows is returned
with other measurement processes"""
dev = qml.device("default.qubit", wires=2, shots=100)
@qml.qnode(dev)
def circuit_shadow():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.classical_shadow(wires=[0, 1]), qml.expval(qml.PauliZ(0))
res = circuit_shadow()
assert isinstance(res, tuple) and len(res) == 2
@qml.qnode(dev)
def circuit_expval():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.shadow_expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(0))
res = circuit_expval()
assert isinstance(res, tuple) and len(res) == 2
@pytest.mark.all_interfaces
class TestStateForwardInterfaces:
"""Test that state reconstruction works for all interfaces"""
@pytest.mark.parametrize("interface", ["autograd", "jax", "tf", "torch"])
@pytest.mark.parametrize("diffable", [True, False])
def test_qft_state(self, interface, diffable):
"""Test that the state reconstruction is correct for a QFT state"""
circuit = qft_circuit(3, interface=interface)
circuit = qml.shadows.shadow_state(circuit, wires=[0, 1, 2], diffable=diffable)
actual = circuit()
expected = np.exp(np.arange(8) * 2j * np.pi / 8) / np.sqrt(8)
expected = np.outer(expected, np.conj(expected))
assert qml.math.allclose(actual, expected, atol=1e-1)
class TestStateBackward:
"""Test that the gradient of the state reconstruction is correct"""
# make rotations close to pi / 2 to ensure gradients are not too small
x = np.random.uniform(
0.8, 2, size=qml.BasicEntanglerLayers.shape(n_layers=1, n_wires=3)
).tolist()
@pytest.mark.autograd
def test_backward_autograd(self):
"""Test the gradient of the state for the autograd interface"""
# pylint: disable=cell-var-from-loop
shadow_circuit = basic_entangler_circuit(3, shots=20000, interface="autograd")
sub_wires = [[0, 1], [1, 2]]
shadow_circuit = qml.shadows.shadow_state(shadow_circuit, wires=sub_wires, diffable=True)
x = np.array(self.x, requires_grad=True)
# for autograd in particular, take only the real part since it doesn't
# support complex differentiation
actual = qml.jacobian(lambda x: qml.math.real(qml.math.stack(shadow_circuit(x))))(x)
for act, w in zip(qml.math.unstack(actual), sub_wires):
exact_circuit = basic_entangler_circuit_exact_state(3, w, "autograd")
expected = qml.jacobian(lambda x: qml.math.real(exact_circuit(x)))(x)
assert qml.math.allclose(act, expected, atol=1e-1)
@pytest.mark.jax
def test_backward_jax(self):
"""Test the gradient of the state for the JAX interface"""
# pylint: disable=cell-var-from-loop
import jax
from jax import numpy as jnp
shadow_circuit = basic_entangler_circuit(3, shots=20000, interface="jax")
sub_wires = [[0, 1], [1, 2]]
shadow_circuit = qml.shadows.shadow_state(shadow_circuit, wires=sub_wires, diffable=True)
x = jnp.array(self.x)
actual = jax.jacobian(lambda x: qml.math.real(qml.math.stack(shadow_circuit(x))))(x)
for act, w in zip(qml.math.unstack(actual), sub_wires):
exact_circuit = basic_entangler_circuit_exact_state(3, w, "jax")
expected = jax.jacobian(lambda x: qml.math.real(exact_circuit(x)))(x)
assert qml.math.allclose(act, expected, atol=1e-1)
@pytest.mark.tf
def test_backward_tf(self):
"""Test the gradient of the state for the tensorflow interface"""
import tensorflow as tf
shadow_circuit = basic_entangler_circuit(3, shots=20000, interface="tf")
sub_wires = [[0, 1], [1, 2]]
shadow_circuit = qml.shadows.shadow_state(shadow_circuit, wires=sub_wires, diffable=True)
x = tf.Variable(self.x, dtype="float64")
with tf.GradientTape() as tape:
out = qml.math.stack(shadow_circuit(x))
actual = tape.jacobian(out, x)
for act, w in zip(qml.math.unstack(actual), sub_wires):
exact_circuit = basic_entangler_circuit_exact_state(3, w, "tf")
with tf.GradientTape() as tape2:
out2 = exact_circuit(x)
expected = tape2.jacobian(out2, x)
assert qml.math.allclose(act, expected, atol=1e-1)
@pytest.mark.torch
@pytest.mark.xfail(reason="see pytorch/pytorch/issues/94397")
def test_backward_torch(self):
"""Test the gradient of the state for the torch interface"""
import torch
shadow_circuit = basic_entangler_circuit(3, shots=20000, interface="torch")
sub_wires = [[0, 1], [1, 2]]
shadow_circuit = qml.shadows.shadow_state(shadow_circuit, wires=sub_wires, diffable=True)
x = torch.tensor(self.x, requires_grad=True)
actual = torch.autograd.functional.jacobian(lambda x: qml.math.stack(shadow_circuit(x)), x)
for act, w in zip(qml.math.unstack(actual), sub_wires):
exact_circuit = basic_entangler_circuit_exact_state(3, w, "torch")
expected = torch.autograd.functional.jacobian(exact_circuit, x)
assert qml.math.allclose(act, expected, atol=1e-1)
@pytest.mark.autograd
class TestExpvalTransform:
"""Test that the expval transform is applied correctly"""
def test_hadamard_forward(self):
"""Test that the expval estimation is correct for a uniform
superposition of qubits"""
obs = [
qml.PauliX(1),
qml.PauliX(0) @ qml.PauliX(2),
qml.PauliX(0) @ qml.Identity(1) @ qml.PauliX(2),
qml.PauliY(2),
qml.PauliY(1) @ qml.PauliZ(2),
qml.PauliX(0) @ qml.PauliY(1),
qml.PauliX(0) @ qml.PauliY(1) @ qml.Identity(2),
]
expected = [1, 1, 1, 0, 0, 0, 0]
circuit = hadamard_circuit(3, shots=100000)
circuit = qml.shadows.shadow_expval(circuit, obs)
actual = circuit()
assert qml.math.allclose(actual, expected, atol=1e-1)
def test_basic_entangler_backward(self):
"""Test the gradient of the expval transform"""
obs = [
qml.PauliX(1),
qml.PauliX(0) @ qml.PauliX(2),
qml.PauliX(0) @ qml.Identity(1) @ qml.PauliX(2),
qml.PauliY(2),
qml.PauliY(1) @ qml.PauliZ(2),
qml.PauliX(0) @ qml.PauliY(1),
qml.PauliX(0) @ qml.PauliY(1) @ qml.Identity(2),
]
shadow_circuit = basic_entangler_circuit(3, shots=20000, interface="autograd")
shadow_circuit = qml.shadows.shadow_expval(shadow_circuit, obs)
exact_circuit = basic_entangler_circuit_exact_expval(3, "autograd")
rng = np.random.default_rng(123)
x = rng.uniform(0.8, 2, size=qml.BasicEntanglerLayers.shape(n_layers=1, n_wires=3))
def shadow_cost(x):
res = shadow_circuit(x)
return qml.math.stack(res)
def exact_cost(x, obs):
res = exact_circuit(x, obs)
return qml.math.stack(res)
actual = qml.jacobian(shadow_cost)(x)
expected = qml.jacobian(exact_cost)(x, obs)
assert qml.math.allclose(actual, expected, atol=1e-1)
def test_non_shadow_error(self):
"""Test that an exception is raised when the decorated QNode does not
return shadows"""
dev = qml.device("default.qubit", wires=1, shots=100)
@partial(qml.shadows.shadow_expval, H=qml.PauliZ(0))
@qml.qnode(dev)
def circuit():
qml.Hadamard(0)
return qml.expval(qml.PauliZ(0))
msg = "Tape measurement must be ClassicalShadowMP, got 'ExpectationMP'"
with pytest.raises(ValueError, match=msg):
circuit()
|
pennylane/tests/shadow/test_shadow_transforms.py/0
|
{
"file_path": "pennylane/tests/shadow/test_shadow_transforms.py",
"repo_id": "pennylane",
"token_count": 6325
}
| 88 |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the CVNeuralNetLayers template.
"""
import numpy as np
# pylint: disable=too-few-public-methods,protected-access
import pytest
import pennylane as qml
from pennylane import numpy as pnp
from pennylane.devices import DefaultGaussian
class DummyDevice(DefaultGaussian):
"""Dummy Gaussian device to allow Kerr operations"""
_operation_map = DefaultGaussian._operation_map.copy()
_operation_map["Kerr"] = lambda *x, **y: np.identity(2)
def expected_shapes(n_layers, n_wires):
# compute the expected shapes for a given number of wires
n_if = n_wires * (n_wires - 1) // 2
expected = (
[(n_layers, n_if)] * 2
+ [(n_layers, n_wires)] * 3
+ [(n_layers, n_if)] * 2
+ [(n_layers, n_wires)] * 4
)
return expected
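# Hedged worked example (added for illustration; the helper name is an assumption and it
# is not collected by pytest): for one layer on two wires, n_if = 1, so the eleven weight
# tensors are expected to have the shapes
# [(1, 1), (1, 1), (1, 2), (1, 2), (1, 2), (1, 1), (1, 1), (1, 2), (1, 2), (1, 2), (1, 2)].
def _example_expected_shapes_two_wires():
    return expected_shapes(n_layers=1, n_wires=2)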
class TestDecomposition:
"""Tests that the template defines the correct decomposition."""
QUEUES = [
(1, ["Rotation", "Squeezing", "Rotation", "Displacement", "Kerr"], [[0]] * 5),
(
2,
[
"Beamsplitter", # Interferometer 1
"Rotation", # Interferometer 1
"Rotation", # Interferometer 1
"Squeezing",
"Squeezing",
"Beamsplitter", # Interferometer 2
"Rotation", # Interferometer 2
"Rotation", # Interferometer 2
"Displacement",
"Displacement",
"Kerr",
"Kerr",
],
[[0, 1], [0], [1], [0], [1], [0, 1], [0], [1], [0], [1], [0], [1]],
),
]
@pytest.mark.parametrize("n_wires, expected_names, expected_wires", QUEUES)
def test_expansion(self, n_wires, expected_names, expected_wires):
"""Checks the queue for the default settings."""
shapes = expected_shapes(1, n_wires)
weights = [np.random.random(shape) for shape in shapes]
op = qml.CVNeuralNetLayers(*weights, wires=range(n_wires))
tape = qml.tape.QuantumScript(op.decomposition())
i = 0
for gate in tape.operations:
if gate.name != "Interferometer":
assert gate.name == expected_names[i]
assert gate.wires.labels == tuple(expected_wires[i])
i = i + 1
else:
for gate_inter in gate.decomposition():
assert gate_inter.name == expected_names[i]
assert gate_inter.wires.labels == tuple(expected_wires[i])
i = i + 1
def test_custom_wire_labels(self, tol):
"""Test that template can deal with non-numeric, nonconsecutive wire labels."""
shapes = expected_shapes(1, 3)
weights = [np.random.random(shape) for shape in shapes]
dev = DummyDevice(wires=3)
dev2 = DummyDevice(wires=["z", "a", "k"])
@qml.qnode(dev)
def circuit():
qml.CVNeuralNetLayers(*weights, wires=range(3))
return qml.expval(qml.Identity(0))
@qml.qnode(dev2)
def circuit2():
qml.CVNeuralNetLayers(*weights, wires=["z", "a", "k"])
return qml.expval(qml.Identity("z"))
circuit()
circuit2()
assert np.allclose(dev._state[0], dev2._state[0], atol=tol, rtol=0)
assert np.allclose(dev._state[1], dev2._state[1], atol=tol, rtol=0)
class TestInputs:
"""Test inputs and pre-processing."""
def test_cvqnn_layers_exception_nlayers(self):
"""Check exception if inconsistent number of layers"""
shapes = expected_shapes(1, 2)
weights = [np.random.random(shape) for shape in shapes[:-1]]
weights += [np.random.random((2, shapes[-1][1]))]
dev = DummyDevice(wires=2)
@qml.qnode(dev)
def circuit():
qml.CVNeuralNetLayers(*weights, wires=range(2))
return qml.expval(qml.QuadX(0))
with pytest.raises(ValueError, match="The first dimension of all parameters"):
circuit()
def test_cvqnn_layers_exception_second_dim(self):
"""Check exception if wrong dimension of weights"""
shapes = expected_shapes(1, 2)
weights = [np.random.random(shape) for shape in shapes[:-1]]
weights += [np.random.random((1, shapes[-1][1] - 1))]
dev = DummyDevice(wires=2)
@qml.qnode(dev)
def circuit():
qml.CVNeuralNetLayers(*weights, wires=range(2))
return qml.expval(qml.QuadX(0))
with pytest.raises(ValueError, match="Got unexpected shape for one or more parameters"):
circuit()
def test_id(self):
"""Tests that the id attribute can be set."""
shapes = expected_shapes(1, 2)
weights = [np.random.random(shape) for shape in shapes]
template = qml.CVNeuralNetLayers(*weights, wires=range(2), id="a")
assert template.id == "a"
class TestAttributes:
"""Test methods and attributes."""
@pytest.mark.parametrize(
"n_layers, n_wires",
[
(2, 3),
(2, 1),
(2, 2),
],
)
def test_shapes(self, n_layers, n_wires, tol):
"""Test that the shape method returns the correct shapes for
the weight tensors"""
shapes = qml.CVNeuralNetLayers.shape(n_layers, n_wires)
expected = expected_shapes(n_layers, n_wires)
assert np.allclose(shapes, expected, atol=tol, rtol=0)
def circuit_template(*weights):
qml.CVNeuralNetLayers(*weights, range(2))
return qml.expval(qml.QuadX(0))
def circuit_decomposed(*weights):
# Interferometer (replace with operation once this template is refactored)
qml.Beamsplitter(weights[0][0, 0], weights[1][0, 0], wires=[0, 1])
qml.Rotation(weights[2][0, 0], wires=0)
qml.Rotation(weights[2][0, 1], wires=1)
qml.Squeezing(weights[3][0, 0], weights[4][0, 0], wires=0)
qml.Squeezing(weights[3][0, 1], weights[4][0, 1], wires=1)
# Interferometer
qml.Beamsplitter(weights[5][0, 0], weights[6][0, 0], wires=[0, 1])
qml.Rotation(weights[7][0, 0], wires=0)
qml.Rotation(weights[7][0, 1], wires=1)
qml.Displacement(weights[8][0, 0], weights[9][0, 0], wires=0)
qml.Displacement(weights[8][0, 1], weights[9][0, 1], wires=1)
qml.Kerr(weights[10][0, 0], wires=0)
qml.Kerr(weights[10][0, 1], wires=1)
return qml.expval(qml.QuadX(0))
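# Editor's sketch (illustrative addition, not part of the original test suite):
# documents the weight ordering that ``circuit_template`` and
# ``circuit_decomposed`` above assume. qml.CVNeuralNetLayers.shape returns
# eleven shapes, consumed as: interferometer-1 theta/phi, rotation angles,
# squeezing r/phi, interferometer-2 theta/phi, rotation angles,
# displacement r/phi, Kerr parameters. The helper name is hypothetical and
# pytest will not collect it.
def _sketch_cvqnn_weight_tensors(n_layers=1, n_wires=2):
    shapes = qml.CVNeuralNetLayers.shape(n_layers=n_layers, n_wires=n_wires)
    # one random tensor per shape, mirroring how the tests build their weights
    return [np.random.random(shape) for shape in shapes]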
def test_adjoint():
"""Test that the adjoint method works"""
dev = DummyDevice(wires=2)
shapes = qml.CVNeuralNetLayers.shape(n_layers=1, n_wires=2)
weights = [np.random.random(shape) for shape in shapes]
@qml.qnode(dev)
def circuit():
qml.CVNeuralNetLayers(*weights, wires=[0, 1])
qml.adjoint(qml.CVNeuralNetLayers)(*weights, wires=[0, 1])
return qml.expval(qml.QuadX(0))
assert qml.math.allclose(circuit(), 0)
class TestInterfaces:
"""Tests that the template is compatible with all interfaces, including the computation
of gradients."""
def test_list_and_tuples(self, tol):
"""Tests common iterables as inputs."""
shapes = expected_shapes(1, 2)
weights = [np.random.random(shape) for shape in shapes]
dev = DummyDevice(wires=2)
circuit = qml.QNode(circuit_template, dev)
circuit2 = qml.QNode(circuit_decomposed, dev)
res = circuit(*weights)
res2 = circuit2(*weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
weights_tuple = tuple(w for w in weights)
res = circuit(*weights_tuple)
res2 = circuit2(*weights_tuple)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
@pytest.mark.autograd
def test_autograd(self, tol):
"""Tests the autograd interface."""
shapes = expected_shapes(1, 2)
weights = [np.random.random(shape) for shape in shapes]
weights = [pnp.array(w, requires_grad=True) for w in weights]
dev = DummyDevice(wires=2)
circuit = qml.QNode(circuit_template, dev)
circuit2 = qml.QNode(circuit_decomposed, dev)
res = circuit(*weights)
res2 = circuit2(*weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
grad_fn = qml.grad(circuit)
grads = grad_fn(*weights)
grad_fn2 = qml.grad(circuit2)
grads2 = grad_fn2(*weights)
assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)
@pytest.mark.jax
def test_jax(self, tol):
"""Tests the jax interface."""
import jax
import jax.numpy as jnp
shapes = expected_shapes(1, 2)
weights = [np.random.random(shape) for shape in shapes]
weights = [jnp.array(w) for w in weights]
dev = DummyDevice(wires=2)
circuit = qml.QNode(circuit_template, dev)
circuit2 = qml.QNode(circuit_decomposed, dev)
res = circuit(*weights)
res2 = circuit2(*weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
grad_fn = jax.grad(circuit)
grads = grad_fn(*weights)
grad_fn2 = jax.grad(circuit2)
grads2 = grad_fn2(*weights)
assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)
@pytest.mark.tf
def test_tf(self, tol):
"""Tests the tf interface."""
import tensorflow as tf
shapes = expected_shapes(1, 2)
weights = [np.random.random(shape) for shape in shapes]
weights = [tf.Variable(w) for w in weights]
dev = DummyDevice(wires=2)
circuit = qml.QNode(circuit_template, dev)
circuit2 = qml.QNode(circuit_decomposed, dev)
res = circuit(*weights)
res2 = circuit2(*weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
with tf.GradientTape() as tape:
res = circuit(*weights)
grads = tape.gradient(res, [*weights])
with tf.GradientTape() as tape2:
res2 = circuit2(*weights)
grads2 = tape2.gradient(res2, [*weights])
assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)
@pytest.mark.torch
def test_torch(self, tol):
"""Tests the torch interface."""
import torch
shapes = expected_shapes(1, 2)
weights = [np.random.random(size=shape) for shape in shapes]
weights = [torch.tensor(w, requires_grad=True) for w in weights]
dev = DummyDevice(wires=2)
circuit = qml.QNode(circuit_template, dev)
circuit2 = qml.QNode(circuit_decomposed, dev)
res = circuit(*weights)
res2 = circuit2(*weights)
assert qml.math.allclose(res, res2, atol=tol, rtol=0)
res = circuit(*weights)
res.backward()
grads = [w.grad for w in weights]
res2 = circuit2(*weights)
res2.backward()
grads2 = [w.grad for w in weights]
assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)
|
pennylane/tests/templates/test_layers/test_cv_neural_net.py/0
|
{
"file_path": "pennylane/tests/templates/test_layers/test_cv_neural_net.py",
"repo_id": "pennylane",
"token_count": 5272
}
| 89 |
# Copyright 2018-2023 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the AQFT template.
"""
import numpy as np
import pytest
import pennylane as qml
def test_standard_validity():
"""Check the operation using the assert_valid function."""
op = qml.AQFT(order=2, wires=(0, 1, 2))
qml.ops.functions.assert_valid(op)
class TestAQFT:
"""Tests for the aqft operations"""
@pytest.mark.parametrize("order,n_qubits", [(o, w) for w in range(2, 10) for o in range(1, w)])
def test_AQFT_adjoint_identity(self, order, n_qubits, tol):
"""Test if after using the qml.adjoint transform the resulting operation is
the inverse of AQFT."""
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def circ(n_qubits, order):
qml.adjoint(qml.AQFT)(order=order, wires=range(n_qubits))
qml.AQFT(order=order, wires=range(n_qubits))
return qml.state()
state = circ(n_qubits, order)
assert np.allclose(state[0], 1, atol=tol)
for i in range(1, n_qubits):
    assert np.allclose(state[i], 0, atol=tol)
@pytest.mark.parametrize("order", [-1, -5.4])
def test_negative_order(self, order):
"""Test if ValueError is raised for negative orders"""
with pytest.raises(ValueError, match="Order can not be less than 0"):
qml.AQFT(order=order, wires=range(5))
@pytest.mark.parametrize("order", [1.2, 4.6])
def test_float_order(self, order):
"""Test if float order is handled correctly"""
with pytest.warns(UserWarning, match="The order must be an integer"):
op = qml.AQFT(order=order, wires=range(9))
assert op.hyperparameters["order"] == round(order)
@pytest.mark.parametrize("wires", range(3, 10))
def test_zero_order(self, wires):
"""Test if Hadamard transform is applied for zero order"""
with pytest.warns(UserWarning, match="order=0"):
op = qml.AQFT(order=0, wires=range(wires))
for gate in op.decomposition()[: -wires // 2]:
assert gate.name == "Hadamard"
@pytest.mark.parametrize("order", [4, 5, 6])
def test_higher_order(self, order):
"""Test if higher order recommends using QFT"""
with pytest.warns(UserWarning, match="Using the QFT class is recommended in this case"):
qml.AQFT(order=order, wires=range(5))
@pytest.mark.parametrize("wires", [3, 4, 5, 6, 7, 8, 9])
def test_matrix_higher_order(self, wires):
"""Test if the matrix from AQFT and QFT are same for higher order"""
m1 = qml.matrix(qml.AQFT(order=10, wires=range(wires)))
m2 = qml.matrix(qml.QFT(wires=range(wires)))
assert np.allclose(m1, m2)
@pytest.mark.parametrize("order,wires", [(o, w) for w in range(2, 10) for o in range(1, w)])
def test_gates(self, order, wires):
"""Test if the AQFT operation consists of only 3 type of gates"""
op = qml.AQFT(order=order, wires=range(wires))
decomp = op.decomposition()
for gate in decomp:
assert gate.name in ["Hadamard", "ControlledPhaseShift", "SWAP"]
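# Editor's sketch (illustrative, not part of the original tests): assuming the
# public qml.AQFT / qml.QFT APIs used above, a low order only approximates the
# full QFT, while the matrices coincide once the order is large enough
# (cf. test_matrix_higher_order).
def _sketch_aqft_approximation_error(n_qubits=4, order=1):
    approx = qml.matrix(qml.AQFT(order=order, wires=range(n_qubits)))
    exact = qml.matrix(qml.QFT(wires=range(n_qubits)))
    # Frobenius-norm distance between the approximate and exact transforms
    return np.linalg.norm(approx - exact)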
|
pennylane/tests/templates/test_subroutines/test_aqft.py/0
|
{
"file_path": "pennylane/tests/templates/test_subroutines/test_aqft.py",
"repo_id": "pennylane",
"token_count": 1509
}
| 90 |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the Permute template.
"""
import numpy as np
# pylint: disable=too-many-arguments
import pytest
import pennylane as qml
def test_standard_validity():
"""Check the operation using the assert_valid function."""
op = qml.Permute([0, 1, 2, 3], wires=(3, 2, 1, 0))
qml.ops.functions.assert_valid(op)
def test_repr():
op = qml.Permute([2, 1, 0], wires=(0, 1, 2))
assert repr(op) == "Permute((2, 1, 0), wires=[0, 1, 2])"
class TestDecomposition:
"""Tests that the template defines the correct decomposition."""
def test_identity_permutation_qnode(self, mocker):
"""Test that identity permutations have no effect on QNodes."""
dev = qml.device("default.qubit", wires=4)
@qml.qnode(dev, interface="autograd")
def identity_permutation():
qml.Permute([0, 1, 2, 3], wires=dev.wires)
return qml.expval(qml.PauliZ(0))
spy = mocker.spy(identity_permutation.device, "execute")
identity_permutation()
# expand the Permute operation
tape = spy.call_args[0][0][0]
assert len(tape.operations) == 0
def test_identity_permutation_tape(self):
"""Test that identity permutations have no effect on tapes."""
with qml.queuing.AnnotatedQueue() as q:
qml.Permute([0, "a", "c", "d"], wires=[0, "a", "c", "d"])
tape = qml.tape.QuantumScript.from_queue(q)
# expand the Permute operation
tape = tape.expand()
assert len(tape.operations) == 0
@pytest.mark.parametrize(
"permutation_order,expected_wires",
[
([1, 0], [(0, 1)]),
([1, 0, 2], [(0, 1)]),
([1, 0, 2, 3], [(0, 1)]),
([0, 2, 1, 3], [(1, 2)]),
([2, 3, 0, 1], [(0, 2), (1, 3)]),
],
)
def test_two_cycle_permutations_qnode(self, mocker, permutation_order, expected_wires):
"""Test some two-cycles on QNodes."""
dev = qml.device("default.qubit", wires=len(permutation_order))
@qml.qnode(dev, interface="autograd")
def two_cycle():
qml.Permute(permutation_order, wires=dev.wires)
return qml.expval(qml.PauliZ(0))
spy = mocker.spy(two_cycle.device, "execute")
two_cycle()
tape = spy.call_args[0][0][0]
# Check that the Permute operation was expanded to SWAPs when the QNode
# is evaluated, and that the wires are the same
assert all(op.name == "SWAP" for op in tape.operations)
assert [op.wires.labels for op in tape.operations] == expected_wires
@pytest.mark.parametrize(
# For tape need to specify the wire labels
"permutation_order,wire_order,expected_wires",
[
([1, 0], [0, 1], [(0, 1)]),
([1, 0, 2], [0, 1, 2], [(0, 1)]),
([1, 0, 2, 3], [0, 1, 2, 3], [(0, 1)]),
([0, 2, 1, 3], [0, 1, 2, 3], [(1, 2)]),
([2, 3, 0, 1], [0, 1, 2, 3], [(0, 2), (1, 3)]),
(["a", "b", 0, 1], [0, 1, "a", "b"], [(0, "a"), (1, "b")]),
],
)
def test_two_cycle_permutations_tape(self, permutation_order, wire_order, expected_wires):
"""Test some two-cycles on tapes."""
with qml.queuing.AnnotatedQueue() as q:
qml.Permute(permutation_order, wire_order)
tape = qml.tape.QuantumScript.from_queue(q)
# expand the Permute operation
tape = tape.expand()
# Ensure all operations are SWAPs, and that the wires are the same
assert all(op.name == "SWAP" for op in tape.operations)
assert [op.wires.labels for op in tape.operations] == expected_wires
@pytest.mark.parametrize(
"permutation_order,expected_wires",
[
([1, 2, 0], [(0, 1), (1, 2)]),
([3, 0, 1, 2], [(0, 3), (1, 3), (2, 3)]),
([1, 2, 3, 0], [(0, 1), (1, 2), (2, 3)]),
],
)
def test_cyclic_permutations_qnode(self, mocker, permutation_order, expected_wires):
"""Test more general cycles on QNodes."""
dev = qml.device("default.qubit", wires=len(permutation_order))
@qml.qnode(dev, interface="autograd")
def cycle():
qml.Permute(permutation_order, wires=dev.wires)
return qml.expval(qml.PauliZ(0))
spy = mocker.spy(cycle.device, "execute")
cycle()
tape = spy.call_args[0][0][0]
# Check that the Permute operation was expanded to SWAPs when the QNode
# is evaluated, and that the wires are the same
assert all(op.name == "SWAP" for op in tape.operations)
assert [op.wires.labels for op in tape.operations] == expected_wires
@pytest.mark.parametrize(
"permutation_order,wire_order,expected_wires",
[
([1, 2, 0], [0, 1, 2], [(0, 1), (1, 2)]),
(["d", "a", "b", "c"], ["a", "b", "c", "d"], [("a", "d"), ("b", "d"), ("c", "d")]),
(["b", 0, "d", "a"], ["a", "b", 0, "d"], [("a", "b"), ("b", 0), (0, "d")]),
],
)
def test_cyclic_permutations_tape(self, permutation_order, wire_order, expected_wires):
"""Test more general cycles on tapes."""
with qml.queuing.AnnotatedQueue() as q:
qml.Permute(permutation_order, wire_order)
tape = qml.tape.QuantumScript.from_queue(q)
# expand the Permute operation
tape = tape.expand()
# Ensure all operations are SWAPs, and that the wires are the same
assert all(op.name == "SWAP" for op in tape.operations)
assert [op.wires.labels for op in tape.operations] == expected_wires
@pytest.mark.parametrize(
"permutation_order,expected_wires",
[
([3, 0, 2, 1], [(0, 3), (1, 3)]),
([1, 3, 0, 4, 2], [(0, 1), (1, 3), (2, 3), (3, 4)]),
([5, 1, 4, 2, 3, 0], [(0, 5), (2, 4), (3, 4)]),
],
)
def test_arbitrary_permutations_qnode(self, mocker, permutation_order, expected_wires):
"""Test arbitrarily generated permutations on QNodes."""
dev = qml.device("default.qubit", wires=len(permutation_order))
@qml.qnode(dev, interface="autograd")
def arbitrary_perm():
qml.Permute(permutation_order, wires=dev.wires)
return qml.expval(qml.PauliZ(0))
spy = mocker.spy(arbitrary_perm.device, "execute")
arbitrary_perm()
tape = spy.call_args[0][0][0]
# Check that the Permute operation was expanded to SWAPs when the QNode
# is evaluated, and that the wires are the same
assert all(op.name == "SWAP" for op in tape.operations)
assert [op.wires.labels for op in tape.operations] == expected_wires
@pytest.mark.parametrize(
"permutation_order,wire_order,expected_wires",
[
([1, 3, 0, 2], [0, 1, 2, 3], [(0, 1), (1, 3), (2, 3)]),
(
["d", "a", "e", "b", "c"],
["a", "b", "c", "d", "e"],
[("a", "d"), ("b", "d"), ("c", "e")],
),
(
["p", "f", 4, "q", "z", 0, "c", "d"],
["z", 0, "d", "c", 4, "f", "q", "p"],
[("z", "p"), (0, "f"), ("d", 4), ("c", "q"), (4, "p")],
),
],
)
def test_arbitrary_permutations_tape(self, permutation_order, wire_order, expected_wires):
"""Test arbitrarily generated permutations on tapes."""
with qml.queuing.AnnotatedQueue() as q:
qml.Permute(permutation_order, wire_order)
tape = qml.tape.QuantumScript.from_queue(q)
# expand the Permute operation
tape = tape.expand()
# Ensure all operations are SWAPs, and that the wires are the same
assert all(op.name == "SWAP" for op in tape.operations)
assert [op.wires.labels for op in tape.operations] == expected_wires
@pytest.mark.parametrize(
"num_wires,permutation_order,wire_subset,expected_wires",
[
(3, [1, 0], [0, 1], [(0, 1)]),
(4, [3, 0, 2], [0, 2, 3], [(0, 3), (2, 3)]),
(6, [4, 2, 1, 3], [1, 2, 3, 4], [(1, 4), (3, 4)]),
],
)
def test_subset_permutations_qnode(
self, mocker, num_wires, permutation_order, wire_subset, expected_wires
):
"""Test permutation of wire subsets on QNodes."""
dev = qml.device("default.qubit", wires=num_wires)
@qml.qnode(dev, interface="autograd")
def subset_perm():
qml.Permute(permutation_order, wires=wire_subset)
return qml.expval(qml.PauliZ(0))
spy = mocker.spy(subset_perm.device, "execute")
subset_perm()
tape = spy.call_args[0][0][0]
# Check that the Permute operation was expanded to SWAPs when the QNode
# is evaluated, and that the wires are the same
assert all(op.name == "SWAP" for op in tape.operations)
assert [op.wires.labels for op in tape.operations] == expected_wires
@pytest.mark.parametrize(
"wire_labels,permutation_order,wire_subset,expected_wires",
[
([0, 1, 2], [1, 0], [0, 1], [(0, 1)]),
([0, 1, 2, 3], [3, 0, 2], [0, 2, 3], [(0, 3), (2, 3)]),
(
[0, 2, "a", "c", 1, 4],
[4, "c", 2, "a"],
[2, "a", "c", 4],
[(2, 4), ("a", "c"), ("c", 4)],
),
],
)
def test_subset_permutations_tape(
self, wire_labels, permutation_order, wire_subset, expected_wires
):
"""Test permutation of wire subsets on tapes."""
with qml.queuing.AnnotatedQueue() as q:
# Make sure all the wires are actually there
for wire in wire_labels:
qml.RZ(0, wires=wire)
qml.Permute(permutation_order, wire_subset)
tape = qml.tape.QuantumScript.from_queue(q)
# expand the Permute operation
tape = tape.expand()
# Make sure to start comparison after the set of RZs have been applied
assert all(op.name == "SWAP" for op in tape.operations[len(wire_labels) :])
assert [op.wires.labels for op in tape.operations[len(wire_labels) :]] == expected_wires
def test_custom_wire_labels(self, tol):
"""Test that template can deal with non-numeric, nonconsecutive wire labels."""
permutation = [3, 0, 2, 1]
permutation2 = ["o", "z", "k", "a"]
dev = qml.device("default.qubit", wires=4)
dev2 = qml.device("default.qubit", wires=["z", "a", "k", "o"])
@qml.qnode(dev)
def circuit():
qml.Permute(permutation, wires=range(4))
return qml.expval(qml.Identity(0)), qml.state()
@qml.qnode(dev2)
def circuit2():
qml.Permute(permutation2, wires=["z", "a", "k", "o"])
return qml.expval(qml.Identity("z")), qml.state()
res1, state1 = circuit()
res2, state2 = circuit2()
assert np.allclose(res1, res2, atol=tol, rtol=0)
assert np.allclose(state1, state2, atol=tol, rtol=0)
class TestInputs:
"""Test inputs and pre-processing."""
@pytest.mark.parametrize(
"permutation_order,expected_error_message",
[
([0], "Permutations must involve at least 2 qubits."),
([0, 1, 2], "Permutation must specify outcome of all wires."),
([0, 1, 1, 3], "Values in a permutation must all be unique"),
([4, 3, 2, 1], "not present in wire set"),
],
)
def test_invalid_inputs_qnodes(self, permutation_order, expected_error_message):
"""Tests if errors are thrown for invalid permutations with QNodes."""
dev = qml.device("default.qubit", wires=4)
@qml.qnode(dev)
def permute_qubits():
qml.Permute(permutation_order, wires=dev.wires)
return qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError, match=expected_error_message):
permute_qubits()
@pytest.mark.parametrize(
"permutation_order,expected_error_message",
[
([0], "Permutations must involve at least 2 qubits."),
([2, "c", "a", 0], "Permutation must specify outcome of all wires."),
([2, "a", "c", "c", 1], "Values in a permutation must all be unique"),
([2, "a", "d", "c", 1], r"not present in wire set"),
],
)
def test_invalid_inputs_tape(self, permutation_order, expected_error_message):
"""Tests if errors are thrown for invalid permutations with tapes."""
wire_labels = [0, 2, "a", "c", 1]
with qml.queuing.AnnotatedQueue() as q:
with pytest.raises(ValueError, match=expected_error_message):
qml.Permute(permutation_order, wires=wire_labels)
qml.tape.QuantumScript.from_queue(q)
def test_id(self):
"""Tests that the id attribute can be set."""
template = qml.Permute([0, 1, 2], wires=[0, 1, 2], id="a")
assert template.id == "a"
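# Editor's sketch (illustrative addition, not in the original file): Permute
# expands into the SWAPs of a cycle decomposition; e.g. the cyclic order
# [1, 2, 0] on wires [0, 1, 2] becomes SWAP(0, 1) followed by SWAP(1, 2),
# matching the parametrization in TestDecomposition above.
def _sketch_permute_swaps():
    ops = qml.Permute([1, 2, 0], wires=[0, 1, 2]).decomposition()
    return [(op.name, op.wires.labels) for op in ops]  # [("SWAP", (0, 1)), ("SWAP", (1, 2))]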
|
pennylane/tests/templates/test_subroutines/test_permute.py/0
|
{
"file_path": "pennylane/tests/templates/test_subroutines/test_permute.py",
"repo_id": "pennylane",
"token_count": 6434
}
| 91 |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the MERA template.
"""
import numpy as np
# pylint: disable=too-many-arguments,too-few-public-methods
import pytest
import pennylane as qml
from pennylane.templates.tensornetworks.mera import MERA, compute_indices
def circuit0_block(wires):
qml.PauliX(wires=wires[1])
qml.PauliZ(wires=wires[0])
def circuit1_block(weights1, weights2, weights3, wires):
qml.RX(weights1, wires=wires[0])
qml.RX(weights2, wires=wires[1])
qml.RY(weights3, wires=wires[1])
def circuit2_block(weights, wires):
qml.RY(weights[0], wires=wires[0])
qml.RY(weights[1], wires=wires[1])
def circuit2_MERA(weights, wires):
qml.RY(weights[0][0], wires=wires[1])
qml.RY(weights[0][1], wires=wires[0])
qml.RY(weights[1][0], wires=wires[2])
qml.RY(weights[1][1], wires=wires[3])
qml.RY(weights[2][0], wires=wires[3])
qml.RY(weights[2][1], wires=wires[1])
qml.RY(weights[3][0], wires=wires[0])
qml.RY(weights[3][1], wires=wires[2])
qml.RY(weights[4][0], wires=wires[0])
qml.RY(weights[4][1], wires=wires[1])
def circuit3_block(weights, wires):
SELWeights = np.array(
[[[weights[0], weights[1], weights[2]], [weights[0], weights[1], weights[2]]]]
)
qml.StronglyEntanglingLayers(SELWeights, wires)
def circuit3_MERA(weights, wires):
SELWeights1 = np.array(
[
[
[weights[0][0], weights[0][1], weights[0][2]],
[weights[0][0], weights[0][1], weights[0][2]],
]
]
)
SELWeights2 = np.array(
[
[
[weights[1][0], weights[1][1], weights[1][2]],
[weights[1][0], weights[1][1], weights[1][2]],
]
]
)
SELWeights3 = np.array(
[
[
[weights[2][0], weights[2][1], weights[2][2]],
[weights[2][0], weights[2][1], weights[2][2]],
]
]
)
SELWeights4 = np.array(
[
[
[weights[3][0], weights[3][1], weights[3][2]],
[weights[3][0], weights[3][1], weights[3][2]],
]
]
)
SELWeights5 = np.array(
[
[
[weights[4][0], weights[4][1], weights[4][2]],
[weights[4][0], weights[4][1], weights[4][2]],
]
]
)
qml.StronglyEntanglingLayers(SELWeights1, wires=wires[1::-1])
qml.StronglyEntanglingLayers(SELWeights2, wires=wires[2::])
qml.StronglyEntanglingLayers(SELWeights3, wires=[wires[3], wires[1]])
qml.StronglyEntanglingLayers(SELWeights4, wires=[wires[0], wires[2]])
qml.StronglyEntanglingLayers(SELWeights5, wires=[wires[0], wires[1]])
# pylint: disable=protected-access
def test_flatten_unflatten():
"""Tests the flatten and unflatten methods."""
def block(weights, wires):
qml.CNOT(wires=[wires[0], wires[1]])
qml.RY(weights[0], wires=wires[0])
qml.RY(weights[1], wires=wires[1])
n_wires = 4
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MERA.get_n_blocks(range(n_wires), n_block_wires)
template_weights = [[0.1, -0.3]] * n_blocks
wires = qml.wires.Wires((0, 1, 2, 3))
op = qml.MERA(wires, n_block_wires, block, n_params_block, template_weights)
data, metadata = op._flatten()
assert len(data) == 1
assert qml.math.allclose(data[0], template_weights)
assert metadata[0] == wires
assert dict(metadata[1]) == op.hyperparameters
# make sure metadata hashable
assert hash(metadata)
new_op = qml.MERA._unflatten(*op._flatten())
qml.assert_equal(new_op, op)
assert new_op._name == "MERA"  # make sure actually initialized
assert new_op is not op
class TestIndicesMERA:
"""Test function that computes MERA indices"""
@pytest.mark.parametrize(
("n_wires", "n_block_wires"),
[
(5, 3),
(9, 5),
(11, 7),
],
)
def test_exception_n_block_wires_uneven(self, n_wires, n_block_wires):
"""Verifies that an exception is raised if n_block_wires is not even."""
with pytest.raises(
ValueError, match=f"n_block_wires must be an even integer; got {n_block_wires}"
):
compute_indices(range(n_wires), n_block_wires)
@pytest.mark.parametrize(
("n_wires", "n_block_wires"),
[
(3, 4),
(6, 8),
(10, 14),
],
)
def test_exception_n_block_wires_large(self, n_wires, n_block_wires):
"""Verifies that an exception is raised when n_block_wires is too large."""
with pytest.raises(
ValueError,
match="n_block_wires must be smaller than or equal to the number of wires; "
f"got n_block_wires = {n_block_wires} and number of wires = {n_wires}",
):
compute_indices(range(n_wires), n_block_wires)
def test_exception_n_block_wires_small(self):
"""Verifies that an exception is raised when n_block_wires is less than 2."""
n_wires = 2
n_block_wires = 0
with pytest.raises(
ValueError,
match=f"number of wires in each block must be larger than or equal to 2; "
f"got n_block_wires = {n_block_wires}",
):
compute_indices(range(n_wires), n_block_wires)
@pytest.mark.parametrize(
("wires", "n_block_wires"),
[(range(5), 2), (range(12), 4), (range(16), 6)],
)
def test_warning_many_wires(self, wires, n_block_wires):
"""Verifies that a warning is raised if n_wires doesn't correspond to n_block_wires."""
n_wires = len(wires)
with pytest.warns(
Warning,
match=f"The number of wires should be n_block_wires times 2\\^n; got n_wires/n_block_wires = {n_wires/n_block_wires}",
):
compute_indices(range(n_wires), n_block_wires)
@pytest.mark.parametrize(
("wires", "n_block_wires", "expected_indices"),
[
((1, 2, 3, 4), 2, ((2, 1), (3, 4), (4, 2), (1, 3), (1, 2))),
(
range(12),
6,
(
(3, 4, 5, 0, 1, 2),
(6, 7, 8, 9, 10, 11),
(9, 10, 11, 3, 4, 5),
(0, 1, 2, 6, 7, 8),
(0, 1, 2, 3, 4, 5),
),
),
(("a", "b", "c", "d"), 2, (("b", "a"), ("c", "d"), ("d", "b"), ("a", "c"), ("a", "b"))),
],
)
def test_indices_output(self, wires, n_block_wires, expected_indices):
"""Verifies the indices are correct for both integer and string wire labels."""
indices = compute_indices(wires, n_block_wires)
assert indices == expected_indices
class TestTemplateInputs:
"""Test template inputs and pre-processing (ensure the correct exceptions are thrown for the inputs)"""
@pytest.mark.parametrize(
("block", "n_params_block", "wires", "n_block_wires", "msg_match"),
[
(None, None, [1, 2, 3, 4], 7, "n_block_wires must be an even integer; got 7"),
(
None,
None,
[1, 2, 3, 4],
6,
"n_block_wires must be smaller than or equal to the number of wires; "
"got n_block_wires = 6 and number of wires = 4",
),
(
None,
None,
[1, 2, 3, 4],
0,
"number of wires in each block must be larger than or equal to 2; "
"got n_block_wires = 0",
),
],
)
def test_exception_wrong_input(self, block, n_params_block, wires, n_block_wires, msg_match):
"""Verifies that an exception is raised if the number of wires or n_block_wires is incorrect."""
with pytest.raises(ValueError, match=msg_match):
MERA(wires, n_block_wires, block, n_params_block)
def test_warning_many_wires(self):
"""Verifies that a warning is raised if n_wires doesn't correspond to n_block_wires."""
n_block_wires = 4
wires = [1, 2, 3, 4, 5]
n_wires = len(wires)
n_params_block = 1
with pytest.warns(
Warning,
match=f"The number of wires should be n_block_wires times 2\\^n; "
f"got n_wires/n_block_wires = {n_wires/n_block_wires}",
):
MERA(wires, n_block_wires, block=None, n_params_block=n_params_block)
@pytest.mark.parametrize(
("block", "n_params_block", "wires", "n_block_wires", "block_weights", "msg_match"),
[
(
None,
2,
[1, 2, 3, 4],
2,
[[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]],
"Weights tensor must have last dimension of length 2; got 3",
),
(
None,
2,
[1, 2, 3, 4],
2,
[[1, 2], [2, 3], [4, 5], [6, 7]],
"Weights tensor must have first dimension of length 5; got 4",
),
],
)
def test_exception_wrong_weight_shape(
self, block, n_params_block, wires, n_block_wires, block_weights, msg_match
):
"""Verifies that an exception is raised if the weights shape is incorrect."""
with pytest.raises(ValueError, match=msg_match):
MERA(wires, n_block_wires, block, n_params_block, block_weights)
@pytest.mark.parametrize(
("block", "n_params_block", "wires", "n_block_wires", "template_weights"),
[
(circuit0_block, 0, [1, 2, 3, 4], 2, None),
(
circuit1_block,
3,
[1, 2, 3, 4],
2,
[
[0.1, 0.1, 0.2],
[0.2, 0.2, 0.3],
[0.2, 0.3, 0.1],
[0.1, 0.1, 0.2],
[0.2, 0.2, 0.3],
],
),
(
circuit2_block,
2,
[0, 1, 2, 3],
2,
[[0.1, 0.2], [-0.2, 0.3], [0.3, 0.4], [-0.2, 0.3], [0.1, 0.2]],
),
(
circuit3_block,
3,
[1, 2, 3, 4],
2,
[
[0.1, 0.2, 0.3],
[0.2, 0.3, -0.4],
[0.5, 0.2, 0.3],
[0.2, 0.3, -0.4],
[0.1, 0.2, 0.3],
],
),
],
)
def test_block_params(self, block, n_params_block, wires, n_block_wires, template_weights):
"""Verify that the template works with arbitrary block parameters"""
dev = qml.device("default.qubit", wires=wires)
@qml.qnode(dev)
def circuit():
qml.MERA(wires, n_block_wires, block, n_params_block, template_weights)
return qml.expval(qml.PauliZ(wires=wires[-1]))
circuit()
class TestAttributes:
"""Tests additional methods and attributes"""
@pytest.mark.parametrize(
("wires", "n_block_wires"),
[(range(7), 4), (range(13), 6)],
)
def test_get_n_blocks_warning(self, wires, n_block_wires):
"""Test that get_n_blocks() warns the user when there are too many wires."""
with pytest.warns(
Warning,
match=f"The number of wires should be n_block_wires times 2\\^n; "
f"got n_wires/n_block_wires = {len(wires)/n_block_wires}",
):
qml.MERA.get_n_blocks(wires, n_block_wires)
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize(
("wires", "n_block_wires", "expected_n_blocks"),
[
(range(4), 2, 5),
(range(5), 2, 5),
(range(6), 2, 5),
(range(10), 4, 5),
(range(25), 6, 13),
],
)
def test_get_n_blocks(self, wires, n_block_wires, expected_n_blocks):
"""Test that the number of blocks attribute returns the correct number of blocks."""
assert qml.MERA.get_n_blocks(wires, n_block_wires) == expected_n_blocks
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize(
("wires", "n_block_wires"),
[(range(4), 5), (range(9), 20)],
)
def test_get_n_blocks_error(self, wires, n_block_wires):
"""Test that the number of blocks attribute raises an error when n_block_wires is too large."""
with pytest.raises(
ValueError,
match=f"n_block_wires must be smaller than or equal to the number of wires; "
f"got n_block_wires = {n_block_wires} and number of wires = {len(wires)}",
):
qml.MERA.get_n_blocks(wires, n_block_wires)
class TestDifferentiability:
"""Test that the template is differentiable."""
@pytest.mark.parametrize(
("block", "n_params_block", "wires", "n_block_wires", "template_weights"),
[
(
circuit2_block,
2,
[0, 1, 2, 3],
2,
[[0.1, 0.2], [-0.2, 0.3], [0.3, 0.4], [0.1, 0.2], [-0.2, 0.3]],
)
],
)
def test_template_differentiable(
self, block, n_params_block, wires, n_block_wires, template_weights
):
"""Test that the template is differentiable for different inputs."""
dev = qml.device("default.qubit", wires=wires)
@qml.qnode(dev)
def circuit(template_weights):
qml.MERA(wires, n_block_wires, block, n_params_block, template_weights)
return qml.expval(qml.PauliZ(wires=wires[-1]))
qml.grad(circuit)(qml.numpy.array(template_weights, requires_grad=True))
class TestTemplateOutputs:
@pytest.mark.parametrize(
(
"block",
"n_params_block",
"wires",
"n_block_wires",
"template_weights",
"expected_circuit",
),
[
(
circuit2_block,
2,
[0, 1, 2, 3],
2,
[[0.1, 0.2], [-0.2, 0.3], [0.3, 0.4], [0.1, 0.2], [-0.2, 0.3]],
circuit2_MERA,
),
(
circuit3_block,
3,
[1, 2, 3, 4],
2,
[
[0.1, 0.2, 0.3],
[0.2, 0.3, -0.4],
[0.5, 0.2, 0.3],
[0.1, 0.2, 0.3],
[0.2, 0.3, -0.4],
],
circuit3_MERA,
),
],
)
def test_output(
self, block, n_params_block, wires, n_block_wires, template_weights, expected_circuit
):
"""Verifies that the output of the circuits is correct."""
dev = qml.device("default.qubit", wires=wires)
@qml.qnode(dev)
def circuit_template():
qml.MERA(wires, n_block_wires, block, n_params_block, template_weights)
return qml.expval(qml.PauliZ(wires=wires[1]))
template_result = circuit_template()
@qml.qnode(dev)
def circuit_manual():
expected_circuit(template_weights, wires)
return qml.expval(qml.PauliZ(wires=wires[1]))
manual_result = circuit_manual()
assert np.isclose(template_result, manual_result)
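# Editor's sketch (illustrative addition): the block ordering produced by
# compute_indices for the standard four-wire, two-wire-block MERA, matching the
# expected indices parametrized in TestIndicesMERA above (shifted to wires 0-3).
def _sketch_mera_block_indices():
    return compute_indices(range(4), 2)  # ((1, 0), (2, 3), (3, 1), (0, 2), (0, 1))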
|
pennylane/tests/templates/test_tensornetworks/test_MERA.py/0
|
{
"file_path": "pennylane/tests/templates/test_tensornetworks/test_MERA.py",
"repo_id": "pennylane",
"token_count": 8574
}
| 92 |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane` :class:`QubitDevice` class.
"""
import copy
from random import random
import numpy as np
import pytest
import pennylane as qml
from pennylane import QubitDevice
from pennylane import numpy as pnp
from pennylane.measurements import (
Expectation,
ExpectationMP,
MeasurementProcess,
Probability,
ProbabilityMP,
Sample,
SampleMP,
Shots,
State,
StateMP,
Variance,
VarianceMP,
)
from pennylane.resource import Resources
from pennylane.tape import QuantumScript
from pennylane.wires import Wires
mock_qubit_device_paulis = ["PauliX", "PauliY", "PauliZ"]
mock_qubit_device_rotations = ["RX", "RY", "RZ"]
# pylint: disable=abstract-class-instantiated, no-self-use, redefined-outer-name, invalid-name
@pytest.fixture(scope="function")
def mock_qubit_device(monkeypatch):
"""A function to create a mock device that mocks most of the methods except for e.g. probability()"""
with monkeypatch.context() as m:
m.setattr(QubitDevice, "__abstractmethods__", frozenset())
m.setattr(QubitDevice, "_capabilities", mock_qubit_device_capabilities)
m.setattr(QubitDevice, "operations", ["PauliY", "RX", "Rot"])
m.setattr(QubitDevice, "observables", ["PauliZ"])
m.setattr(QubitDevice, "short_name", "MockDevice")
m.setattr(QubitDevice, "expval", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "var", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "sample", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "apply", lambda self, *args, **kwargs: None)
def get_qubit_device(wires=1):
return QubitDevice(wires=wires)
yield get_qubit_device
@pytest.fixture(scope="function")
def mock_qubit_device_extract_stats(monkeypatch):
"""A function to create a mock device that mocks the methods related to
statistics (expval, var, sample, probability)"""
with monkeypatch.context() as m:
m.setattr(QubitDevice, "__abstractmethods__", frozenset())
m.setattr(QubitDevice, "_capabilities", mock_qubit_device_capabilities)
m.setattr(QubitDevice, "operations", ["PauliY", "RX", "Rot"])
m.setattr(QubitDevice, "observables", ["PauliZ"])
m.setattr(QubitDevice, "short_name", "MockDevice")
m.setattr(QubitDevice, "expval", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "var", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "sample", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "state", 0)
m.setattr(QubitDevice, "density_matrix", lambda self, wires=None: 0)
m.setattr(QubitDevice, "probability", lambda self, wires=None, *args, **kwargs: 0)
m.setattr(QubitDevice, "apply", lambda self, x: x)
def get_qubit_device(wires=1):
return QubitDevice(wires=wires)
yield get_qubit_device
@pytest.fixture(scope="function")
def mock_qubit_device_with_original_statistics(monkeypatch):
"""A function to create a mock device that mocks only basis methods and uses the original
statistics related methods"""
with monkeypatch.context() as m:
m.setattr(QubitDevice, "__abstractmethods__", frozenset())
m.setattr(QubitDevice, "_capabilities", mock_qubit_device_capabilities)
m.setattr(QubitDevice, "operations", ["PauliY", "RX", "Rot"])
m.setattr(QubitDevice, "observables", ["PauliZ"])
m.setattr(QubitDevice, "short_name", "MockDevice")
def get_qubit_device(wires=1):
return QubitDevice(wires=wires)
yield get_qubit_device
mock_qubit_device_capabilities = {
"measurements": "everything",
"returns_state": True,
"noise_models": ["depolarizing", "bitflip"],
}
@pytest.fixture(scope="function")
def mock_qubit_device_with_paulis_and_methods(monkeypatch):
"""A function to create a mock device that supports Paulis in its capabilities"""
with monkeypatch.context() as m:
m.setattr(QubitDevice, "__abstractmethods__", frozenset())
m.setattr(QubitDevice, "_capabilities", mock_qubit_device_capabilities)
m.setattr(QubitDevice, "operations", mock_qubit_device_paulis)
m.setattr(QubitDevice, "observables", mock_qubit_device_paulis)
m.setattr(QubitDevice, "short_name", "MockDevice")
m.setattr(QubitDevice, "expval", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "var", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "sample", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "apply", lambda self, x, rotations: None)
def get_qubit_device(wires=1):
return QubitDevice(wires=wires)
yield get_qubit_device
@pytest.fixture(scope="function")
def mock_qubit_device_with_paulis_rotations_and_methods(monkeypatch):
"""A function to create a mock device that supports Paulis in its capabilities"""
with monkeypatch.context() as m:
m.setattr(QubitDevice, "__abstractmethods__", frozenset())
m.setattr(QubitDevice, "_capabilities", mock_qubit_device_capabilities)
m.setattr(QubitDevice, "operations", mock_qubit_device_paulis + mock_qubit_device_rotations)
m.setattr(QubitDevice, "observables", mock_qubit_device_paulis)
m.setattr(QubitDevice, "short_name", "MockDevice")
m.setattr(QubitDevice, "expval", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "var", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "sample", lambda self, *args, **kwargs: 0)
m.setattr(QubitDevice, "apply", lambda self, x, **kwargs: None)
def get_qubit_device(wires=1):
return QubitDevice(wires=wires)
yield get_qubit_device
# pylint: disable=protected-access
def _working_get_batch_size(tensor, expected_shape, expected_size):
size = QubitDevice._size(tensor)
if QubitDevice._ndim(tensor) > len(expected_shape) or size > expected_size:
return size // expected_size
return None
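# Editor's note (illustrative, hypothetical values): for a tensor holding three
# broadcasted copies of a length-4 flat state, e.g.
# _working_get_batch_size(np.zeros((3, 4)), expected_shape=(4,), expected_size=4),
# the helper above reports a batch size of 3; for an unbroadcasted state of the
# expected shape it returns None.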
class TestOperations:
"""Tests the logic related to operations"""
# pylint: disable=pointless-statement
def test_op_queue_accessed_outside_execution_context(self, mock_qubit_device):
"""Tests that a call to op_queue outside the execution context raises the correct error"""
with pytest.raises(
ValueError, match="Cannot access the operation queue outside of the execution context!"
):
dev = mock_qubit_device()
dev.op_queue
def test_op_queue_is_filled_during_execution(
self, mock_qubit_device_with_paulis_and_methods, monkeypatch
):
"""Tests that the op_queue is correctly filled when apply is called and that accessing
op_queue raises no error"""
with qml.queuing.AnnotatedQueue() as q:
queue = [qml.PauliX(wires=0), qml.PauliY(wires=1), qml.PauliZ(wires=2)]
qml.expval(qml.PauliZ(0))
qml.var(qml.PauliZ(1))
tape = QuantumScript.from_queue(q)
call_history = []
with monkeypatch.context() as m:
m.setattr(
QubitDevice,
"apply",
lambda self, x, **kwargs: call_history.extend(x + kwargs.get("rotations", [])),
)
m.setattr(QubitDevice, "analytic_probability", lambda *args: None)
dev = mock_qubit_device_with_paulis_and_methods()
dev.execute(tape)
assert call_history == queue
assert len(call_history) == 3
assert isinstance(call_history[0], qml.PauliX)
assert call_history[0].wires == Wires([0])
assert isinstance(call_history[1], qml.PauliY)
assert call_history[1].wires == Wires([1])
assert isinstance(call_history[2], qml.PauliZ)
assert call_history[2].wires == Wires([2])
def test_unsupported_operations_raise_error(self, mock_qubit_device_with_paulis_and_methods):
"""Tests that the operations are properly applied and queued"""
with qml.queuing.AnnotatedQueue() as q:
_ = [qml.PauliX(wires=0), qml.PauliY(wires=1), qml.Hadamard(wires=2)]
qml.expval(qml.PauliZ(0))
qml.var(qml.PauliZ(1))
tape = QuantumScript.from_queue(q)
with pytest.raises(qml.DeviceError, match="Gate Hadamard not supported on device"):
dev = mock_qubit_device_with_paulis_and_methods()
dev.execute(tape)
numeric_queues = [
[qml.RX(0.3, wires=[0])],
[
qml.RX(0.3, wires=[0]),
qml.RX(0.4, wires=[1]),
qml.RX(0.5, wires=[2]),
],
]
observables = [[qml.PauliZ(0)], [qml.PauliX(0)], [qml.PauliY(0)]]
@pytest.mark.parametrize("observables", observables)
@pytest.mark.parametrize("queue", numeric_queues)
def test_passing_keyword_arguments_to_execute(
self, mock_qubit_device_with_paulis_rotations_and_methods, monkeypatch, queue, observables
):
"""Tests that passing keyword arguments to execute propagates those kwargs to the apply()
method"""
with qml.queuing.AnnotatedQueue() as q:
for op in queue + observables:
op.queue()
tape = QuantumScript.from_queue(q)
call_history = {}
with monkeypatch.context() as m:
m.setattr(QubitDevice, "apply", lambda self, x, **kwargs: call_history.update(kwargs))
dev = mock_qubit_device_with_paulis_rotations_and_methods()
dev.execute(tape, hash=tape.graph.hash)
assert len(call_history.items()) == 2
assert call_history["hash"] == tape.graph.hash
assert call_history["rotations"] == []
class TestObservables:
"""Tests the logic related to observables"""
# pylint: disable=no-self-use, redefined-outer-name, pointless-statement
def test_obs_queue_accessed_outside_execution_context(self, mock_qubit_device):
"""Tests that a call to op_queue outside the execution context raises the correct error"""
with pytest.raises(
ValueError,
match="Cannot access the observable value queue outside of the execution context!",
):
dev = mock_qubit_device()
dev.obs_queue
def test_unsupported_observables_raise_error(self, mock_qubit_device_with_paulis_and_methods):
"""Tests that the operations are properly applied and queued"""
with qml.queuing.AnnotatedQueue() as q:
_ = [qml.PauliX(wires=0), qml.PauliY(wires=1), qml.PauliZ(wires=2)]
qml.expval(qml.Hadamard(0))
qml.var(qml.PauliZ(1))
qml.sample(qml.PauliZ(2))
tape = QuantumScript.from_queue(q)
with pytest.raises(qml.DeviceError, match="Observable Hadamard not supported on device"):
dev = mock_qubit_device_with_paulis_and_methods()
dev.execute(tape)
def test_unsupported_observable_return_type_raise_error(
self, mock_qubit_device_with_paulis_and_methods, monkeypatch
):
"""Check that an error is raised if the return type of an observable is unsupported"""
# pylint: disable=too-few-public-methods
class UnsupportedMeasurement(MeasurementProcess):
@property
def return_type(self):
return "SomeUnsupportedReturnType"
with qml.queuing.AnnotatedQueue() as q:
qml.PauliX(wires=0)
UnsupportedMeasurement(obs=qml.PauliZ(0))
tape = QuantumScript.from_queue(q)
with monkeypatch.context() as m:
m.setattr(QubitDevice, "apply", lambda self, x, **kwargs: None)
with pytest.raises(
qml.QuantumFunctionError, match="Unsupported return type specified for observable"
):
dev = mock_qubit_device_with_paulis_and_methods()
dev.execute(tape)
# pylint: disable=too-few-public-methods
class TestParameters:
"""Test for checking device parameter mappings"""
# pylint: disable=pointless-statement
def test_parameters_accessed_outside_execution_context(self, mock_qubit_device):
"""Tests that a call to parameters outside the execution context raises the correct error"""
with pytest.raises(
ValueError,
match="Cannot access the free parameter mapping outside of the execution context!",
):
dev = mock_qubit_device()
dev.parameters
class TestExtractStatistics:
"""Test the statistics method"""
@pytest.mark.parametrize(
"measurement",
[
ExpectationMP(obs=qml.PauliX(0)),
VarianceMP(obs=qml.PauliX(0)),
SampleMP(obs=qml.PauliX(0)),
ProbabilityMP(obs=qml.PauliX(0)),
StateMP(),
],
)
def test_results_created(self, mock_qubit_device_extract_stats, measurement):
"""Tests that the statistics method simply builds a results list without any side-effects"""
qscript = QuantumScript(measurements=[measurement])
dev = mock_qubit_device_extract_stats()
results = dev.statistics(qscript)
assert results == [0]
def test_results_no_state(self, mock_qubit_device_extract_stats):
"""Tests that the statistics method raises an AttributeError when a State return type is
requested when QubitDevice does not have a state attribute"""
qscript = QuantumScript(measurements=[qml.state()])
dev = mock_qubit_device_extract_stats()
delattr(dev.__class__, "state")
_match = "The state is not available in the current"
with pytest.raises(qml.QuantumFunctionError, match=_match):
dev.statistics(qscript)
@pytest.mark.parametrize("returntype", [None])
def test_results_created_empty(self, mock_qubit_device_extract_stats, returntype):
"""Tests that the statistics method returns an empty list if the return type is None"""
class UnsupportedMeasurement(MeasurementProcess):
@property
def return_type(self):
return returntype
qscript = QuantumScript(measurements=[UnsupportedMeasurement()])
dev = mock_qubit_device_extract_stats()
results = dev.statistics(qscript)
assert results == []
@pytest.mark.parametrize("returntype", ["not None"])
def test_error_return_type_none(self, mock_qubit_device_extract_stats, returntype):
"""Tests that the statistics method raises an error if the return type is not well-defined and is not None"""
assert returntype not in [Expectation, Variance, Sample, Probability, State, None]
class UnsupportedMeasurement(MeasurementProcess):
@property
def return_type(self):
return returntype
qscript = QuantumScript(measurements=[UnsupportedMeasurement()])
with pytest.raises(qml.QuantumFunctionError, match="Unsupported return type"):
dev = mock_qubit_device_extract_stats()
dev.statistics(qscript)
class TestGenerateSamples:
"""Test the generate_samples method"""
def test_auxiliary_methods_called_correctly(self, mock_qubit_device, monkeypatch):
"""Tests that the generate_samples method calls on its auxiliary methods correctly"""
dev = mock_qubit_device()
number_of_states = 2**dev.num_wires
with monkeypatch.context() as m:
# Mock the auxiliary methods such that they return the expected values
m.setattr(QubitDevice, "sample_basis_states", lambda self, wires, b: wires)
m.setattr(QubitDevice, "states_to_binary", staticmethod(lambda a, b: (a, b)))
m.setattr(QubitDevice, "analytic_probability", lambda *args: None)
m.setattr(QubitDevice, "shots", 1000)
dev._samples = dev.generate_samples()
assert dev._samples == (number_of_states, dev.num_wires)
class TestSampleBasisStates:
"""Test the sample_basis_states method"""
def test_sampling_with_correct_arguments(self, mock_qubit_device, monkeypatch):
"""Tests that the sample_basis_states method samples with the correct arguments"""
shots = 1000
number_of_states = 4
dev = mock_qubit_device()
dev.shots = shots
state_probs = [0.1, 0.2, 0.3, 0.4]
with monkeypatch.context() as m:
# Mock the numpy.random.choice method such that it returns the expected values
m.setattr("numpy.random.choice", lambda x, y, p: (x, y, p))
res = dev.sample_basis_states(number_of_states, state_probs)
assert np.array_equal(res[0], np.array([0, 1, 2, 3]))
assert res[1] == shots
assert res[2] == state_probs
def test_raises_error_without_shots(self, mock_qubit_device):
"""Test that sampling basis states on a device with shots=None raises a QuantumFunctionError."""
dev = mock_qubit_device()
number_of_states = 4
dev.shots = None
state_probs = [0.1, 0.2, 0.3, 0.4]
with pytest.raises(
qml.QuantumFunctionError,
match="The number of shots has to be explicitly set on the device",
):
dev.sample_basis_states(number_of_states, state_probs)
def test_sampling_with_broadcasting(self, mock_qubit_device):
"""Tests that the sample_basis_states method samples with the correct arguments
when using broadcasted probabilities"""
shots = 1000
number_of_states = 4
dev = mock_qubit_device()
dev.shots = shots
state_probs = [[0.1, 0.2, 0.3, 0.4], [0.5, 0.2, 0.1, 0.2]]
# Sample directly and check the shape and values of the returned basis-state indices
res = dev.sample_basis_states(number_of_states, state_probs)
assert qml.math.shape(res) == (2, shots)
assert set(res.flat).issubset({0, 1, 2, 3})
class TestStatesToBinary:
"""Test the states_to_binary method"""
def test_correct_conversion_two_states(self, mock_qubit_device):
"""Tests that the sample_basis_states method converts samples to binary correctly"""
wires = 4
shots = 10
number_of_states = 2**wires
basis_states = np.arange(number_of_states)
samples = np.random.choice(basis_states, shots)
dev = mock_qubit_device()
res = dev.states_to_binary(samples, wires)
format_str = f"{{:0{wires}b}}"
expected = np.array([[int(x) for x in list(format_str.format(i))] for i in samples])
assert np.all(res == expected)
test_binary_conversion_data = [
(np.array([2, 3, 2, 0, 0]), np.array([[1, 0], [1, 1], [1, 0], [0, 0], [0, 0]])),
(np.array([2, 3, 1, 3, 1]), np.array([[1, 0], [1, 1], [0, 1], [1, 1], [0, 1]])),
(
np.array([7, 7, 1, 5, 2]),
np.array([[1, 1, 1], [1, 1, 1], [0, 0, 1], [1, 0, 1], [0, 1, 0]]),
),
]
@pytest.mark.parametrize("samples, binary_states", test_binary_conversion_data)
def test_correct_conversion(self, mock_qubit_device, samples, binary_states, tol):
"""Tests that the states_to_binary method converts samples to binary correctly"""
dev = mock_qubit_device()
dev.shots = 5
wires = binary_states.shape[1]
res = dev.states_to_binary(samples, wires)
assert np.allclose(res, binary_states, atol=tol, rtol=0)
test_binary_conversion_data_broadcasted = [
(
np.array([[2, 3, 2, 0, 0], [3, 0, 0, 1, 1], [2, 2, 0, 1, 3]]),
np.array(
[
[[1, 0], [1, 1], [1, 0], [0, 0], [0, 0]],
[[1, 1], [0, 0], [0, 0], [0, 1], [0, 1]],
[[1, 0], [1, 0], [0, 0], [0, 1], [1, 1]],
]
),
),
(
np.array([[7, 7, 1, 5, 2], [3, 3, 2, 4, 6], [0, 0, 7, 2, 1]]),
np.array(
[
[[1, 1, 1], [1, 1, 1], [0, 0, 1], [1, 0, 1], [0, 1, 0]],
[[0, 1, 1], [0, 1, 1], [0, 1, 0], [1, 0, 0], [1, 1, 0]],
[[0, 0, 0], [0, 0, 0], [1, 1, 1], [0, 1, 0], [0, 0, 1]],
]
),
),
]
@pytest.mark.parametrize("samples, binary_states", test_binary_conversion_data_broadcasted)
def test_correct_conversion_broadcasted(self, mock_qubit_device, samples, binary_states, tol):
"""Tests that the states_to_binary method converts broadcasted
samples to binary correctly"""
dev = mock_qubit_device()
dev.shots = 5
wires = binary_states.shape[-1]
res = dev.states_to_binary(samples, wires)
assert np.allclose(res, binary_states, atol=tol, rtol=0)
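# Editor's sketch (illustrative addition): the binary conversion exercised in
# TestStatesToBinary amounts to reading off the bits of each sampled integer,
# most-significant wire first, e.g. 2 -> [1, 0] and 3 -> [1, 1] on two wires.
def _sketch_states_to_binary(samples=(2, 3, 1), num_wires=2):
    return np.array([[int(b) for b in format(s, f"0{num_wires}b")] for s in samples])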
class TestExpval:
"""Test the expval method"""
def test_analytic_expval(self, mock_qubit_device_with_original_statistics, monkeypatch):
"""Tests that expval method when the analytic attribute is True
Additional QubitDevice methods that are mocked:
-probability
"""
obs = qml.PauliX(0)
probs = [0.5, 0.5]
dev = mock_qubit_device_with_original_statistics()
assert dev.shots is None
with monkeypatch.context() as m:
m.setattr(QubitDevice, "probability", lambda self, wires=None: probs)
res = dev.expval(obs)
assert res == (obs.eigvals() @ probs).real
def test_analytic_expval_broadcasted(
self, mock_qubit_device_with_original_statistics, monkeypatch
):
"""Tests expval method when the analytic attribute is True and using broadcasting
Additional QubitDevice methods that are mocked:
-probability
"""
obs = qml.PauliX(0)
probs = np.array([[0.5, 0.5], [0.2, 0.8], [0.1, 0.9]])
dev = mock_qubit_device_with_original_statistics()
assert dev.shots is None
with monkeypatch.context() as m:
m.setattr(QubitDevice, "probability", lambda self, wires=None: probs)
res = dev.expval(obs)
assert np.allclose(res, (probs @ obs.eigvals()).real)
def test_non_analytic_expval(self, mock_qubit_device_with_original_statistics, monkeypatch):
"""Tests that expval method when the analytic attribute is False
Additional QubitDevice methods that are mocked:
-sample
-numpy.mean
"""
obs = qml.PauliX(0)
dev = mock_qubit_device_with_original_statistics()
dev.shots = 1000
with monkeypatch.context() as m:
m.setattr(QubitDevice, "sample", lambda self, obs, *args, **kwargs: obs)
m.setattr("numpy.mean", lambda obs, axis=None: obs)
res = dev.expval(obs)
assert res == np.array(obs)  # sample and numpy.mean are mocked, so expval returns the observable itself
def test_no_eigval_error(self, mock_qubit_device_with_original_statistics):
"""Tests that an error is thrown if expval is called with an observable that does
not have eigenvalues defined."""
dev = mock_qubit_device_with_original_statistics()
# observable with no eigenvalue representation defined
class MyObs(qml.operation.Observable):
num_wires = 1
def eigvals(self):
raise qml.operation.EigvalsUndefinedError
obs = MyObs(wires=0)
with pytest.raises(
qml.operation.EigvalsUndefinedError, match="Cannot compute analytic expectations"
):
dev.expval(obs)
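# Editor's sketch (illustrative addition): the analytic expectation asserted in
# TestExpval is the probability-weighted sum of eigenvalues; for PauliX with
# probabilities [0.5, 0.5] in its eigenbasis this gives 1 * 0.5 + (-1) * 0.5 = 0.
def _sketch_analytic_expval():
    eigvals = qml.PauliX(0).eigvals()  # array([ 1., -1.])
    probs = np.array([0.5, 0.5])
    return (eigvals @ probs).real  # 0.0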
class TestVar:
"""Test the var method"""
def test_analytic_var(self, mock_qubit_device_with_original_statistics, monkeypatch):
"""Tests that var method when the analytic attribute is True
Additional QubitDevice methods that are mocked:
-probability
"""
obs = qml.PauliX(0)
probs = [0.5, 0.5]
dev = mock_qubit_device_with_original_statistics()
assert dev.shots is None
with monkeypatch.context() as m:
m.setattr(QubitDevice, "probability", lambda self, wires=None: probs)
res = dev.var(obs)
assert res == (obs.eigvals() ** 2) @ probs - (obs.eigvals() @ probs).real ** 2
def test_analytic_var_broadcasted(
self, mock_qubit_device_with_original_statistics, monkeypatch
):
"""Tests var method when the analytic attribute is True and using broadcasting
Additional QubitDevice methods that are mocked:
-probability
"""
obs = qml.PauliX(0)
probs = np.array([[0.5, 0.5], [0.2, 0.8], [0.1, 0.9]])
dev = mock_qubit_device_with_original_statistics()
assert dev.shots is None
with monkeypatch.context() as m:
m.setattr(QubitDevice, "probability", lambda self, wires=None: probs)
res = dev.var(obs)
assert np.allclose(res, probs @ (obs.eigvals() ** 2) - (probs @ obs.eigvals()).real ** 2)
def test_non_analytic_var(self, mock_qubit_device_with_original_statistics, monkeypatch):
"""Tests that var method when the analytic attribute is False
Additional QubitDevice methods that are mocked:
-sample
-numpy.var
"""
obs = qml.PauliX(0)
dev = mock_qubit_device_with_original_statistics()
dev.shots = 1000
with monkeypatch.context() as m:
m.setattr(QubitDevice, "sample", lambda self, obs, *args, **kwargs: obs)
m.setattr("numpy.var", lambda obs, axis=None: obs)
res = dev.var(obs)
assert res == np.array(obs)
def test_no_eigval_error(self, mock_qubit_device_with_original_statistics):
"""Tests that an error is thrown if var is called with an observable that does not have eigenvalues defined."""
dev = mock_qubit_device_with_original_statistics()
# pylint: disable=too-few-public-methods
class MyObs(qml.operation.Observable):
"""Observable with no eigenvalue representation defined."""
num_wires = 1
def eigvals(self):
raise qml.operation.EigvalsUndefinedError
obs = MyObs(wires=0)
with pytest.raises(
qml.operation.EigvalsUndefinedError, match="Cannot compute analytic variance"
):
dev.var(obs)
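# Editor's sketch (illustrative addition): the analytic variance asserted in
# TestVar is the second moment minus the squared mean,
# Var = sum_i p_i * lambda_i**2 - (sum_i p_i * lambda_i)**2, which equals 1 for
# PauliX with probabilities [0.5, 0.5].
def _sketch_analytic_var():
    eigvals = qml.PauliX(0).eigvals()
    probs = np.array([0.5, 0.5])
    return (eigvals**2) @ probs - ((eigvals @ probs).real) ** 2  # 1.0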
class TestSample:
"""Test the sample method"""
def test_only_ones_minus_ones(self, mock_qubit_device_with_original_statistics, tol):
"""Test that sample for a single Pauli observable only produces -1 and 1 samples"""
obs = qml.PauliX(0)
dev = mock_qubit_device_with_original_statistics()
dev._samples = np.array([[1, 0], [0, 0]])
res = dev.sample(obs)
assert np.shape(res) == (2,)
assert np.allclose(res**2, 1, atol=tol, rtol=0)
def test_correct_custom_eigenvalues(self, mock_qubit_device_with_original_statistics):
"""Test that sample for a product of Pauli observables produces samples of eigenvalues"""
obs = qml.PauliX(0) @ qml.PauliZ(1)
dev = mock_qubit_device_with_original_statistics(wires=2)
dev._samples = np.array([[1, 0], [0, 0]])
res = dev.sample(obs)
assert np.array_equal(res, np.array([-1, 1]))
def test_sample_with_no_observable_and_no_wires(
self, mock_qubit_device_with_original_statistics
):
"""Test that when we sample a device without providing an observable or wires then it
will return the raw samples"""
obs = qml.measurements.sample(op=None, wires=None)
dev = mock_qubit_device_with_original_statistics(wires=2)
generated_samples = np.array([[1, 0], [1, 1]])
dev._samples = generated_samples
res = dev.sample(obs)
assert np.array_equal(res, generated_samples)
def test_sample_with_no_observable_and_with_wires(
self, mock_qubit_device_with_original_statistics
):
"""Test that when we sample a device without providing an observable but we specify
wires then it returns the generated samples for only those wires"""
obs = qml.measurements.sample(op=None, wires=[1])
dev = mock_qubit_device_with_original_statistics(wires=2)
generated_samples = np.array([[1, 0], [1, 1]])
dev._samples = generated_samples
wire_samples = np.array([[0], [1]])
res = dev.sample(obs)
assert np.array_equal(res, wire_samples)
def test_no_eigval_error(self, mock_qubit_device_with_original_statistics):
"""Tests that an error is thrown if sample is called with an observable
that does not have eigenvalues defined."""
dev = mock_qubit_device_with_original_statistics()
dev._samples = np.array([[1, 0], [0, 0]])
class MyObs(qml.operation.Observable):
"""Observable with no eigenvalue representation defined."""
num_wires = 1
def eigvals(self):
raise qml.operation.EigvalsUndefinedError
with pytest.raises(qml.operation.EigvalsUndefinedError, match="Cannot compute samples"):
dev.sample(MyObs(wires=[0]))
class TestSampleWithBroadcasting:
"""Test the sample method when broadcasting is used"""
def test_only_ones_minus_ones(self, mock_qubit_device_with_original_statistics, tol):
"""Test that sample for a single Pauli observable only produces -1 and 1 samples
when using broadcasting"""
obs = qml.PauliX(0)
dev = mock_qubit_device_with_original_statistics()
dev._samples = np.array([[[0, 0], [0, 0]], [[1, 0], [1, 0]], [[0, 0], [1, 0]]])
res = dev.sample(obs)
assert np.allclose(res, [[1, 1], [-1, -1], [1, -1]], atol=tol, rtol=0)
def test_correct_custom_eigenvalues(self, mock_qubit_device_with_original_statistics):
"""Test that sample for a product of Pauli observables produces samples
of eigenvalues when using broadcasting"""
obs = qml.PauliX(0) @ qml.PauliZ(1)
dev = mock_qubit_device_with_original_statistics(wires=2)
dev._samples = np.array([[[1, 0], [0, 0]], [[0, 1], [1, 1]], [[1, 0], [0, 1]]])
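# Each batch entry is mapped through the eigenvalues [1, -1, -1, 1] of PauliX @ PauliZ:
# [[1, 0], [0, 0]] -> [-1, 1], [[0, 1], [1, 1]] -> [-1, 1], [[1, 0], [0, 1]] -> [-1, -1].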
res = dev.sample(obs)
assert np.array_equal(res, np.array([[-1, 1], [-1, 1], [-1, -1]]))
def test_sample_with_no_observable_and_no_wires(
self, mock_qubit_device_with_original_statistics
):
"""Test that when we sample a device without providing an observable or wires then it
will return the raw samples when using broadcasting"""
obs = qml.measurements.sample(op=None, wires=None)
dev = mock_qubit_device_with_original_statistics(wires=2)
generated_samples = np.array([[[1, 0], [1, 1]], [[1, 1], [0, 0]], [[0, 1], [1, 0]]])
dev._samples = generated_samples
res = dev.sample(obs)
assert np.array_equal(res, generated_samples)
def test_sample_with_no_observable_and_with_wires(
self, mock_qubit_device_with_original_statistics
):
"""Test that when we sample a device without providing an observable but we specify wires
then it returns the generated samples for only those wires when using broadcasting"""
obs = qml.measurements.sample(op=None, wires=[1])
dev = mock_qubit_device_with_original_statistics(wires=2)
generated_samples = np.array([[[1, 0], [1, 1]], [[1, 1], [0, 0]], [[0, 1], [1, 0]]])
dev._samples = generated_samples
wire_samples = np.array([[[0], [1]], [[1], [0]], [[1], [0]]])
res = dev.sample(obs)
assert np.array_equal(res, wire_samples)
def test_no_eigval_error(self, mock_qubit_device_with_original_statistics):
"""Tests that an error is thrown if sample is called with an observable
that does not have eigenvalues defined when using broadcasting."""
dev = mock_qubit_device_with_original_statistics()
dev._samples = np.array([[[1, 0], [1, 1]], [[1, 1], [0, 0]], [[0, 1], [1, 0]]])
class MyObs(qml.operation.Observable):
"""Observable with no eigenvalue representation defined."""
num_wires = 1
def eigvals(self):
raise qml.operation.EigvalsUndefinedError
with pytest.raises(qml.operation.EigvalsUndefinedError, match="Cannot compute samples"):
dev.sample(MyObs(wires=[0]))
class TestEstimateProb:
"""Test the estimate_probability method"""
@pytest.mark.parametrize(
"wires, expected", [([0], [0.5, 0.5]), (None, [0.5, 0, 0, 0.5]), ([0, 1], [0.5, 0, 0, 0.5])]
)
def test_estimate_probability(
self, wires, expected, mock_qubit_device_with_original_statistics, monkeypatch
):
"""Tests the estimate_probability method"""
dev = mock_qubit_device_with_original_statistics(wires=2)
samples = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
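# The four samples contain "00" twice and "11" twice, so the full distribution is
# [0.5, 0, 0, 0.5] and the marginal over wire 0 alone is [0.5, 0.5].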
with monkeypatch.context() as m:
m.setattr(dev, "_samples", samples)
res = dev.estimate_probability(wires=wires)
assert np.allclose(res, expected)
@pytest.mark.parametrize(
"wires, expected",
[
([0], [[0.0, 0.5], [1.0, 0.5]]),
(None, [[0.0, 0.5], [0, 0], [0, 0.5], [1.0, 0]]),
([0, 1], [[0.0, 0.5], [0, 0], [0, 0.5], [1.0, 0]]),
],
)
def test_estimate_probability_with_binsize(
self, wires, expected, mock_qubit_device_with_original_statistics, monkeypatch
):
"""Tests the estimate_probability method with a bin size"""
dev = mock_qubit_device_with_original_statistics(wires=2)
samples = np.array([[1, 1], [1, 1], [1, 0], [0, 0]])
bin_size = 2
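# With bin_size=2 the samples split into the bins {11, 11} and {10, 00}; the expected arrays
# list the probability of each outcome per bin, with one column per bin.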
with monkeypatch.context() as m:
m.setattr(dev, "_samples", samples)
res = dev.estimate_probability(wires=wires, bin_size=bin_size)
assert np.allclose(res, expected)
@pytest.mark.parametrize(
"wires, expected",
[
([0], [[0.0, 1.0], [0.5, 0.5], [0.25, 0.75]]),
(None, [[0, 0, 0.25, 0.75], [0.5, 0, 0, 0.5], [0.25, 0, 0.25, 0.5]]),
([0, 1], [[0, 0, 0.25, 0.75], [0.5, 0, 0, 0.5], [0.25, 0, 0.25, 0.5]]),
],
)
def test_estimate_probability_with_broadcasting(
self, wires, expected, mock_qubit_device_with_original_statistics, monkeypatch
):
"""Tests the estimate_probability method with parameter broadcasting"""
dev = mock_qubit_device_with_original_statistics(wires=2)
samples = np.array(
[
[[1, 0], [1, 1], [1, 1], [1, 1]],
[[0, 0], [1, 1], [1, 1], [0, 0]],
[[1, 0], [1, 1], [1, 1], [0, 0]],
]
)
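# For wires=[0], the wire-0 samples of the three batches are [1, 1, 1, 1], [0, 1, 1, 0]
# and [1, 1, 1, 0], giving the per-batch distributions [0, 1], [0.5, 0.5] and [0.25, 0.75].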
with monkeypatch.context() as m:
m.setattr(dev, "_samples", samples)
res = dev.estimate_probability(wires=wires)
assert np.allclose(res, expected)
@pytest.mark.parametrize(
"wires, expected",
[
(
[0],
[
[[0, 0, 0.5], [1, 1, 0.5]],
[[0.5, 0.5, 0], [0.5, 0.5, 1]],
[[0, 0.5, 1], [1, 0.5, 0]],
],
),
(
None,
[
[[0, 0, 0], [0, 0, 0.5], [0.5, 0, 0], [0.5, 1, 0.5]],
[[0.5, 0.5, 0], [0, 0, 0], [0, 0, 0], [0.5, 0.5, 1]],
[[0, 0.5, 0.5], [0, 0, 0.5], [0.5, 0, 0], [0.5, 0.5, 0]],
],
),
(
[0, 1],
[
[[0, 0, 0], [0, 0, 0.5], [0.5, 0, 0], [0.5, 1, 0.5]],
[[0.5, 0.5, 0], [0, 0, 0], [0, 0, 0], [0.5, 0.5, 1]],
[[0, 0.5, 0.5], [0, 0, 0.5], [0.5, 0, 0], [0.5, 0.5, 0]],
],
),
],
)
def test_estimate_probability_with_binsize_with_broadcasting(
self, wires, expected, mock_qubit_device_with_original_statistics, monkeypatch
):
"""Tests the estimate_probability method with a bin size and parameter broadcasting"""
dev = mock_qubit_device_with_original_statistics(wires=2)
bin_size = 2
samples = np.array(
[
[[1, 0], [1, 1], [1, 1], [1, 1], [1, 1], [0, 1]],
[[0, 0], [1, 1], [1, 1], [0, 0], [1, 1], [1, 1]],
[[1, 0], [1, 1], [1, 1], [0, 0], [0, 1], [0, 0]],
]
)
with monkeypatch.context() as m:
m.setattr(dev, "_samples", samples)
res = dev.estimate_probability(wires=wires, bin_size=bin_size)
assert np.allclose(res, expected)
class TestMarginalProb:
"""Test the marginal_prob method"""
# pylint: disable=too-many-arguments
@pytest.mark.parametrize(
"wires, inactive_wires",
[
([0], [1, 2]),
([1], [0, 2]),
([2], [0, 1]),
([0, 1], [2]),
([0, 2], [1]),
([1, 2], [0]),
([0, 1, 2], []),
(Wires([0]), [1, 2]),
(Wires([0, 1]), [2]),
(Wires([0, 1, 2]), []),
],
)
def test_correct_arguments_for_marginals(
self, mock_qubit_device_with_original_statistics, mocker, wires, inactive_wires, tol
):
"""Test that the correct arguments are passed to the marginal_prob method"""
# Generate probabilities
probs = np.array([random() for i in range(2**3)])
probs /= sum(probs)
spy = mocker.spy(np, "sum")
dev = mock_qubit_device_with_original_statistics(wires=3)
_ = dev.marginal_prob(probs, wires=wires)
array_call = spy.call_args[0][0]
axis_call = spy.call_args[1]["axis"]
assert np.allclose(array_call.flatten(), probs, atol=tol, rtol=0)
assert axis_call == tuple(inactive_wires)
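# Each entry is (input probabilities, expected marginal, wires to keep); the ordering of
# the kept wires determines the ordering of the returned marginal probabilities.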
marginal_test_data = [
(np.array([0.1, 0.2, 0.3, 0.4]), np.array([0.4, 0.6]), [1]),
(np.array([0.1, 0.2, 0.3, 0.4]), np.array([0.3, 0.7]), Wires([0])),
(
np.array(
[
0.17794671,
0.06184147,
0.21909549,
0.04932204,
0.19595214,
0.19176834,
0.08495311,
0.0191207,
]
),
np.array([0.3970422, 0.28090525, 0.11116351, 0.21088904]),
[2, 0],
),
(
np.array([0.05, 0.07, 0.11, 0.13, 0.17, 0.19, 0.23, 0.05]),
np.array([0.05, 0.11, 0.17, 0.23, 0.07, 0.13, 0.19, 0.05]),
[2, 0, 1],
),
(
np.arange(1, 17) / 136,
np.array([3, 11, 19, 27, 7, 15, 23, 31]) / 136,
[2, 0, 1],
),
]
# pylint: disable=too-many-arguments
@pytest.mark.parametrize("probs, marginals, wires", marginal_test_data)
def test_correct_marginals_returned(
self, mock_qubit_device_with_original_statistics, probs, marginals, wires, tol
):
"""Test that the correct marginals are returned by the marginal_prob method"""
num_wires = int(np.log2(len(probs)))
dev = mock_qubit_device_with_original_statistics(num_wires)
res = dev.marginal_prob(probs, wires=wires)
assert np.allclose(res, marginals, atol=tol, rtol=0)
# pylint: disable=too-many-arguments, unused-argument
@pytest.mark.parametrize("probs, marginals, wires", marginal_test_data)
def test_correct_marginals_returned_wires_none(
self, mock_qubit_device_with_original_statistics, probs, marginals, wires, tol
):
"""Test that passing wires=None simply returns the original probability."""
num_wires = int(np.log2(len(probs)))
dev = mock_qubit_device_with_original_statistics(wires=num_wires)
dev.num_wires = num_wires
res = dev.marginal_prob(probs, wires=None)
assert np.allclose(res, probs, atol=tol, rtol=0)
# Note that the broadcasted probs enter `marginal_prob` as a flattened array
broadcasted_marginal_test_data = [
(
np.array([[0.1, 0.2, 0.3, 0.4], [0.8, 0.02, 0.05, 0.13], [0.6, 0.3, 0.02, 0.08]]),
np.array([[0.4, 0.6], [0.85, 0.15], [0.62, 0.38]]),
[1],
2,
),
(
np.array(
[
[0.17, 0.06, 0.21, 0.04, 0.19, 0.19, 0.08, 0.06],
[0.07, 0.04, 0.11, 0.04, 0.29, 0.04, 0.18, 0.23],
]
),
np.array([[0.38, 0.27, 0.1, 0.25], [0.18, 0.47, 0.08, 0.27]]),
[2, 0],
3,
),
(
np.array(
[
[0.05, 0.07, 0.11, 0.13, 0.17, 0.19, 0.23, 0.05],
np.arange(1, 9) / 36,
]
),
np.array(
[
[0.05, 0.11, 0.17, 0.23, 0.07, 0.13, 0.19, 0.05],
np.array([1, 3, 5, 7, 2, 4, 6, 8]) / 36,
],
),
[2, 0, 1],
3,
),
]
# pylint: disable=too-many-arguments
@pytest.mark.parametrize("probs, marginals, wires, num_wires", broadcasted_marginal_test_data)
def test_correct_broadcasted_marginals_returned(
self,
monkeypatch,
mock_qubit_device_with_original_statistics,
probs,
marginals,
wires,
num_wires,
tol,
):
"""Test that the correct marginals are returned by the marginal_prob method when
broadcasting is used"""
dev = mock_qubit_device_with_original_statistics(num_wires)
with monkeypatch.context() as m:
m.setattr(dev, "_get_batch_size", _working_get_batch_size)
res = dev.marginal_prob(probs, wires=wires)
assert np.allclose(res, marginals, atol=tol, rtol=0)
# pylint: disable=too-many-arguments, unused-argument
@pytest.mark.parametrize("probs, marginals, wires, num_wires", broadcasted_marginal_test_data)
def test_correct_broadcasted_marginals_returned_wires_none(
self, mock_qubit_device_with_original_statistics, probs, marginals, wires, num_wires, tol
):
"""Test that the correct marginals are returned by the marginal_prob method when
broadcasting is used"""
dev = mock_qubit_device_with_original_statistics(num_wires)
res = dev.marginal_prob(probs, wires=None)
assert np.allclose(res, probs.reshape((-1, 2**num_wires)), atol=tol, rtol=0)
# pylint: disable=too-few-public-methods
class TestActiveWires:
"""Test that the active_wires static method works as required."""
def test_active_wires_from_queue(self, mock_qubit_device):
queue = [qml.CNOT(wires=[0, 2]), qml.RX(0.2, wires=0), qml.expval(qml.PauliX(wires=5))]
dev = mock_qubit_device(wires=6)
res = dev.active_wires(queue)
assert res == Wires([0, 2, 5])
# pylint: disable=too-few-public-methods
class TestCapabilities:
"""Test that a default qubit device defines capabilities that all devices inheriting
from it will automatically have."""
def test_defines_correct_capabilities(self):
"""Test that the device defines the right capabilities"""
capabilities = {
"model": "qubit",
"supports_finite_shots": True,
"supports_tensor_observables": True,
"returns_probs": True,
"supports_broadcasting": False,
}
assert capabilities == QubitDevice.capabilities()
class TestNativeMidCircuitMeasurements:
"""Unit tests for mid-circuit measurements related functionality"""
class MCMDevice(qml.devices.DefaultQubitLegacy):
def apply(self, *args, **kwargs):
for op in args[0]:
if isinstance(op, qml.measurements.MidMeasureMP):
kwargs["mid_measurements"][op] = 0
@classmethod
def capabilities(cls):
default_capabilities = copy.copy(qml.devices.DefaultQubitLegacy.capabilities())
default_capabilities["supports_mid_measure"] = True
return default_capabilities
def test_qnode_native_mcm(self, mocker):
"""Tests that the legacy devices may support native MCM execution via the dynamic_one_shot transform."""
dev = self.MCMDevice(wires=1, shots=100)
dev.operations.add("MidMeasureMP")
spy = mocker.spy(qml.dynamic_one_shot, "_transform")
@qml.qnode(dev, interface=None, diff_method=None)
def func():
_ = qml.measure(0)
return qml.expval(op=qml.PauliZ(0))
res = func()
assert spy.call_count == 1
assert isinstance(res, float)
@pytest.mark.parametrize("postselect_mode", ["hw-like", "fill-shots"])
def test_postselect_mode_propagates_to_execute(self, monkeypatch, postselect_mode):
"""Test that the specified postselect mode propagates to execution as expected."""
dev = self.MCMDevice(wires=1, shots=100)
dev.operations.add("MidMeasureMP")
pm_propagated = False
def new_apply(*args, **kwargs): # pylint: disable=unused-argument
nonlocal pm_propagated
pm_propagated = kwargs.get("postselect_mode", -1) == postselect_mode
@qml.qnode(dev, postselect_mode=postselect_mode)
def func():
_ = qml.measure(0, postselect=1)
return qml.expval(op=qml.PauliZ(0))
with monkeypatch.context() as m:
m.setattr(dev, "apply", new_apply)
with pytest.raises(Exception):
# Error expected as mocked apply method does not adhere to expected output.
func()
assert pm_propagated is True
class TestExecution:
"""Tests for the execute method"""
def test_device_executions(self):
"""Test the number of times a qubit device is executed over a QNode's
lifetime is tracked by `num_executions`"""
dev_1 = qml.device("default.mixed", wires=2)
def circuit_1(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node_1 = qml.QNode(circuit_1, dev_1)
num_evals_1 = 10
for _ in range(num_evals_1):
node_1(0.432, 0.12)
assert dev_1.num_executions == num_evals_1
# test a second instance of a default qubit device
dev_2 = qml.device("default.mixed", wires=2)
def circuit_2(x):
qml.RX(x, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node_2 = qml.QNode(circuit_2, dev_2)
num_evals_2 = 5
for _ in range(num_evals_2):
node_2(0.432)
assert dev_2.num_executions == num_evals_2
# test a new circuit on an existing instance of a qubit device
def circuit_3(y):
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node_3 = qml.QNode(circuit_3, dev_1)
num_evals_3 = 7
for _ in range(num_evals_3):
node_3(0.12)
assert dev_1.num_executions == num_evals_1 + num_evals_3
# pylint: disable=protected-access
def test_get_diagonalizing_gates(self, mock_qubit_device):
"""Test the private _get_diagonalizing_gates helper method."""
circuit = qml.tape.QuantumScript([qml.RX(1, 0)], [qml.probs(), qml.expval(qml.PauliX(0))])
dev = mock_qubit_device(wires=1)
rotations = dev._get_diagonalizing_gates(circuit)
assert len(rotations) == 1
qml.assert_equal(rotations[0], qml.Hadamard(0))
# pylint: disable=too-few-public-methods, unused-argument
class TestExecutionBroadcasted:
"""Tests for the execute method with broadcasted parameters"""
def test_device_executions(self):
"""Test the number of times a qubit device is executed over a QNode's
lifetime is tracked by `num_executions`"""
dev_1 = qml.device("default.mixed", wires=2)
def circuit_1(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node_1 = qml.QNode(circuit_1, dev_1)
num_evals_1 = 10
for _ in range(num_evals_1):
node_1(0.432, np.array([0.12, 0.5, 3.2]))
assert dev_1.num_executions == num_evals_1 * 3
# test a second instance of a default qubit device
dev_2 = qml.device("default.mixed", wires=2)
assert dev_2.num_executions == 0
def circuit_2(x, y):
qml.RX(x, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node_2 = qml.QNode(circuit_2, dev_2)
num_evals_2 = 5
for _ in range(num_evals_2):
node_2(np.array([0.432, 0.61, 8.2]), 0.12)
assert dev_2.num_executions == num_evals_2 * 3
# test a new circuit on an existing instance of a qubit device
def circuit_3(x, y):
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
node_3 = qml.QNode(circuit_3, dev_1)
num_evals_3 = 7
for _ in range(num_evals_3):
node_3(np.array([0.432, 0.2]), np.array([0.12, 1.214]))
assert dev_1.num_executions == num_evals_1 * 3 + num_evals_3 * 2
class TestBatchExecution:
"""Tests for the batch_execute method."""
with qml.queuing.AnnotatedQueue() as q1:
qml.PauliX(wires=0)
qml.expval(qml.PauliZ(wires=0))
qml.expval(qml.PauliZ(wires=1))
tape1 = QuantumScript.from_queue(q1)
with qml.queuing.AnnotatedQueue() as q2:
qml.PauliX(wires=0)
qml.expval(qml.PauliZ(wires=0))
tape2 = QuantumScript.from_queue(q2)
@pytest.mark.parametrize("n_tapes", [1, 2, 3])
def test_calls_to_execute(self, n_tapes, mocker, mock_qubit_device_with_paulis_and_methods):
"""Tests that the device's execute method is called the correct number of times."""
dev = mock_qubit_device_with_paulis_and_methods(wires=2)
spy = mocker.spy(QubitDevice, "execute")
tapes = [self.tape1] * n_tapes
dev.batch_execute(tapes)
assert spy.call_count == n_tapes
@pytest.mark.parametrize("n_tapes", [1, 2, 3])
def test_calls_to_reset(self, n_tapes, mocker, mock_qubit_device_with_paulis_and_methods):
"""Tests that the device's reset method is called the correct number of times."""
dev = mock_qubit_device_with_paulis_and_methods(wires=2)
spy = mocker.spy(QubitDevice, "reset")
tapes = [self.tape1] * n_tapes
dev.batch_execute(tapes)
assert spy.call_count == n_tapes
@pytest.mark.parametrize("r_dtype", [np.float32, np.float64])
def test_result(self, mock_qubit_device_with_paulis_and_methods, r_dtype, tol):
"""Tests that the result has the correct shape and entry types."""
dev = mock_qubit_device_with_paulis_and_methods(wires=2)
dev.R_DTYPE = r_dtype
tapes = [self.tape1, self.tape2]
res = dev.batch_execute(tapes)
assert len(res) == 2
assert np.allclose(res[0], dev.execute(self.tape1), rtol=tol, atol=0)
assert np.allclose(res[1], dev.execute(self.tape2), rtol=tol, atol=0)
assert res[0][0].dtype == r_dtype
assert res[1].dtype == r_dtype
def test_result_empty_tape(self, mock_qubit_device_with_paulis_and_methods, tol):
"""Tests that the result has the correct shape and entry types for empty tapes."""
dev = mock_qubit_device_with_paulis_and_methods(wires=2)
empty_tape = QuantumScript()
tapes = [empty_tape] * 3
res = dev.batch_execute(tapes)
assert len(res) == 3
assert np.allclose(res[0], dev.execute(empty_tape), rtol=tol, atol=0)
# pylint: disable=too-few-public-methods
class TestGetBatchSize:
"""Tests for the helper method ``_get_batch_size`` of ``QubitDevice``."""
# pylint: disable=protected-access
@pytest.mark.parametrize("shape", [(4, 4), (1, 8), (4,)])
def test_batch_size_always_None(self, mock_qubit_device, shape):
"""Test that QubitDevice always reports a batch_size of None."""
dev = mock_qubit_device()
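# The base QubitDevice does not support parameter broadcasting (see its capabilities),
# so _get_batch_size is expected to return None regardless of the tensor shape.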
tensor0 = np.ones(shape, dtype=complex)
assert dev._get_batch_size(tensor0, shape, qml.math.prod(shape)) is None
tensor1 = np.arange(np.prod(shape)).reshape(shape)
assert dev._get_batch_size(tensor1, shape, qml.math.prod(shape)) is None
broadcasted_shape = (1,) + shape
tensor0 = np.ones(broadcasted_shape, dtype=complex)
assert (
dev._get_batch_size(tensor0, broadcasted_shape, qml.math.prod(broadcasted_shape))
is None
)
tensor1 = np.arange(np.prod(broadcasted_shape)).reshape(broadcasted_shape)
assert (
dev._get_batch_size(tensor1, broadcasted_shape, qml.math.prod(broadcasted_shape))
is None
)
broadcasted_shape = (3,) + shape
tensor0 = np.ones(broadcasted_shape, dtype=complex)
assert (
dev._get_batch_size(tensor0, broadcasted_shape, qml.math.prod(broadcasted_shape))
is None
)
tensor1 = np.arange(np.prod(broadcasted_shape)).reshape(broadcasted_shape)
assert (
dev._get_batch_size(tensor1, broadcasted_shape, qml.math.prod(broadcasted_shape))
is None
)
class TestResourcesTracker:
"""Test that the tracker can track resources and is integrated well with default qubit"""
qs_shots_wires_data = (
(qml.tape.QuantumScript([qml.Hadamard(0), qml.CNOT([0, 1])]), None, [0, 1]),
(qml.tape.QuantumScript([qml.PauliZ(0), qml.CNOT([0, 1]), qml.RX(1.23, 2)]), 10, [0, 1, 2]),
(
qml.tape.QuantumScript(
[
qml.Hadamard(0),
qml.RX(1.23, 1),
qml.CNOT([0, 1]),
qml.RX(4.56, 1),
qml.Hadamard(0),
qml.Hadamard(1),
],
),
(10, 10, 50),
[0, 1],
),
)
expected_resources = (
Resources(2, 2, {"Hadamard": 1, "CNOT": 1}, {1: 1, 2: 1}, 2, Shots(None)),
Resources(3, 3, {"PauliZ": 1, "CNOT": 1, "RX": 1}, {1: 2, 2: 1}, 2, Shots(10)),
Resources(2, 6, {"Hadamard": 3, "RX": 2, "CNOT": 1}, {1: 5, 2: 1}, 4, Shots((10, 10, 50))),
) # Resources(wires, gates, gate_types, gate_sizes, depth, shots)
devices = (
"default.qubit.legacy",
"default.qubit.autograd",
"default.qubit.jax",
"default.qubit.torch",
"default.qubit.tf",
)
@pytest.mark.all_interfaces
@pytest.mark.parametrize("dev_name", devices)
@pytest.mark.parametrize(
"qs_shots_wires, expected_resource", zip(qs_shots_wires_data, expected_resources)
)
def test_tracker_single_execution(self, dev_name, qs_shots_wires, expected_resource):
"""Test that the tracker accurately tracks resources in a single execution"""
qs, shots, wires = qs_shots_wires
qs._shots = qml.measurements.Shots(shots)
dev = qml.device(dev_name, shots=shots, wires=wires)
with qml.Tracker(dev) as tracker:
dev.execute(qs)
assert len(tracker.history["resources"]) == 1 # single execution
assert tracker.history["resources"][0] == expected_resource
@pytest.mark.all_interfaces
@pytest.mark.parametrize("dev_name", devices)
def test_tracker_multi_execution(self, dev_name):
"""Test that the tracker accurately tracks resources for multi executions"""
qs1 = qml.tape.QuantumScript([qml.Hadamard(0), qml.CNOT([0, 1])])
qs2 = qml.tape.QuantumScript([qml.PauliZ(0), qml.CNOT([0, 1]), qml.RX(1.23, 2)])
exp_res1 = Resources(2, 2, {"Hadamard": 1, "CNOT": 1}, {1: 1, 2: 1}, 2, Shots(10))
exp_res2 = Resources(3, 3, {"PauliZ": 1, "CNOT": 1, "RX": 1}, {1: 2, 2: 1}, 2, Shots(10))
dev = qml.device(dev_name, shots=10, wires=[0, 1, 2])
with qml.Tracker(dev) as tracker:
dev.batch_execute([qs1])
dev.batch_execute([qs1, qs2])
assert tracker.totals == {"batches": 2, "executions": 3, "shots": 30, "batch_len": 3}
assert len(tracker.history["resources"]) == 3 # 1 per qscript execution
for tracked_r, expected_r in zip(
tracker.history["resources"], [exp_res1, exp_res1, exp_res2]
):
assert tracked_r == expected_r
@pytest.mark.autograd
def test_tracker_grad(self):
"""Test that the tracker can track resources through a gradient computation"""
dev = qml.device("default.qubit", wires=1, shots=100)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0) # 2 term parameter shift
return qml.expval(qml.PauliZ(0))
x = pnp.array(0.1, requires_grad=True)
expected_resources = Resources(
num_wires=1,
num_gates=1,
gate_types={"RX": 1},
gate_sizes={1: 1},
shots=Shots(100),
depth=1,
)
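# One forward evaluation plus two parameter-shift evaluations of the single RX parameter
# gives three device executions in total.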
with qml.Tracker(dev) as tracker:
qml.grad(circuit)(x)
assert tracker.totals["executions"] == 3
assert len(tracker.history["resources"]) == 3
assert tracker.history["resources"] == [
expected_resources,
expected_resources,
expected_resources,
]
class TestSamplesToCounts:
"""Tests for correctness of QubitDevice._samples_to_counts"""
def test_samples_to_counts_with_nan(self):
"""Test that the counts function disregards failed measurements (samples including
NaN values) when totalling counts"""
# generate 1000 samples for 2 wires, randomly distributed between 0 and 1
device = qml.device("default.mixed", wires=2, shots=1000)
sv = [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j]
device.target_device._state = np.outer(sv, sv)
device.target_device._samples = device.generate_samples()
samples = device.sample(qml.measurements.CountsMP())
# imitate hardware return with NaNs (requires dtype float)
samples = qml.math.cast_like(samples, np.array([1.2]))
samples[0][0] = np.NaN
samples[17][1] = np.NaN
samples[850][0] = np.NaN
result = device._samples_to_counts(samples, mp=qml.measurements.CountsMP(), num_wires=2)
# no keys with NaNs
assert len(result) == 4
assert set(result.keys()) == {"00", "01", "10", "11"}
# NaNs were not converted into "0", but were excluded from the counts
total_counts = sum(result.values())
assert total_counts == 997
@pytest.mark.parametrize("all_outcomes", [True, False])
def test_samples_to_counts_with_many_wires(self, all_outcomes):
"""Test that the counts function correctly converts wire samples to strings when
the number of wires is 8 or more."""
# generate 100 samples for 10 wires from a random normalized state
n_wires = 10
shots = 100
device = qml.device("default.mixed", wires=n_wires, shots=shots)
sv = np.random.rand(*([2] * n_wires))
state = sv / np.linalg.norm(sv)
device.target_device._state = np.outer(state, state)
device.target_device._samples = device.generate_samples()
samples = device.sample(qml.measurements.CountsMP(all_outcomes=all_outcomes))
result = device._samples_to_counts(
samples, mp=qml.measurements.CountsMP(), num_wires=n_wires
)
# Check that keys are correct binary strings
assert all(0 <= int(sample, 2) <= 2**n_wires for sample in result.keys())
# all samples are valid here, so the counts should add up to the total number of shots
total_counts = sum(result.values())
assert total_counts == shots
|
pennylane/tests/test_qubit_device.py/0
|
{
"file_path": "pennylane/tests/test_qubit_device.py",
"repo_id": "pennylane",
"token_count": 28373
}
| 93 |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the ``batch_inputs`` transform.
"""
# pylint: disable=too-few-public-methods,no-value-for-parameter,comparison-with-callable
from functools import partial
import pytest
import pennylane as qml
from pennylane import numpy as np
def test_simple_circuit():
"""Test that batching works for a simple circuit"""
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(inputs, weights):
qml.RY(weights[0], wires=0)
qml.AngleEmbedding(inputs, wires=range(2), rotation="Y")
qml.RY(weights[1], wires=1)
return qml.expval(qml.PauliZ(1))
batch_size = 5
inputs = np.random.uniform(0, np.pi, (batch_size, 2), requires_grad=False)
weights = np.random.uniform(-np.pi, np.pi, (2,))
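# batch_input splits the batched, non-trainable inputs into separate circuit executions
# while the trainable weights are shared, so the result has one entry per batch element.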
res = circuit(inputs, weights)
assert res.shape == (batch_size,)
def test_simple_circuit_one_batch():
"""Test that batching works for a simple circuit when the batch size is 1"""
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(inputs, weights):
qml.RY(weights[0], wires=0)
qml.AngleEmbedding(inputs, wires=range(2), rotation="Y")
qml.RY(weights[1], wires=1)
return qml.expval(qml.PauliZ(1))
batch_size = 1
inputs = np.random.uniform(0, np.pi, (batch_size, 2), requires_grad=False)
weights = np.random.uniform(-np.pi, np.pi, (2,))
res = circuit(inputs, weights)
assert res.shape == (batch_size,)
def test_simple_circuit_with_prep():
"""Test that batching works for a simple circuit with a state preparation"""
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=1)
@qml.qnode(dev)
def circuit(inputs, weights):
qml.StatePrep(np.array([0, 0, 1, 0]), wires=[0, 1])
qml.RX(inputs, wires=0)
qml.RY(weights[0], wires=0)
qml.RY(weights[1], wires=1)
return qml.expval(qml.PauliZ(1))
batch_size = 5
inputs = np.random.uniform(0, np.pi, (batch_size,), requires_grad=False)
weights = np.random.uniform(-np.pi, np.pi, (2,))
res = circuit(inputs, weights)
assert res.shape == (batch_size,)
def test_circuit_non_param_operator_before_batched_operator():
"""Test a circuit where a non-parametric operation is located before a batched operator."""
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev)
def circuit(input):
qml.CNOT(wires=[0, 1])
qml.RY(input, wires=1)
qml.RX(0.1, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
batch_size = 3
input = np.linspace(0.1, 0.5, batch_size, requires_grad=False)
res = circuit(input)
assert res.shape == (batch_size,)
def test_value_error():
"""Test if the batch_input raises relevant errors correctly"""
dev = qml.device("default.qubit", wires=2)
class Embedding(qml.AngleEmbedding):
"""Variant of qml.AngleEmbedding that does not provide fixed
``ndim_params`` in order to allow for the detection of inconsistent
batching in ``batch_input``."""
@property
def ndim_params(self):
return self._ndim_params
@partial(qml.batch_input, argnum=[0, 2])
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(input1, input2, weights):
Embedding(input1, wires=range(2), rotation="Y")
qml.RY(weights[0], wires=0)
qml.RY(input2[0], wires=0)
qml.RY(weights[1], wires=1)
return qml.expval(qml.PauliZ(1))
batch_size = 5
input1 = np.random.uniform(0, np.pi, (batch_size, 2), requires_grad=False)
input2 = np.random.uniform(0, np.pi, (4, 1), requires_grad=False)
weights = np.random.uniform(-np.pi, np.pi, (2,))
with pytest.raises(ValueError, match="Batch dimension for all gate arguments"):
circuit(input1, input2, weights)
def test_batch_input_with_trainable_parameters_raises_error():
"""Test that using the batch_input method with trainable parameters raises a ValueError."""
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev)
def circuit(input):
qml.RY(input, wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(0.1, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
batch_size = 3
input = np.linspace(0.1, 0.5, batch_size, requires_grad=True)
with pytest.raises(
ValueError,
match="Batched inputs must be non-trainable."
+ " Please make sure that the parameters indexed by "
+ "'argnum' are not marked as trainable.",
):
circuit(input)
def test_mottonenstate_preparation(mocker):
"""Test that batching works for MottonenStatePreparation"""
dev = qml.device("default.qubit", wires=3)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, interface="autograd")
def circuit(data, weights):
qml.templates.MottonenStatePreparation(data, wires=[0, 1, 2])
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
return qml.probs(wires=[0, 1, 2])
batch_size = 3
# create a batched input statevector
data = np.random.random((batch_size, 2**3), requires_grad=False)
data /= np.linalg.norm(data, axis=1).reshape(-1, 1) # normalize
# weights is not batched
weights = np.random.random((10, 3, 3), requires_grad=True)
spy = mocker.spy(circuit.device, "execute")
res = circuit(data, weights)
assert res.shape == (batch_size, 2**3)
assert len(spy.call_args[0][0]) == batch_size
# check the results against individually executed circuits (no batching)
@qml.qnode(dev)
def circuit2(data, weights):
qml.templates.MottonenStatePreparation(data, wires=[0, 1, 2])
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
return qml.probs(wires=[0, 1, 2])
indiv_res = []
for state in data:
indiv_res.append(circuit2(state, weights))
assert np.allclose(res, indiv_res)
def test_basis_state_preparation(mocker):
"""Test that batching works for BasisStatePreparation"""
dev = qml.device("default.qubit", wires=3)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, interface="autograd")
def circuit(data, weights):
qml.templates.BasisStatePreparation(data, wires=[0, 1, 2])
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
return qml.probs(wires=[0, 1, 2])
batch_size = 3
# create a batch of input basis states
data = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], requires_grad=False)
# weights is not batched
weights = np.random.random((10, 3, 3), requires_grad=True)
spy = mocker.spy(circuit.device, "execute")
res = circuit(data, weights)
assert res.shape == (batch_size, 2**3)
assert len(spy.call_args[0][0]) == batch_size
# check the results against individually executed circuits (no batching)
@qml.qnode(dev)
def circuit2(data, weights):
qml.templates.BasisStatePreparation(data, wires=[0, 1, 2])
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
return qml.probs(wires=[0, 1, 2])
indiv_res = []
for state in data:
indiv_res.append(circuit2(state, weights))
assert np.allclose(res, indiv_res)
def test_qubit_state_prep(mocker):
"""Test that batching works for StatePrep"""
dev = qml.device("default.qubit", wires=3)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, interface="autograd")
def circuit(data, weights):
qml.StatePrep(data, wires=[0, 1, 2])
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
return qml.probs(wires=[0, 1, 2])
batch_size = 3
# create a batched input statevector
data = np.random.random((batch_size, 2**3), requires_grad=False)
data /= np.linalg.norm(data, axis=1).reshape(-1, 1) # normalize
# weights is not batched
weights = np.random.random((10, 3, 3), requires_grad=True)
spy = mocker.spy(circuit.device, "execute")
res = circuit(data, weights)
assert res.shape == (batch_size, 2**3)
assert len(spy.call_args[0][0]) == batch_size
# check the results against individually executed circuits (no batching)
@qml.qnode(dev)
def circuit2(data, weights):
qml.StatePrep(data, wires=[0, 1, 2])
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
return qml.probs(wires=[0, 1, 2])
indiv_res = []
for state in data:
indiv_res.append(circuit2(state, weights))
assert np.allclose(res, indiv_res)
def test_multi_returns():
"""Test that batching works for a simple circuit with multiple returns"""
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(inputs, weights):
qml.RY(weights[0], wires=0)
qml.AngleEmbedding(inputs, wires=range(2), rotation="Y")
qml.RY(weights[1], wires=1)
return qml.expval(qml.PauliZ(1)), qml.probs(wires=[0, 1])
batch_size = 6
inputs = np.random.uniform(0, np.pi, (batch_size, 2), requires_grad=False)
weights = np.random.uniform(-np.pi, np.pi, (2,))
res = circuit(inputs, weights)
assert isinstance(res, tuple)
assert len(res) == 2
assert res[0].shape == (batch_size,)
assert res[1].shape == (batch_size, 4)
def test_shot_vector():
"""Test that batching works for a simple circuit with a shot vector"""
dev = qml.device("default.qubit", wires=2, shots=(100, (200, 3), 300))
@partial(qml.batch_input, argnum=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(inputs, weights):
qml.RY(weights[0], wires=0)
qml.AngleEmbedding(inputs, wires=range(2), rotation="Y")
qml.RY(weights[1], wires=1)
return qml.probs(wires=[0, 1])
batch_size = 6
inputs = np.random.uniform(0, np.pi, (batch_size, 2), requires_grad=False)
weights = np.random.uniform(-np.pi, np.pi, (2,))
res = circuit(inputs, weights)
assert isinstance(res, tuple)
assert len(res) == 5
# pylint:disable=not-an-iterable
assert all(shot_res.shape == (batch_size, 4) for shot_res in res)
def test_multi_returns_shot_vector():
"""Test that batching works for a simple circuit with multiple returns
and with a shot vector"""
dev = qml.device("default.qubit", wires=2, shots=(100, (200, 3), 300))
@partial(qml.batch_input, argnum=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(inputs, weights):
qml.RY(weights[0], wires=0)
qml.AngleEmbedding(inputs, wires=range(2), rotation="Y")
qml.RY(weights[1], wires=1)
return qml.expval(qml.PauliZ(1)), qml.probs(wires=[0, 1])
batch_size = 6
inputs = np.random.uniform(0, np.pi, (batch_size, 2), requires_grad=False)
weights = np.random.uniform(-np.pi, np.pi, (2,))
res = circuit(inputs, weights)
assert isinstance(res, tuple)
assert len(res) == 5
assert all(isinstance(shot_res, tuple) for shot_res in res)
assert all(len(shot_res) == 2 for shot_res in res)
assert all(shot_res[0].shape == (batch_size,) for shot_res in res)
assert all(shot_res[1].shape == (batch_size, 4) for shot_res in res)
class TestDiffSingle:
"""Test gradients for a single measurement"""
@pytest.mark.autograd
@pytest.mark.parametrize("diff_method", ["backprop", "adjoint", "parameter-shift"])
def test_autograd(self, diff_method, tol):
"""Test derivatives when using autograd"""
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method)
def circuit(input, x):
qml.RY(input, wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
batch_size = 3
def cost(input, x):
return np.sum(circuit(input, x))
input = np.linspace(0.1, 0.5, batch_size, requires_grad=False)
x = np.array(0.1, requires_grad=True)
res = qml.grad(cost)(input, x)
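# The circuit evaluates to cos(x) * sin(input_i) for each batch element, so the gradient
# of the summed cost with respect to x is -sin(x) * sum(sin(input)).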
expected = -np.sin(0.1) * sum(np.sin(input))
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.jax
@pytest.mark.parametrize("diff_method", ["backprop", "adjoint", "parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "jax"])
def test_jax(self, diff_method, tol, interface):
"""Test derivatives when using JAX"""
import jax
import jax.numpy as jnp
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
batch_size = 3
def cost(input, x):
return jnp.sum(circuit(input, x))
input = jnp.linspace(0.1, 0.5, batch_size)
x = jnp.array(0.1)
res = jax.grad(cost, argnums=1)(input, x)
expected = -np.sin(0.1) * sum(np.sin(input))
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.jax
@pytest.mark.parametrize("diff_method", ["adjoint", "parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "jax", "jax-jit"])
def test_jax_jit(self, diff_method, tol, interface):
"""Test derivatives when using JAX"""
import jax
import jax.numpy as jnp
jax.config.update("jax_enable_x64", True)
dev = qml.device("default.qubit", wires=2)
@jax.jit
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
batch_size = 3
def cost(input, x):
return jnp.sum(circuit(input, x))
input = jnp.linspace(0.1, 0.5, batch_size)
x = jnp.array(0.1)
res = jax.grad(cost, argnums=1)(input, x)
expected = -np.sin(0.1) * sum(np.sin(input))
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.torch
@pytest.mark.parametrize("diff_method", ["backprop", "adjoint", "parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "torch"])
def test_torch(self, diff_method, tol, interface):
"""Test derivatives when using torch"""
import torch
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
batch_size = 3
def cost(input, x):
return torch.sum(circuit(input, x))
input = torch.linspace(0.1, 0.5, batch_size, requires_grad=False)
x = torch.tensor(0.1, requires_grad=True)
loss = cost(input, x)
loss.backward()
res = x.grad
expected = -np.sin(0.1) * torch.sum(torch.sin(input))
assert qml.math.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.tf
@pytest.mark.parametrize("diff_method", ["backprop", "adjoint", "parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "tf"])
def test_tf(self, diff_method, tol, interface):
"""Test derivatives when using TF"""
import tensorflow as tf
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
batch_size = 3
input = tf.Variable(np.linspace(0.1, 0.5, batch_size), trainable=False)
x = tf.Variable(0.1, trainable=True)
with tf.GradientTape() as tape:
loss = tf.reduce_sum(circuit(input, x))
res = tape.gradient(loss, x)
expected = -np.sin(0.1) * tf.reduce_sum(tf.sin(input))
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.tf
@pytest.mark.parametrize("diff_method", ["backprop", "adjoint", "parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "tf", "tf-autograph"])
def test_tf_autograph(self, diff_method, tol, interface):
"""Test derivatives when using TF and autograph"""
import tensorflow as tf
dev = qml.device("default.qubit", wires=2)
@tf.function
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=1)
qml.CNOT(wires=[0, 1])
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
batch_size = 3
input = tf.Variable(np.linspace(0.1, 0.5, batch_size), trainable=False)
x = tf.Variable(0.1, trainable=True, dtype=tf.float64)
with tf.GradientTape() as tape:
loss = tf.reduce_sum(circuit(input, x))
res = tape.gradient(loss, x)
expected = -np.sin(0.1) * tf.reduce_sum(tf.sin(input))
assert np.allclose(res, expected, atol=tol, rtol=0)
class TestDiffMulti:
"""Test gradients for multiple measurements"""
@pytest.mark.autograd
@pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift"])
def test_autograd(self, diff_method, tol):
"""Test derivatives when using autograd"""
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method)
def circuit(input, x):
qml.RY(input, wires=0)
qml.RY(x, wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.probs(wires=[0, 1])
def cost(input, x):
res = circuit(input, x)
return qml.math.concatenate([qml.math.expand_dims(res[0], 1), res[1]], axis=1)
batch_size = 3
input = np.linspace(0.1, 0.5, batch_size, requires_grad=False)
x = np.array(0.1, requires_grad=True)
res = cost(input, x)
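# With theta = input + x the state is cos(theta/2)|00> + sin(theta/2)|11>, so
# <Z0> = cos(theta) and the probabilities are [cos(theta/2)**2, 0, 0, sin(theta/2)**2].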
expected = qml.math.transpose(
qml.math.stack(
[
np.cos(input + x),
np.cos((input + x) / 2) ** 2,
np.zeros_like(input),
np.zeros_like(input),
np.sin((input + x) / 2) ** 2,
]
)
)
assert qml.math.allclose(res, expected, atol=tol)
grad = qml.jacobian(lambda x: cost(input, x))(x)
expected = qml.math.transpose(
qml.math.stack(
[
-np.sin(input + x),
-np.sin(input + x) / 2,
np.zeros_like(input),
np.zeros_like(input),
np.sin(input + x) / 2,
]
)
)
assert qml.math.allclose(grad, expected, atol=tol, rtol=0)
@pytest.mark.jax
@pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "jax"])
def test_jax(self, diff_method, tol, interface):
"""Test derivatives when using JAX"""
import jax
import jax.numpy as jnp
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=0)
qml.RY(x, wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.probs(wires=[0, 1])
batch_size = 3
input = jnp.linspace(0.1, 0.5, batch_size)
x = jnp.array(0.1)
res = circuit(input, x)
expected = (
jnp.cos(input + x),
qml.math.transpose(
qml.math.stack(
[
np.cos((input + x) / 2) ** 2,
np.zeros_like(input),
np.zeros_like(input),
np.sin((input + x) / 2) ** 2,
]
)
),
)
assert isinstance(res, tuple)
assert len(res) == 2
for r, exp in zip(res, expected):
assert qml.math.allclose(r, exp, atol=tol)
grad = jax.jacobian(circuit, argnums=1)(input, x)
expected = (
-jnp.sin(input + x),
qml.math.transpose(
qml.math.stack(
[
-jnp.sin(input + x) / 2,
jnp.zeros_like(input),
jnp.zeros_like(input),
jnp.sin(input + x) / 2,
]
)
),
)
assert isinstance(grad, tuple)
assert len(grad) == 2
for g, exp in zip(grad, expected):
assert qml.math.allclose(g, exp, atol=tol, rtol=0)
@pytest.mark.jax
@pytest.mark.parametrize("diff_method", ["parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "jax", "jax-jit"])
def test_jax_jit(self, diff_method, tol, interface):
"""Test derivatives when using JAX and jitting"""
import jax
import jax.numpy as jnp
jax.config.update("jax_enable_x64", True)
dev = qml.device("default.qubit", wires=2)
@jax.jit
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=0)
qml.RY(x, wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.probs(wires=[0, 1])
batch_size = 3
input = jnp.linspace(0.1, 0.5, batch_size)
x = jnp.array(0.1)
res = circuit(input, x)
expected = (
jnp.cos(input + x),
qml.math.transpose(
qml.math.stack(
[
np.cos((input + x) / 2) ** 2,
np.zeros_like(input),
np.zeros_like(input),
np.sin((input + x) / 2) ** 2,
]
)
),
)
assert isinstance(res, tuple)
assert len(res) == 2
for r, exp in zip(res, expected):
assert qml.math.allclose(r, exp, atol=tol)
grad = jax.jacobian(circuit, argnums=1)(input, x)
expected = (
-jnp.sin(input + x),
qml.math.transpose(
qml.math.stack(
[
-jnp.sin(input + x) / 2,
jnp.zeros_like(input),
jnp.zeros_like(input),
jnp.sin(input + x) / 2,
]
)
),
)
assert isinstance(grad, tuple)
assert len(grad) == 2
for g, exp in zip(grad, expected):
assert qml.math.allclose(g, exp, atol=tol, rtol=0)
@pytest.mark.torch
@pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "torch"])
def test_torch(self, diff_method, tol, interface):
"""Test derivatives when using torch"""
import torch
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=0)
qml.RY(x, wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.probs(wires=[0, 1])
batch_size = 3
input = torch.tensor(np.linspace(0.1, 0.5, batch_size), requires_grad=False)
x = torch.tensor(0.1, requires_grad=True)
res = circuit(input, x)
expected = (
torch.cos(input + x),
qml.math.transpose(
qml.math.stack(
[
torch.cos((input + x) / 2) ** 2,
torch.zeros_like(input),
torch.zeros_like(input),
torch.sin((input + x) / 2) ** 2,
]
)
),
)
assert isinstance(res, tuple)
assert len(res) == 2
for r, exp in zip(res, expected):
assert qml.math.allclose(r, exp, atol=tol)
grad = torch.autograd.functional.jacobian(lambda x: circuit(input, x), x)
expected = (
-torch.sin(input + x),
qml.math.transpose(
qml.math.stack(
[
-torch.sin(input + x) / 2,
torch.zeros_like(input),
torch.zeros_like(input),
torch.sin(input + x) / 2,
]
)
),
)
assert isinstance(grad, tuple)
assert len(grad) == 2
for g, exp in zip(grad, expected):
assert qml.math.allclose(g, exp, atol=tol, rtol=0)
@pytest.mark.tf
@pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "tf"])
def test_tf(self, diff_method, tol, interface):
"""Test derivatives when using TF"""
import tensorflow as tf
dev = qml.device("default.qubit", wires=2)
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=0)
qml.RY(x, wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.probs(wires=[0, 1])
batch_size = 3
input = tf.Variable(np.linspace(0.1, 0.5, batch_size), trainable=False)
x = tf.Variable(0.1, trainable=True, dtype=tf.float64)
with tf.GradientTape() as tape:
res = circuit(input, x)
res = qml.math.concatenate([qml.math.expand_dims(res[0], 1), res[1]], axis=1)
expected = qml.math.transpose(
qml.math.stack(
[
np.cos(input + x),
np.cos((input + x) / 2) ** 2,
np.zeros_like(input),
np.zeros_like(input),
np.sin((input + x) / 2) ** 2,
]
)
)
assert qml.math.allclose(res, expected, atol=tol)
grad = tape.jacobian(res, x)
expected = qml.math.transpose(
qml.math.stack(
[
-np.sin(input + x),
-np.sin(input + x) / 2,
np.zeros_like(input),
np.zeros_like(input),
np.sin(input + x) / 2,
]
)
)
assert qml.math.allclose(grad, expected, atol=tol, rtol=0)
@pytest.mark.tf
@pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift"])
@pytest.mark.parametrize("interface", ["auto", "tf", "tf-autograph"])
def test_tf_autograph(self, diff_method, tol, interface):
"""Test derivatives when using TF and autograph"""
import tensorflow as tf
dev = qml.device("default.qubit", wires=2)
@tf.function
@partial(qml.batch_input, argnum=0)
@qml.qnode(dev, diff_method=diff_method, interface=interface)
def circuit(input, x):
qml.RY(input, wires=0)
qml.RY(x, wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.probs(wires=[0, 1])
batch_size = 3
input = tf.Variable(np.linspace(0.1, 0.5, batch_size), trainable=False)
x = tf.Variable(0.1, trainable=True, dtype=tf.float64)
with tf.GradientTape() as tape:
res = circuit(input, x)
res = qml.math.concatenate([qml.math.expand_dims(res[0], 1), res[1]], axis=1)
expected = qml.math.transpose(
qml.math.stack(
[
np.cos(input + x),
np.cos((input + x) / 2) ** 2,
np.zeros_like(input),
np.zeros_like(input),
np.sin((input + x) / 2) ** 2,
]
)
)
assert qml.math.allclose(res, expected, atol=tol)
grad = tape.jacobian(res, x)
expected = qml.math.transpose(
qml.math.stack(
[
-np.sin(input + x),
-np.sin(input + x) / 2,
np.zeros_like(input),
np.zeros_like(input),
np.sin(input + x) / 2,
]
)
)
assert qml.math.allclose(grad, expected, atol=tol, rtol=0)
def test_unbatched_not_copied():
"""Test that operators containing unbatched parameters are not copied"""
batch_size = 5
inputs = np.random.uniform(0, np.pi, (batch_size, 2), requires_grad=False)
weights = np.random.uniform(-np.pi, np.pi, (2,))
ops = [
qml.RY(weights[0], wires=0),
qml.AngleEmbedding(inputs, wires=range(2), rotation="Y"),
qml.RY(weights[1], wires=1),
]
meas = [qml.expval(qml.PauliZ(1))]
tape = qml.tape.QuantumScript(ops, meas)
tape.trainable_params = [0, 2]
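# batch_input returns (expanded tapes, processing function); only the tapes are needed here.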
new_tapes = qml.batch_input(tape, argnum=1)[0]
assert len(new_tapes) == batch_size
for new_tape in new_tapes:
# same instance of RY operators
assert new_tape.operations[0] is tape.operations[0]
assert new_tape.operations[2] is tape.operations[2]
# different instance of AngleEmbedding
assert new_tape.operations[1] is not tape.operations[1]
|
pennylane/tests/transforms/test_batch_input.py/0
|
{
"file_path": "pennylane/tests/transforms/test_batch_input.py",
"repo_id": "pennylane",
"token_count": 15427
}
| 94 |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the optimization transform ``commute_controlled``.
"""
import pytest
from utils import check_matrix_equivalence, compare_operation_lists
import pennylane as qml
from pennylane import numpy as np
from pennylane.transforms.optimization import commute_controlled
from pennylane.wires import Wires
class TestCommuteControlled:
"""Tests for single-qubit gates being pushed through controlled gates."""
def test_invalid_direction(self):
"""Test that any direction other than 'left' or 'right' raises an error."""
def qfunc():
qml.PauliX(wires=2)
qml.CNOT(wires=[0, 2])
qml.RX(0.2, wires=2)
transformed_qfunc = commute_controlled(qfunc, direction="sideways")
with pytest.raises(ValueError, match="must be 'left' or 'right'"):
qml.tape.make_qscript(transformed_qfunc)()
@pytest.mark.parametrize("direction", [("left"), ("right")])
def test_gate_with_no_basis(self, direction):
"""Test that gates with no basis specified are ignored."""
def qfunc():
qml.PauliX(wires=2)
qml.ControlledQubitUnitary(np.array([[0, 1], [1, 0]]), control_wires=0, wires=2)
qml.PauliX(wires=2)
transformed_qfunc = commute_controlled(qfunc, direction=direction)
ops = qml.tape.make_qscript(transformed_qfunc)().operations
names_expected = ["PauliX", "ControlledQubitUnitary", "PauliX"]
wires_expected = [Wires(2), Wires([0, 2]), Wires(2)]
compare_operation_lists(ops, names_expected, wires_expected)
@pytest.mark.parametrize("direction", [("left"), ("right")])
def test_gate_blocked_different_basis(self, direction):
"""Test that gates do not get pushed through controlled gates whose target bases don't match."""
def qfunc():
qml.PauliZ(wires="b")
qml.CNOT(wires=[2, "b"])
qml.PauliY(wires="b")
transformed_qfunc = commute_controlled(qfunc, direction=direction)
ops = qml.tape.make_qscript(transformed_qfunc)().operations
names_expected = ["PauliZ", "CNOT", "PauliY"]
wires_expected = [Wires("b"), Wires([2, "b"]), Wires("b")]
compare_operation_lists(ops, names_expected, wires_expected)
def test_push_x_gates_right(self):
"""Test that X-basis gates before controlled-X-type gates on targets get pushed ahead."""
def qfunc():
qml.PauliX(wires=2)
qml.CNOT(wires=[0, 2])
qml.RX(0.2, wires=2)
qml.Toffoli(wires=[0, 1, 2])
qml.SX(wires=1)
qml.PauliX(wires=1)
qml.CRX(0.1, wires=[0, 1])
transformed_qfunc = commute_controlled(qfunc)
ops = qml.tape.make_qscript(transformed_qfunc)().operations
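# The X-basis gates acting on the targets (wires 2 and 1) commute through the CNOT,
# Toffoli and CRX, so they end up to the right of those controlled gates.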
names_expected = ["CNOT", "Toffoli", "PauliX", "RX", "CRX", "SX", "PauliX"]
wires_expected = [
Wires([0, 2]),
Wires([0, 1, 2]),
Wires(2),
Wires(2),
Wires([0, 1]),
Wires(1),
Wires(1),
]
compare_operation_lists(ops, names_expected, wires_expected)
def test_push_x_gates_left(self):
"""Test that X-basis gates after controlled-X-type gates on targets get pushed back."""
def qfunc():
qml.CNOT(wires=[0, 2])
qml.PauliX(wires=2)
qml.RX(0.2, wires=2)
qml.Toffoli(wires=[0, 1, 2])
qml.CRX(0.1, wires=[0, 1])
qml.SX(wires=1)
qml.PauliX(wires=1)
transformed_qfunc = commute_controlled(qfunc, direction="left")
ops = qml.tape.make_qscript(transformed_qfunc)().operations
names_expected = [
"PauliX",
"RX",
"CNOT",
"Toffoli",
"SX",
"PauliX",
"CRX",
]
wires_expected = [
Wires(2),
Wires(2),
Wires([0, 2]),
Wires([0, 1, 2]),
Wires(1),
Wires(1),
Wires([0, 1]),
]
compare_operation_lists(ops, names_expected, wires_expected)
@pytest.mark.parametrize("direction", [("left"), ("right")])
def test_dont_push_x_gates(self, direction):
"""Test that X-basis gates before controlled-X-type gates on controls don't get pushed."""
def qfunc():
qml.PauliX(wires="a")
qml.CNOT(wires=["a", "c"])
qml.RX(0.2, wires="a")
qml.Toffoli(wires=["c", "a", "b"])
transformed_qfunc = commute_controlled(qfunc, direction=direction)
ops = qml.tape.make_qscript(transformed_qfunc)().operations
names_expected = ["PauliX", "CNOT", "RX", "Toffoli"]
wires_expected = [Wires("a"), Wires(["a", "c"]), Wires("a"), Wires(["c", "a", "b"])]
compare_operation_lists(ops, names_expected, wires_expected)
def test_push_y_gates_right(self):
"""Test that Y-basis gates before controlled-Y-type gates on targets get pushed ahead."""
def qfunc():
qml.PauliY(wires=2)
qml.CRY(-0.5, wires=["a", 2])
qml.CNOT(wires=[1, 2])
qml.RY(0.3, wires=1)
qml.CY(wires=["a", 1])
transformed_qfunc = commute_controlled(qfunc)
ops = qml.tape.make_qscript(transformed_qfunc)().operations
names_expected = ["CRY", "PauliY", "CNOT", "CY", "RY"]
wires_expected = [Wires(["a", 2]), Wires(2), Wires([1, 2]), Wires(["a", 1]), Wires(1)]
compare_operation_lists(ops, names_expected, wires_expected)
def test_push_y_gates_left(self):
"""Test that Y-basis gates after controlled-Y-type gates on targets get pushed behind."""
def qfunc():
qml.CRY(-0.5, wires=["a", 2])
qml.PauliY(wires=2)
qml.CNOT(wires=[1, 2])
qml.CY(wires=["a", 1])
qml.RY(0.3, wires=1)
transformed_qfunc = commute_controlled(qfunc, direction="left")
ops = qml.tape.make_qscript(transformed_qfunc)().operations
names_expected = ["PauliY", "CRY", "CNOT", "RY", "CY"]
wires_expected = [Wires(2), Wires(["a", 2]), Wires([1, 2]), Wires(1), Wires(["a", 1])]
compare_operation_lists(ops, names_expected, wires_expected)
@pytest.mark.parametrize("direction", [("left"), ("right")])
def test_dont_push_y_gates(self, direction):
"""Test that Y-basis gates next to controlled-Y-type gates on controls don't get pushed."""
def qfunc():
qml.CRY(-0.2, wires=["a", 2])
qml.PauliY(wires="a")
qml.CNOT(wires=[1, 2])
qml.CY(wires=["a", 1])
qml.RY(0.3, wires="a")
transformed_qfunc = commute_controlled(qfunc, direction=direction)
ops = qml.tape.make_qscript(transformed_qfunc)().operations
names_expected = ["CRY", "PauliY", "CNOT", "CY", "RY"]
wires_expected = [Wires(["a", 2]), Wires("a"), Wires([1, 2]), Wires(["a", 1]), Wires("a")]
compare_operation_lists(ops, names_expected, wires_expected)
def test_push_z_gates_right(self):
"""Test that Z-basis gates before controlled-Z-type gates on controls *and* targets get pushed ahead."""
def qfunc():
qml.PauliZ(wires=2)
qml.S(wires=0)
qml.CZ(wires=[0, 2])
qml.CNOT(wires=[0, 1])
qml.PhaseShift(0.2, wires=2)
qml.T(wires=0)
qml.PauliZ(wires=0)
qml.CRZ(0.5, wires=[0, 1])
transformed_qfunc = commute_controlled(qfunc)
ops = qml.tape.make_qscript(transformed_qfunc)().operations
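# Z-basis gates commute through the controls of CNOT and CRZ and through both wires of the
# diagonal CZ, so they are all pushed to the right of the controlled gates.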
names_expected = ["CZ", "PauliZ", "CNOT", "PhaseShift", "CRZ", "S", "T", "PauliZ"]
wires_expected = (
[Wires([0, 2]), Wires(2), Wires([0, 1]), Wires(2)] + [Wires([0, 1])] + [Wires(0)] * 3
)
compare_operation_lists(ops, names_expected, wires_expected)
def test_push_z_gates_left(self):
"""Test that Z-basis after before controlled-Z-type gates on controls *and*
targets get pushed behind."""
def qfunc():
qml.CZ(wires=[0, 2])
qml.PauliZ(wires=2)
qml.S(wires=0)
qml.CNOT(wires=[0, 1])
qml.CRZ(0.5, wires=[0, 1])
qml.RZ(0.2, wires=2)
qml.T(wires=0)
qml.PauliZ(wires=0)
transformed_qfunc = commute_controlled(qfunc, direction="left")
ops = qml.tape.make_qscript(transformed_qfunc)().operations
names_expected = ["PauliZ", "S", "RZ", "T", "PauliZ", "CZ", "CNOT", "CRZ"]
wires_expected = [Wires(2), Wires(0), Wires(2), Wires(0), Wires(0), Wires([0, 2])] + [
Wires([0, 1])
] * 2
compare_operation_lists(ops, names_expected, wires_expected)
@pytest.mark.parametrize("direction", [("left"), ("right")])
def test_push_mixed_with_matrix(self, direction):
"""Test that arbitrary gates after controlled gates on controls *and*
targets get properly pushed."""
# pylint:disable=too-many-function-args
def qfunc():
qml.PauliX(wires=1)
qml.S(wires=0)
qml.CZ(wires=[0, 1])
qml.CNOT(wires=[1, 0])
qml.PauliY(wires=1)
qml.CRY(0.5, wires=[1, 0])
qml.PhaseShift(0.2, wires=0)
qml.PauliY(wires=1)
qml.T(wires=0)
qml.CRZ(-0.3, wires=[0, 1])
qml.RZ(0.2, wires=0)
qml.PauliZ(wires=0)
qml.PauliX(wires=1)
qml.CRY(0.2, wires=[1, 0])
transformed_qfunc = commute_controlled(qfunc, direction=direction)
original_ops = qml.tape.make_qscript(qfunc)().operations
transformed_ops = qml.tape.make_qscript(transformed_qfunc)().operations
assert len(original_ops) == len(transformed_ops)
# Compare matrices
compute_matrix = qml.matrix(qfunc, [0, 1])
matrix_expected = compute_matrix()
compute_transformed_matrix = qml.matrix(transformed_qfunc, [0, 1])
matrix_obtained = compute_transformed_matrix()
assert check_matrix_equivalence(matrix_expected, matrix_obtained)
# Example QNode and device for interface testing
dev = qml.device("default.qubit", wires=3)
def qfunc_all_ops(theta):
qml.PauliX(wires=2)
qml.S(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliY(wires=1)
qml.CRY(theta[0], wires=[2, 1])
qml.PhaseShift(theta[1], wires=0)
qml.T(wires=0)
qml.Toffoli(wires=[0, 1, 2])
return qml.expval(qml.PauliZ(0))
transformed_qfunc_all_ops = commute_controlled(qfunc_all_ops)
expected_op_list = ["PauliX", "CNOT", "CRY", "PauliY", "Toffoli", "S", "PhaseShift", "T"]
expected_wires_list = [
Wires(2),
Wires([0, 1]),
Wires([2, 1]),
Wires(1),
Wires([0, 1, 2]),
Wires(0),
Wires(0),
Wires(0),
]
class TestCommuteControlledInterfaces:
"""Test that single-qubit gates can be pushed through controlled gates in all interfaces."""
@pytest.mark.autograd
def test_commute_controlled_autograd(self):
"""Test QNode and gradient in autograd interface."""
original_qnode = qml.QNode(qfunc_all_ops, dev)
transformed_qnode = qml.QNode(transformed_qfunc_all_ops, dev)
input = np.array([0.1, 0.2], requires_grad=True)
# Check that the numerical output is the same
assert qml.math.allclose(original_qnode(input), transformed_qnode(input))
# Check that the gradient is the same
assert qml.math.allclose(
qml.grad(original_qnode)(input), qml.grad(transformed_qnode)(input)
)
# Check operation list
ops = transformed_qnode.qtape.operations
compare_operation_lists(ops, expected_op_list, expected_wires_list)
@pytest.mark.torch
def test_commute_controlled_torch(self):
"""Test QNode and gradient in torch interface."""
import torch
original_qnode = qml.QNode(qfunc_all_ops, dev)
transformed_qnode = qml.QNode(transformed_qfunc_all_ops, dev)
original_input = torch.tensor([1.2, -0.35], requires_grad=True)
transformed_input = torch.tensor([1.2, -0.35], requires_grad=True)
original_result = original_qnode(original_input)
transformed_result = transformed_qnode(transformed_input)
# Check that the numerical output is the same
assert qml.math.allclose(original_result, transformed_result)
# Check that the gradient is the same
original_result.backward()
transformed_result.backward()
assert qml.math.allclose(original_input.grad, transformed_input.grad)
# Check operation list
ops = transformed_qnode.qtape.operations
compare_operation_lists(ops, expected_op_list, expected_wires_list)
@pytest.mark.tf
def test_commute_controlled_tf(self):
"""Test QNode and gradient in tensorflow interface."""
import tensorflow as tf
original_qnode = qml.QNode(qfunc_all_ops, dev)
transformed_qnode = qml.QNode(transformed_qfunc_all_ops, dev)
original_input = tf.Variable([0.8, -0.6])
transformed_input = tf.Variable([0.8, -0.6])
original_result = original_qnode(original_input)
transformed_result = transformed_qnode(transformed_input)
# Check that the numerical output is the same
assert qml.math.allclose(original_result, transformed_result)
# Check that the gradient is the same
with tf.GradientTape() as tape:
loss = original_qnode(original_input)
original_grad = tape.gradient(loss, original_input)
with tf.GradientTape() as tape:
loss = transformed_qnode(transformed_input)
transformed_grad = tape.gradient(loss, transformed_input)
assert qml.math.allclose(original_grad, transformed_grad)
# Check operation list
ops = transformed_qnode.qtape.operations
compare_operation_lists(ops, expected_op_list, expected_wires_list)
@pytest.mark.jax
def test_commute_controlled_jax(self):
"""Test QNode and gradient in JAX interface."""
import jax
from jax import numpy as jnp
original_qnode = qml.QNode(qfunc_all_ops, dev)
transformed_qnode = qml.QNode(transformed_qfunc_all_ops, dev)
input = jnp.array([0.3, 0.4], dtype=jnp.float64)
# Check that the numerical output is the same
assert qml.math.allclose(original_qnode(input), transformed_qnode(input))
# Check that the gradient is the same
assert qml.math.allclose(
jax.grad(original_qnode)(input), jax.grad(transformed_qnode)(input)
)
# Check operation list
ops = transformed_qnode.qtape.operations
compare_operation_lists(ops, expected_op_list, expected_wires_list)
### Tape
with qml.queuing.AnnotatedQueue() as q:
qml.PauliX(wires=2)
qml.CNOT(wires=[0, 2])
qml.RX(0.2, wires=2)
qml.Toffoli(wires=[0, 1, 2])
qml.SX(wires=1)
qml.PauliX(wires=1)
qml.CRX(0.1, wires=[0, 1])
tape_circuit = qml.tape.QuantumTape.from_queue(q)
### QFunc
def qfunc_circuit():
"""Qfunc circuit"""
qml.PauliX(wires=2)
qml.CNOT(wires=[0, 2])
qml.RX(0.2, wires=2)
qml.Toffoli(wires=[0, 1, 2])
qml.SX(wires=1)
qml.PauliX(wires=1)
qml.CRX(0.1, wires=[0, 1])
### QNode
dev = qml.devices.DefaultQubit()
@qml.qnode(device=dev)
def qnode_circuit():
qml.PauliX(wires=2)
qml.CNOT(wires=[0, 2])
qml.RX(0.2, wires=2)
qml.Toffoli(wires=[0, 1, 2])
qml.SX(wires=1)
qml.PauliX(wires=1)
qml.CRX(0.1, wires=[0, 1])
return qml.expval(qml.PauliY(1) @ qml.PauliZ(2))
class TestTransformDispatch:
"""Test commute controlled on tape, qfunc and QNode."""
def test_tape(self):
"""Test the transform on tape."""
tapes, _ = commute_controlled(tape_circuit)
assert len(tapes) == 1
tape = tapes[0]
assert len(tape.operations) == 7
names_expected = ["CNOT", "Toffoli", "PauliX", "RX", "CRX", "SX", "PauliX"]
wires_expected = [
Wires([0, 2]),
Wires([0, 1, 2]),
Wires(2),
Wires(2),
Wires([0, 1]),
Wires(1),
Wires(1),
]
compare_operation_lists(tape.operations, names_expected, wires_expected)
def test_qfunc(self):
"""Test the transform on a qfunc inside a qnode."""
@qml.qnode(device=dev)
def new_circuit():
commute_controlled(qfunc_circuit)()
return qml.expval(qml.PauliX(0) @ qml.PauliX(2))
new_circuit()
assert len(new_circuit.tape.operations) == 7
names_expected = ["CNOT", "Toffoli", "PauliX", "RX", "CRX", "SX", "PauliX"]
wires_expected = [
Wires([0, 2]),
Wires([0, 1, 2]),
Wires(2),
Wires(2),
Wires([0, 1]),
Wires(1),
Wires(1),
]
compare_operation_lists(new_circuit.tape.operations, names_expected, wires_expected)
def test_qnode(self):
"""Test the transform on a qnode directly."""
transformed_qnode = commute_controlled(qnode_circuit)
assert not transformed_qnode.transform_program.is_empty()
assert len(transformed_qnode.transform_program) == 1
res = transformed_qnode()
expected = qnode_circuit()
assert np.allclose(res, expected)
|
pennylane/tests/transforms/test_optimization/test_commute_controlled.py/0
|
{
"file_path": "pennylane/tests/transforms/test_optimization/test_commute_controlled.py",
"repo_id": "pennylane",
"token_count": 8593
}
| 95 |
# Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the `pennylane.transforms.zx` folder.
"""
import sys
from functools import partial
import numpy as np
import pytest
import pennylane as qml
from pennylane.tape import QuantumScript
from pennylane.transforms import TransformError
pyzx = pytest.importorskip("pyzx")
pytestmark = pytest.mark.external
supported_operations = [
qml.PauliX(wires=0),
qml.PauliZ(wires=0),
qml.Hadamard(wires=0),
qml.S(wires=0),
qml.T(wires=0),
qml.SWAP(wires=[0, 1]),
qml.CNOT(wires=[0, 1]),
qml.CZ(wires=[0, 1]),
qml.CH(wires=[0, 1]),
]
supported_operations_params = [
qml.RX(0.3, wires=0),
qml.RZ(0.3, wires=0),
qml.CRZ(0.3, wires=[0, 1]),
]
expanded_operations = [qml.PauliY(wires=0), qml.PhaseShift(0.3, wires=0), qml.RY(0.3, wires=0)]
non_diagram_like_operations = [qml.CCZ(wires=[0, 1, 2]), qml.Toffoli(wires=[0, 1, 2])]
decompose_phases = [True, False]
qscript = [True, False]
def test_import_pyzx(monkeypatch):
"""Test if an ImportError is raised by to_zx function."""
with monkeypatch.context() as m:
m.setitem(sys.modules, "pyzx", None)
with pytest.raises(ImportError, match="This feature requires PyZX."):
qml.transforms.to_zx(qml.PauliX(wires=0))
with pytest.raises(ImportError, match="This feature requires PyZX."):
qml.transforms.to_zx(QuantumScript([qml.PauliX(wires=0), qml.PauliZ(wires=1)]))
class TestConvertersZX:
"""Test converters to_zx and from_zx."""
def test_invalid_argument(self):
"""Assert error raised when input is neither a tape, QNode, nor quantum function"""
with pytest.raises(
TransformError,
match="Input is not an Operator, tape, QNode, or quantum function",
):
_ = qml.transforms.to_zx(None)
@pytest.mark.parametrize("script", qscript)
@pytest.mark.parametrize("operation", supported_operations)
def test_supported_operation_no_params(self, operation, script):
"""Test to convert the script to a ZX graph and back for supported operations."""
I = qml.math.eye(2 ** len(operation.wires))
if script:
qs = QuantumScript([operation])
else:
qs = operation
matrix_qscript = qml.matrix(qs, wire_order=qs.wires)
zx_g = qml.transforms.to_zx(qs)
matrix_zx = zx_g.to_matrix()
assert isinstance(zx_g, pyzx.graph.graph_s.GraphS)
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_zx.T))
# Remove global phase
if not np.allclose(mat_product[0, 0], 1.0):
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
qscript_back = qml.transforms.from_zx(zx_g)
assert isinstance(qscript_back, qml.tape.QuantumScript)
matrix_qscript_back = qml.matrix(qscript_back, wire_order=list(range(len(qs.wires))))
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_qscript_back.T))
# Remove global phase
if not np.allclose(mat_product[0, 0], 1.0):
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
@pytest.mark.parametrize("script", qscript)
@pytest.mark.parametrize("operation", supported_operations_params)
def test_supported_operation_params(self, operation, script):
"""Test to convert the script to a ZX graph and back for supported operations with parameters."""
if script:
qs = QuantumScript([operation])
else:
qs = operation
I = qml.math.eye(2 ** len(operation.wires))
matrix_qscript = qml.matrix(qs, wire_order=qs.wires)
zx_g = qml.transforms.to_zx(qs)
matrix_zx = zx_g.to_matrix()
assert isinstance(zx_g, pyzx.graph.graph_s.GraphS)
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_zx.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
qscript_back = qml.transforms.from_zx(zx_g)
assert isinstance(qscript_back, qml.tape.QuantumScript)
matrix_qscript_back = qml.matrix(qscript_back, wire_order=list(range(len(qs.wires))))
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_qscript_back.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
@pytest.mark.parametrize("script", qscript)
@pytest.mark.parametrize("operation", expanded_operations)
def test_operation_need_expansion(self, operation, script):
"""Test to convert the script to a ZX graph and back for operations that needs expansions."""
if script:
qs = QuantumScript([operation])
else:
qs = operation
I = qml.math.eye(2 ** len(operation.wires))
matrix_qscript = qml.matrix(qs, wire_order=qs.wires)
zx_g = qml.transforms.to_zx(qs)
matrix_zx = zx_g.to_matrix()
assert isinstance(zx_g, pyzx.graph.graph_s.GraphS)
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_zx.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
qscript_back = qml.transforms.from_zx(zx_g)
assert isinstance(qscript_back, qml.tape.QuantumScript)
matrix_qscript_back = qml.matrix(qscript_back, wire_order=list(range(len(qs.wires))))
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_qscript_back.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
@pytest.mark.parametrize("operation", non_diagram_like_operations)
def test_non_diagram_like_op(self, operation):
"""Test operations that result in a non diagram like circuit."""
I = qml.math.eye(2 ** len(operation.wires))
qs = QuantumScript([operation], [])
matrix_qscript = qml.matrix(qs, wire_order=qs.wires)
zx_g = qml.transforms.to_zx(qs)
assert isinstance(zx_g, pyzx.graph.graph_s.GraphS)
matrix_zx = zx_g.to_matrix()
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_zx.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
with pytest.raises(qml.QuantumFunctionError, match="Graph doesn't seem circuit like"):
qml.transforms.from_zx(zx_g)
@pytest.mark.parametrize("decompose", decompose_phases)
def test_circuit(self, decompose):
"""Test a simple circuit."""
I = qml.math.eye(2**2)
operations = [
qml.RZ(5 / 4 * np.pi, wires=0),
qml.RZ(3 / 4 * np.pi, wires=1),
qml.PauliY(wires=1),
qml.RX(0.1, wires=0),
qml.PauliZ(wires=0),
qml.RY(0.2, wires=1),
qml.RZ(0.3, wires=1),
qml.PauliX(wires=1),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 0]),
qml.SWAP(wires=[0, 1]),
]
qs = QuantumScript(operations, [])
zx_g = qml.transforms.to_zx(qs)
assert isinstance(zx_g, pyzx.graph.graph_s.GraphS)
matrix_qscript = qml.matrix(qs, wire_order=qs.wires)
matrix_zx = zx_g.to_matrix()
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_zx.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
qscript_back = qml.transforms.from_zx(zx_g, decompose_phases=decompose)
assert isinstance(qscript_back, qml.tape.QuantumScript)
matrix_qscript_back = qml.matrix(qscript_back, wire_order=list(range(len(qs.wires))))
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_qscript_back.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
def test_circuit_mod_5_4(self):
"""Test the circuit mod 5 4."""
operations = [
qml.PauliX(wires=4),
qml.Hadamard(wires=4),
qml.CNOT(wires=[3, 4]),
qml.CNOT(wires=[0, 4]),
qml.T(wires=4),
qml.CNOT(wires=[3, 4]),
qml.adjoint(qml.T)(wires=4),
qml.CNOT(wires=[0, 4]),
qml.CNOT(wires=[0, 3]),
qml.adjoint(qml.T)(wires=3),
qml.CNOT(wires=[0, 3]),
qml.CNOT(wires=[3, 4]),
qml.CNOT(wires=[2, 4]),
qml.adjoint(qml.T)(wires=4),
qml.CNOT(wires=[3, 4]),
qml.T(wires=4),
qml.CNOT(wires=[2, 4]),
qml.CNOT(wires=[2, 3]),
qml.T(wires=3),
qml.CNOT(wires=[2, 3]),
qml.Hadamard(wires=4),
qml.CNOT(wires=[3, 4]),
qml.Hadamard(wires=4),
qml.CNOT(wires=[2, 4]),
qml.adjoint(qml.T)(wires=4),
qml.CNOT(wires=[1, 4]),
qml.T(wires=4),
qml.CNOT(wires=[2, 4]),
qml.adjoint(qml.T)(wires=4),
qml.CNOT(wires=[1, 4]),
qml.T(wires=4),
qml.CNOT(wires=[1, 2]),
qml.adjoint(qml.T)(wires=2),
qml.CNOT(wires=[1, 2]),
qml.Hadamard(wires=4),
qml.CNOT(wires=[2, 4]),
qml.Hadamard(wires=4),
qml.CNOT(wires=[1, 4]),
qml.T(wires=4),
qml.CNOT(wires=[0, 4]),
qml.adjoint(qml.T)(wires=4),
qml.CNOT(wires=[1, 4]),
qml.T(wires=4),
qml.CNOT(wires=[0, 4]),
qml.adjoint(qml.T)(wires=4),
qml.CNOT(wires=[0, 1]),
qml.T(wires=1),
qml.CNOT(wires=[0, 1]),
qml.Hadamard(wires=4),
qml.CNOT(wires=[1, 4]),
qml.CNOT(wires=[0, 4]),
]
qs = QuantumScript(operations, [])
zx_g = qml.transforms.to_zx(qs)
assert isinstance(zx_g, pyzx.graph.graph_s.GraphS)
matrix_qscript = qml.matrix(qs, wire_order=qs.wires)
matrix_zx = zx_g.to_matrix()
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_zx.T))
# Remove global phase
mat_product /= mat_product[0, 0]
I = qml.math.eye(2**5)
assert qml.math.allclose(mat_product, I)
qscript_back = qml.transforms.from_zx(zx_g)
assert isinstance(qscript_back, qml.tape.QuantumScript)
matrix_qscript_back = qml.matrix(qscript_back, wire_order=list(range(len(qs.wires))))
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_qscript_back.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
def test_expand_measurements(self):
"""Test with expansion of measurements."""
I = qml.math.eye(2**2)
operations = [
qml.RX(0.1, wires=0),
qml.PauliZ(wires=0),
qml.RZ(0.3, wires=1),
qml.PauliX(wires=1),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 0]),
qml.SWAP(wires=[0, 1]),
]
measurements = [qml.expval(qml.PauliZ(0) @ qml.PauliX(1))]
qs = QuantumScript(operations, measurements)
zx_g = qml.transforms.to_zx(qs, expand_measurements=True)
assert isinstance(zx_g, pyzx.graph.graph_s.GraphS)
# Expanding the PauliX measurement on wire 1 adds a Hadamard rotation
operations.append(qml.Hadamard(wires=[1]))
operations_with_rotations = operations
qscript_with_rot = QuantumScript(operations_with_rotations, [])
matrix_qscript = qml.matrix(qscript_with_rot, wire_order=[0, 1])
matrix_zx = zx_g.to_matrix()
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_zx.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
qscript_back = qml.transforms.from_zx(zx_g)
assert isinstance(qscript_back, qml.tape.QuantumScript)
matrix_qscript_back = qml.matrix(qscript_back, wire_order=list(range(len(qs.wires))))
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_qscript_back.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
def test_embeddings(self):
"""Test with expansion of prep."""
I = qml.math.eye(2**2)
prep = [qml.AngleEmbedding(features=[1, 2], wires=range(2), rotation="Z")]
operations = [
qml.RX(0.1, wires=0),
qml.PauliZ(wires=0),
qml.RZ(0.3, wires=1),
qml.PauliX(wires=1),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 0]),
qml.SWAP(wires=[0, 1]),
]
qs = QuantumScript(prep + operations, [])
zx_g = qml.transforms.to_zx(qs)
assert isinstance(zx_g, pyzx.graph.graph_s.GraphS)
matrix_qscript = qml.matrix(qs, wire_order=qs.wires)
matrix_zx = zx_g.to_matrix()
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_zx.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
qscript_back = qml.transforms.from_zx(zx_g)
assert isinstance(qscript_back, qml.tape.QuantumScript)
matrix_qscript_back = qml.matrix(qscript_back, wire_order=list(range(len(qs.wires))))
# Check that the two matrices agree up to a global phase
mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_qscript_back.T))
# Remove global phase
mat_product /= mat_product[0, 0]
assert qml.math.allclose(mat_product, I)
def test_no_decomposition(self):
"""Cross qubit connections is not diagram-like."""
graph = pyzx.Graph(None)
q_mapper = pyzx.circuit.gates.TargetMapper()
inputs = []
# Create the qubits in the graph and the qubit mapper
vertex = graph.add_vertex(pyzx.VertexType.BOUNDARY, 0, 0)
inputs.append(vertex)
q_mapper.set_prev_vertex(0, vertex)
q_mapper.set_next_row(0, 1)
q_mapper.set_qubit(0, 0)
# Cross qubit connection
r = q_mapper.next_row(0)
v1 = graph.add_vertex(pyzx.VertexType.Z, q_mapper.to_qubit(0), r)
graph.add_edge(graph.edge(q_mapper.prev_vertex(0), v1), pyzx.EdgeType.SIMPLE)
q_mapper.set_prev_vertex(0, v1)
q_mapper.set_qubit(1, 1)
q_mapper.set_next_row(1, r + 1)
q_mapper.set_next_row(0, r + 1)
r = max(q_mapper.next_row(1), q_mapper.next_row(0))
v2 = graph.add_vertex(pyzx.VertexType.Z, q_mapper.to_qubit(1), r)
graph.add_edge(graph.edge(q_mapper.prev_vertex(0), v2), pyzx.EdgeType.SIMPLE)
q_mapper.set_prev_vertex(0, v2)
q_mapper.set_next_row(1, r + 1)
q_mapper.set_next_row(0, r + 1)
r = max(q_mapper.next_row(1), q_mapper.next_row(0))
graph.add_edge((v1, v2), edgetype=pyzx.EdgeType.SIMPLE)
q_mapper.set_next_row(1, r + 1)
q_mapper.set_next_row(0, r + 1)
graph.scalar.add_power(1)
outputs = []
graph.set_inputs(tuple(inputs))
graph.set_outputs(tuple(outputs))
with pytest.raises(
qml.QuantumFunctionError,
match="Cross qubit connections, the graph is not circuit-like.",
):
qml.transforms.from_zx(graph)
def test_no_suitable_decomposition(self):
"""Test that an error is raised when no suitable decomposition is found."""
operations = [qml.sum(qml.PauliX(0), qml.PauliZ(0))]
qs = QuantumScript(operations, [])
with pytest.raises(
qml.QuantumFunctionError,
match="The expansion of the quantum tape failed, PyZX does not support",
):
qml.transforms.to_zx(qs)
def test_same_type_nodes_simple_edge(self):
"""Test that a Green-Green nodes with simple edge has no corresponding circuit."""
graph = pyzx.Graph(None)
q_mapper = pyzx.circuit.gates.TargetMapper()
c_mapper = pyzx.circuit.gates.TargetMapper()
inputs = []
# Create the qubits in the graph and the qubit mapper
for i in range(2):
vertex = graph.add_vertex(pyzx.VertexType.BOUNDARY, i, 0)
inputs.append(vertex)
q_mapper.set_prev_vertex(i, vertex)
q_mapper.set_next_row(i, 1)
q_mapper.set_qubit(i, i)
# Create Green Green with simple Edge
r = max(q_mapper.next_row(1), q_mapper.next_row(0))
v1 = graph.add_vertex(pyzx.VertexType.Z, q_mapper.to_qubit(1), r)
graph.add_edge(graph.edge(q_mapper.prev_vertex(1), v1), pyzx.EdgeType.SIMPLE)
q_mapper.set_prev_vertex(1, v1)
v2 = graph.add_vertex(pyzx.VertexType.Z, q_mapper.to_qubit(0), r)
graph.add_edge(graph.edge(q_mapper.prev_vertex(0), v2), pyzx.EdgeType.SIMPLE)
q_mapper.set_prev_vertex(0, v2)
graph.add_edge((v1, v2), edgetype=pyzx.EdgeType.SIMPLE)
q_mapper.set_next_row(1, r + 1)
q_mapper.set_next_row(0, r + 1)
graph.scalar.add_power(1)
row = max(q_mapper.max_row(), c_mapper.max_row())
outputs = []
for mapper in (q_mapper, c_mapper):
for label in mapper.labels():
qubit = mapper.to_qubit(label)
vertex = graph.add_vertex(pyzx.VertexType.BOUNDARY, qubit, row)
outputs.append(vertex)
pre_vertex = mapper.prev_vertex(label)
graph.add_edge(graph.edge(pre_vertex, vertex))
graph.set_inputs(tuple(inputs))
graph.set_outputs(tuple(outputs))
with pytest.raises(
qml.QuantumFunctionError,
match="Two green or respectively two red nodes connected by a ",
):
qml.transforms.from_zx(graph)
def test_different_type_node_hadamard_edge(self):
"""Test that a Green-Red nodes with Hadamard edge has no corresponding circuit."""
graph = pyzx.Graph(None)
q_mapper = pyzx.circuit.gates.TargetMapper()
c_mapper = pyzx.circuit.gates.TargetMapper()
inputs = []
# Create the qubits in the graph and the qubit mapper
for i in range(2):
vertex = graph.add_vertex(pyzx.VertexType.BOUNDARY, i, 0)
inputs.append(vertex)
q_mapper.set_prev_vertex(i, vertex)
q_mapper.set_next_row(i, 1)
q_mapper.set_qubit(i, i)
# Create Green Red with Hadamard Edge
r = max(q_mapper.next_row(1), q_mapper.next_row(0))
v1 = graph.add_vertex(pyzx.VertexType.Z, q_mapper.to_qubit(1), r)
graph.add_edge(graph.edge(q_mapper.prev_vertex(1), v1), pyzx.EdgeType.SIMPLE)
q_mapper.set_prev_vertex(1, v1)
v2 = graph.add_vertex(pyzx.VertexType.X, q_mapper.to_qubit(0), r)
graph.add_edge(graph.edge(q_mapper.prev_vertex(0), v2), pyzx.EdgeType.SIMPLE)
q_mapper.set_prev_vertex(0, v2)
graph.add_edge((v1, v2), edgetype=pyzx.EdgeType.HADAMARD)
q_mapper.set_next_row(1, r + 1)
q_mapper.set_next_row(0, r + 1)
graph.scalar.add_power(1)
row = max(q_mapper.max_row(), c_mapper.max_row())
outputs = []
for mapper in (q_mapper, c_mapper):
for label in mapper.labels():
qubit = mapper.to_qubit(label)
vertex = graph.add_vertex(pyzx.VertexType.BOUNDARY, qubit, row)
outputs.append(vertex)
pre_vertex = mapper.prev_vertex(label)
graph.add_edge(graph.edge(pre_vertex, vertex))
graph.set_inputs(tuple(inputs))
graph.set_outputs(tuple(outputs))
with pytest.raises(
qml.QuantumFunctionError,
match="A green and red node connected by a Hadamard edge ",
):
qml.transforms.from_zx(graph)
def test_cx_gate(self):
"""Test that CX node is converted to the right tape"""
graph = pyzx.Graph(None)
q_mapper = pyzx.circuit.gates.TargetMapper()
c_mapper = pyzx.circuit.gates.TargetMapper()
inputs = []
# Create the qubits in the graph and the qubit mapper
for i in range(2):
vertex = graph.add_vertex(pyzx.VertexType.BOUNDARY, i, 0)
inputs.append(vertex)
q_mapper.set_prev_vertex(i, vertex)
q_mapper.set_next_row(i, 1)
q_mapper.set_qubit(i, i)
# Create two red nodes connected by a Hadamard edge (decomposes to H-CNOT-H)
r = max(q_mapper.next_row(1), q_mapper.next_row(0))
v1 = graph.add_vertex(pyzx.VertexType.X, q_mapper.to_qubit(1), r)
graph.add_edge(graph.edge(q_mapper.prev_vertex(1), v1), pyzx.EdgeType.SIMPLE)
q_mapper.set_prev_vertex(1, v1)
v2 = graph.add_vertex(pyzx.VertexType.X, q_mapper.to_qubit(0), r)
graph.add_edge(graph.edge(q_mapper.prev_vertex(0), v2), pyzx.EdgeType.SIMPLE)
q_mapper.set_prev_vertex(0, v2)
graph.add_edge((v1, v2), edgetype=pyzx.EdgeType.HADAMARD)
q_mapper.set_next_row(1, r + 1)
q_mapper.set_next_row(0, r + 1)
graph.scalar.add_power(1)
row = max(q_mapper.max_row(), c_mapper.max_row())
outputs = []
for mapper in (q_mapper, c_mapper):
for label in mapper.labels():
qubit = mapper.to_qubit(label)
vertex = graph.add_vertex(pyzx.VertexType.BOUNDARY, qubit, row)
outputs.append(vertex)
pre_vertex = mapper.prev_vertex(label)
graph.add_edge(graph.edge(pre_vertex, vertex))
graph.set_inputs(tuple(inputs))
graph.set_outputs(tuple(outputs))
tape = qml.transforms.from_zx(graph)
expected_op = [qml.Hadamard(wires=[1]), qml.CNOT(wires=[1, 0]), qml.Hadamard(wires=[1])]
for op, op_ex in zip(tape.operations, expected_op):
qml.assert_equal(op, op_ex)
def test_qnode_decorator(self):
"""Test the QNode decorator."""
dev = qml.device("default.qubit", wires=2)
@partial(qml.transforms.to_zx, expand_measurements=True)
@qml.qnode(device=dev)
def circuit(p):
qml.RZ(p[0], wires=1)
qml.RZ(p[1], wires=1)
qml.RX(p[2], wires=0)
qml.PauliZ(wires=0)
qml.RZ(p[3], wires=1)
qml.PauliX(wires=1)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 0])
qml.SWAP(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
params = [5 / 4 * np.pi, 3 / 4 * np.pi, 0.1, 0.3]
g = circuit(params)
assert isinstance(g, pyzx.graph.graph_s.GraphS)
def test_qnode_decorator_no_params(self):
"""Test the QNode decorator."""
dev = qml.device("default.qubit", wires=2)
@partial(qml.transforms.to_zx, expand_measurements=True)
@qml.qnode(device=dev)
def circuit():
qml.PauliZ(wires=0)
qml.PauliX(wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
g = circuit()
assert isinstance(g, pyzx.graph.graph_s.GraphS)
|
pennylane/tests/transforms/test_zx.py/0
|
{
"file_path": "pennylane/tests/transforms/test_zx.py",
"repo_id": "pennylane",
"token_count": 12476
}
| 96 |
# Contributing Guide
To contribute to Qiskit Experiments, first read the overall [Qiskit project contributing
guidelines](https://github.com/Qiskit/qiskit/blob/main/CONTRIBUTING.md). In addition
to the general guidelines, the specific guidelines for contributing to Qiskit
Experiments are documented below.
Contents:
- [Contributing Guide](#contributing-guide)
- [Proposing a new experiment](#proposing-a-new-experiment)
- [Choosing an issue to work on](#choosing-an-issue-to-work-on)
- [Pull request checklist](#pull-request-checklist)
- [Testing your code](#testing-your-code)
- [STDOUT/STDERR and logging capture](#stdoutstderr-and-logging-capture)
- [Other testing related settings](#other-testing-related-settings)
- [Code style](#code-style)
- [Changelog generation](#changelog-generation)
- [Release notes](#release-notes)
- [Adding a new release note](#adding-a-new-release-note)
- [Linking to issues](#linking-to-issues)
- [Generating release notes](#generating-release-notes)
- [Documentation](#documentation)
- [Updating the documentation](#updating-the-documentation)
- [Building documentation locally](#building-documentation-locally)
- [Deprecation policy](#deprecation-policy)
- [Adding deprecation warnings](#adding-deprecation-warnings)
- [Development cycle](#development-cycle)
- [Branches](#branches)
- [Release cycle](#release-cycle)
### Proposing a new experiment
We welcome suggestions for new experiments to be added to Qiskit Experiments. Good
candidates for experiments should either be well-known or based upon a research paper
or equivalent source, with a use case that is of interest to the Qiskit and quantum
experimentalist community.
If there is an experiment you would like to see added, you can propose it by creating a
[new experiment proposal
issue](https://github.com/Qiskit-Community/qiskit-experiments/issues/new?assignees=&labels=enhancement&template=NEW_EXPERIMENT.md&title=)
in GitHub. The issue template will ask you to fill in details about the experiment type,
protocol, analysis, and implementation, which will give us the necessary information to
decide whether the experiment is feasible to implement and useful to include in our
package library.
### Choosing an issue to work on
We use the following labels to help non-maintainers find issues best suited to their
interests and experience level:
* [good first
issue](https://github.com/Qiskit-Community/qiskit-experiments/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
- these issues are typically the simplest available to work on, perfect for newcomers.
They should already be fully scoped, with a clear approach outlined in the
descriptions.
* [help
wanted](https://github.com/Qiskit-Community/qiskit-experiments/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
- these issues are generally more complex than good first issues. They typically cover
work that core maintainers don't currently have capacity to implement and may require
more investigation/discussion. These are a great option for experienced contributors
looking for something a bit more challenging.
### Pull request checklist
When submitting a pull request for review, please ensure that:
1. The code follows the code style of the project and successfully passes the tests.
2. The API documentation has been updated accordingly.
3. You have updated the relevant documentation or written new docs. In case the PR needs
to be merged without delay (e.g. for a high priority fix), open an issue for updating
or adding the documentation later.
4. You've added tests that cover the changes you've made, if relevant.
5. If your change has an end user facing impact (new feature, deprecation, removal,
etc.), you've added or updated a reno release note for that change and tagged the PR
for the changelog.
6. If your code requires a change to dependencies, you've updated the corresponding
requirements file: `requirements.txt` for core dependencies,
`requirements-extras.txt` for dependencies for optional features, and `requirements-dev.txt`
for dependencies required for running tests and building documentation.
The sections below go into more detail on the guidelines for each point.
### Testing your code
It is important to verify that your code changes don't break any existing tests and that
any new tests you've added also run successfully. Before you open a new pull request for
your change, you'll want to run the test suite locally.
The easiest way to run the test suite is to use
[**tox**](https://tox.readthedocs.io/en/latest/#). You can install tox with pip: `pip
install -U tox`. Tox provides several advantages, but the biggest one is that it builds
an isolated virtualenv for running tests. This means it does not pollute your system
python when running. Additionally, the environment that tox sets up matches the CI
environment more closely and it runs the tests in parallel (resulting in much faster
execution). To run tests on all installed supported python versions and lint/style
checks you can simply run `tox`. Or if you just want to run the tests once for a
specific python version such as 3.10: `tox -epy310`.
If you just want to run a subset of tests you can pass a selection regex to the test
runner. For example, if you want to run all tests that have "dag" in the test id you can
run: `tox -- dag`. You can pass arguments directly to the test runner after the bare
`--`. To see all the options on test selection you can refer to the stestr manual:
https://stestr.readthedocs.io/en/stable/MANUAL.html#test-selection
If you want to run a single test module, test class, or individual test method you can
do this faster with the `-n`/`--no-discover` option. For example, to run a module:
```
tox -epy310 -- -n test.framework.test_composite
```
To run a class:
```
tox -epy310 -- -n test.framework.test_composite.TestCompositeExperimentData
```
To run a method:
```
tox -epy310 -- -n test.framework.test_composite.TestCompositeExperimentData.test_composite_save_load
```
Note that tests will fail automatically if they do not finish execution within 60 seconds.
#### STDOUT/STDERR and logging capture
When running tests in parallel using `stestr` either via tox
or in CI, we set the env variable `QISKIT_TEST_CAPTURE_STREAMS`, which will
capture any text written to stdout, stderr, and log messages and add them as attachments
to the tests run so output can be associated with the test case it originated from.
However, if you run tests with `stestr` outside of these mechanisms, by default the
streams are not captured. To enable stream capture, just set the
`QISKIT_TEST_CAPTURE_STREAMS` env variable to `1`. If this environment variable is set
outside of running with `stestr`, the streams (STDOUT, STDERR, and logging) will still
be captured but **not** displayed in the test runner's output. If you are using the
stdlib unittest runner, a similar result can be accomplished by using the
[`--buffer`](https://docs.python.org/3/library/unittest.html#command-line-options)
option (e.g. `python -m unittest discover --buffer ./test/python`).
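For example, when invoking `stestr` directly you can set the variable inline; the test selection shown below is only illustrative:
```
QISKIT_TEST_CAPTURE_STREAMS=1 stestr run test.framework.test_composite
```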
#### Other testing related settings
The test code defines some environment variables that may occasionally be useful to set:
+ `TEST_TIMEOUT`: An integer representing the maximum time a test can take
before it is considered a failure.
+ `QE_USE_TESTTOOLS`: Set this variable to `FALSE`, `0`, or `NO` to have the
tests use `unittest.TestCase` as the base class. Otherwise, the default is
`testtools.TestCase` which is an extension of `unittest.TestCase`. In some
situations, a developer may wish to use a workflow that is not compatible with
the `testtools` extensions.
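As a sketch, these variables can be combined with a tox run from the command line. The values below are only illustrative, and tox may require the variables to be listed in its `passenv` configuration before they reach the test environment:
```
TEST_TIMEOUT=120 QE_USE_TESTTOOLS=FALSE tox -epy310 -- -n test.framework.test_composite
```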
### Code style
The qiskit-experiments repository uses `black` for code formatting and style and
`pylint` for linting. You can run these checks locally with
```
tox -elint
```
If there is a code formatting issue identified by black you can just run ``black``
locally to fix this (or ``tox -eblack`` which will install it and run it).
Because `pylint` analysis can be slow, there is also a `tox -elint-incr` target, which
only applies `pylint` to files which have changed from the source github. On rare
occasions this will miss some issues that would have been caught by checking the
complete source tree, but makes up for this by being much faster (and those rare
oversights will still be caught by the CI after you open a pull request).
### Changelog generation
The changelog is automatically generated as part of the release process automation. This
works through a combination of the git log and the pull request. When a release is
tagged and pushed to GitHub, the release automation bot looks at all commit messages from
the git log for the release. It takes the PR numbers from the git log (assuming a squash
merge) and checks if that PR had a `Changelog:` label on it. If there is a label it will
add the git commit message summary line from the git log for the release to the
changelog.
If there are multiple `Changelog:` tags on a PR, the git commit message summary line
from the git log will be used for each changelog category tagged.
The current categories for each label are as follows:
| PR Label | Changelog Category |
| ---------------------- | ------------------ |
| Changelog: Deprecation | Deprecated |
| Changelog: New Feature | Added |
| Changelog: API Change | Changed |
| Changelog: Removal | Removed |
| Changelog: Bugfix | Fixed |
### Release notes
All end user facing changes have to be documented with each release of Qiskit
Experiments. The expectation is that if your code contribution has user facing changes
that you will write the release documentation for these changes in the form of a release
note. This note must explain what was changed, why it was changed, and how users can
either use or adapt to the change. When a naive user with limited internal knowledge of
the project is upgrading from the previous release to the new one, they should be able
to read the release notes, understand if they need to update their existing code which
uses Qiskit Experiments, and how they would go about doing that. It ideally should
explain why they need to make this change too, to provide the necessary context.
To make sure we don't forget a release note or the details of user facing changes over a
release cycle, we require that all pull requests with user facing changes include a note
describing the changes along with the code. To accomplish this, we use the
[reno](https://docs.openstack.org/reno/latest/) tool which enables a git based workflow
for writing and compiling release notes.
Note that these notes are meant to document a release, not individual pull requests. So
if your pull request updates or reverts a change made in a previous pull request in the
same release, you should update the corresponding release note that already exists
instead of writing a new one, which would be confusing to users. You can use `git blame`
to see which previous pull request(s) are relevant to the part of the code you're
editing, and see whether they are tagged with the milestone of the current release.
#### Adding a new release note
Making a new release note is quite straightforward. Ensure that you have reno installed
with:
pip install -U reno
Once you have reno installed, you can make a new release note by running in your local
repository checkout's root:
reno new short-description-string
where short-description-string is a brief string (with no spaces) that describes what's
in the release note. This will become the prefix for the release note file. Once that is
run, it will create a new yaml file in `releasenotes/notes`. Then open that yaml file in
a text editor and write the release note.
The basic structure of a release note is restructured text in yaml lists under category
keys. You add individual items under each category, and they will be grouped
automatically by release when the release notes are compiled. A single file can have as
many entries in it as needed, but to avoid potential conflicts, you'll want to create a
new file for each pull request that has user facing changes. When you open the newly
created file it will be a full template of the different categories with a description
of a category as a single entry in each category. You'll want to delete all the sections
you aren't using and update the contents for those you are. For example, the end result
should look something like:
```yaml
features_expclass:
- |
Introduced a new feature foo that adds support for doing something to
:class:`~qiskit.circuit.QuantumCircuit` objects. It can be used by using the foo function,
for example::
from qiskit import foo
from qiskit import QuantumCircuit
foo(QuantumCircuit())
- |
The :class:`~qiskit.circuit.QuantumCircuit` class has a new method :meth:`.foo`. This is
the equivalent of running :func:`qiskit.foo` on your circuit, but provides the
convenience of running it natively on the object. For example::
from qiskit import QuantumCircuit
circ = QuantumCircuit()
circ.foo()
deprecations:
- |
The :mod:`qiskit.bar` module has been deprecated and will be removed in a
future release. Its sole function, :func:`foobar` has been superseded by the
:func:`qiskit.foo` function which provides similar functionality but with
more accurate results and better performance. You should update your calls
:func:`qiskit.bar.foobar` calls to :func:`qiskit.foo`.
```
Note that we are using subsections within the `features`, `upgrade`, and `fixes` sections to
organize the notes by functional area. We strongly encourage you to file your note under the most
appropriate category. You can see the current list of categories in
[release_notes/config.yaml](https://github.com/Qiskit-Community/qiskit-experiments/blob/main/releasenotes/config.yaml).
You can use any restructured text feature in them (code sections, tables, enumerated
lists, bulleted lists, etc.) to express what is being changed as needed. In general, you
want the release notes to include as much detail as needed so that users will understand
what has changed, why it changed, and how they'll have to update their code.
After you've finished writing your release notes you'll want to add the note file to
your commit with `git add` and commit them to your PR branch to make sure they're
included with the code in your PR.
##### Linking to issues
If you need to link to an issue or another GitHub artifact as part of the release note,
this should be done using an inline link with the text being the issue number. For
example you would write a release note with a link to issue 12345 as:
```yaml
fixes:
- |
Fixed a race condition in the function ``foo()``. Refer to
`#12345 <https://github.com/Qiskit-Community/qiskit-experiments/issues/12345>`_ for more
details.
```
#### Generating release notes
After adding your release note, you should generate it to check that the output looks as
expected. In general, the output from reno that we'll get is a `.rst` (ReStructuredText)
file that can be compiled by [sphinx](https://www.sphinx-doc.org/en/master/). If you
want to generate the full Qiskit Experiments release notes for all releases, simply run:
reno report
You can also use the ``--version`` argument to view a single release (after it has been
tagged):
reno report --version 0.9.0
At release time, ``reno report`` is used to generate the release notes for the release,
and the output will be submitted as a pull request to the documentation repository's
[release notes file](
https://github.com/Qiskit-Community/qiskit-experiments/blob/main/docs/release_notes.rst).
### Documentation
The [Qiskit Experiments documentation](https://qiskit-community.github.io/qiskit-experiments) is
rendered from `.rst` files as well as experiment and analysis class docstrings into HTML
files.
#### Updating the documentation
Any change that affects existing documentation, or any new feature that requires
documentation, should come with a corresponding documentation update. Before updating, review the [existing
documentation](https://qiskit-community.github.io/qiskit-experiments) for their style and
content, and read the [documentation guidelines](docs/GUIDELINES.md) for further
details.
#### Building documentation locally
To check what the rendered html output of the API documentation, tutorials, and release
notes will look like for the current state of the repo, run:
tox -e docs
This will build all the documentation into `docs/_build/html`. The main page
`index.html` will link to the relevant pages in the subdirectories, or you can navigate
manually:
* `tutorials/`: Contains the built tutorials.
* `howtos/`: Contains the built how-to guides.
* `manuals/`: Contains the built experiment manuals.
* `apidocs/`: Contains the API docs automatically compiled from module docstrings.
* `release_notes.html`: Contains the release notes.
Sometimes Sphinx's caching can get in a bad state. First, try running `tox -e docs-clean`, which
will remove Sphinx's cache. If you are still having issues, try adding `-r` to your command,
e.g. `tox -e docs -r`. `-r` tells Tox to reinstall the dependencies. If you encounter a build
error involving `config-inited`, you need to be in the root of
the qiskit-experiments git repository then run `git remote add upstream
https://github.com/Qiskit-Community/qiskit-experiments` and `git fetch upstream` before building.
There are a few other build options available:
* `tox -e docs-minimal`: build documentation without executing Jupyter code cells
* `tox -e docs-parallel`: do a full build with multiprocessing (may crash on Macs)
### Deprecation policy
Any change to the existing package code that affects how the user interacts with the package
should give the user clear instructions and advanced warning if the change is nontrivial.
Qiskit Experiments's deprecation policy is based on [Qiskit's
policy](https://github.com/Qiskit/qiskit/blob/1.0.0rc1/DEPRECATION.md) prior to its 1.0 release, but
we impose less stringent requirements such that developers can iterate more quickly.
Deprecations and feature removals can only happen on minor releases and not on patch releases.
The deprecation policy depends on the significance of the user-facing change, which we have divided into
three categories:
A **core feature change** is one that affects how the framework functions, for example a
change to `BaseExperiment`. The timeline for deprecating an existing core feature is as follows:
* Minor release 1: An alternative path is provided. A `PendingDeprecationWarning`
should be issued when the old path is used, indicating to users how to switch to
the new path and the release in which the old path will no longer be available. The
developer may choose to directly deprecate the feature and issue a `DeprecationWarning` instead,
in which case the release note should indicate the feature has been deprecated and how to switch
to the new path.
* Minor release 2: The `PendingDeprecationWarning` becomes a `DeprecationWarning`, or the
`DeprecationWarning` remains in place. The release note should indicate the feature has
been deprecated and how to switch to the new path.
* Minor release 3: The old feature is removed. The release note should indicate that the feature has
been removed and how to switch to the new path.
If the three-release cycle takes fewer than three months, the feature removal must wait for more
releases until three months has elapsed since the first issuing of the `PendingDeprecationWarning`
or `DeprecationWarning`.
A **non-core feature change** may be a change to a specific experiment class or modules such as the
plotter. The timeline is shortened for such a change:
* Minor release 1: An alternative path is provided. A `DeprecationWarning` should be issued
when the old path is used, indicating to users how to switch to the new path and the release
in which the old path will no longer be available.
* Minor release 2: The old feature is removed. The release note should indicate that the feature has
been removed and how to switch to the new path.
Lastly, a **minor, non-core change** could be a cosmetic change such as output file names or a
change to helper functions that isn't directly used in the package codebase. These can be made in
one release without a deprecation process as long as the change is clearly described in the
release notes.
#### Adding deprecation warnings
We use the deprecation wrappers in [Qiskit
Utilities](https://docs.quantum.ibm.com/api/qiskit/utils) to add warnings:
```python
from qiskit.utils.deprecation import deprecate_func
@deprecate_func(
since="0.5",
additional_msg="Use ``new_function`` instead.",
pending=True,
removal_timeline="after 0.7",
package_name="qiskit-experiments",
)
def old_function(*args, **kwargs):
pass
def new_function(*args, **kwargs):
pass
```
Note that all warnings emitted by Qiskit Experiments, including pre-deprecation and deprecation
warnings, will cause the CI to fail, but features up for deprecation should continue to be tested
until their removal. For more information on how to use wrappers and test deprecated functionality,
consult [Qiskit's
policy](https://github.com/Qiskit/qiskit/blob/1.0.0rc1/DEPRECATION.md#issuing-deprecation-warnings).
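As a minimal sketch (reusing the hypothetical `old_function` from the snippet above), a unit test can keep exercising the deprecated path while asserting that the expected warning is emitted:
```python
import unittest


class TestOldFunctionDeprecation(unittest.TestCase):
    """Keep testing a deprecated code path until it is removed."""

    def test_old_function_warns_and_still_works(self):
        # ``pending=True`` in the decorator above means a PendingDeprecationWarning
        # is emitted; switch the expected category to DeprecationWarning once the
        # deprecation is no longer pending.
        with self.assertWarns(PendingDeprecationWarning):
            old_function()
```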
### Development cycle
The development cycle for Qiskit Experiments is all handled in the open using project
boards in GitHub for project management. We use
[milestones](https://github.com/Qiskit-Community/qiskit-experiments/milestones) in GitHub to track
work for specific releases. Features or other changes that we want to include in a
release will be tagged and discussed in GitHub.
### Branches
* `main`: The main branch is used for development of the next version of
qiskit-experiments. It will be updated frequently and should not be considered stable.
The API can and will change on main as we introduce and refine new features.
* `stable/*` branches: Branches under `stable/*` are used to maintain released versions
of qiskit-experiments. It contains the version of the code corresponding to the latest
release for that minor version on pypi. For example, `stable/0.1` contains the code for
the 0.1.0 release on pypi. The API on these branches are stable and the only changes
merged to it are bug fixes.
### Release cycle
When it is time to release a new minor version of qiskit-experiments, we will:
1. Create a new tag with the version number and push it to github
2. Change the `main` version to the next release version.
The release automation processes will be triggered by the new tag and perform the
following steps:
1. Create a stable branch for the new minor version from the release tag on the `main`
branch
2. Build and upload binary wheels to PyPI
3. Create a github release page with a generated changelog
4. Generate a PR on the meta-repository to bump the qiskit-experiments version and
meta-package version.
The `stable/*` branches should only receive changes in the form of bug fixes. If you're making a bug fix PR that you believe should be backported to the current stable release, tag it with `backport stable potential`.
|
qiskit-experiments/CONTRIBUTING.md/0
|
{
"file_path": "qiskit-experiments/CONTRIBUTING.md",
"repo_id": "qiskit-experiments",
"token_count": 6385
}
| 97 |
.. _qiskit-experiments-data-processing:
.. automodule:: qiskit_experiments.data_processing
:no-members:
:no-inherited-members:
:no-special-members:
|
qiskit-experiments/docs/apidocs/data_processing.rst/0
|
{
"file_path": "qiskit-experiments/docs/apidocs/data_processing.rst",
"repo_id": "qiskit-experiments",
"token_count": 60
}
| 98 |
Save and load experiment data with the cloud service
====================================================
.. note::
This guide is only for those who have access to the cloud service. You can
check whether you do by logging into the IBM Quantum interface
and seeing if you can see the `database <https://quantum.ibm.com/experiments>`__.
Problem
-------
You want to save and retrieve experiment data from the cloud service.
Solution
--------
Saving
~~~~~~
.. note::
This guide requires :external+qiskit_ibm_runtime:doc:`qiskit-ibm-runtime <index>` version 0.15 and up, which can be installed with ``python -m pip install qiskit-ibm-runtime``.
For how to migrate from the older ``qiskit-ibm-provider`` to :external+qiskit_ibm_runtime:doc:`qiskit-ibm-runtime <index>`,
consult the `migration guide <https://docs.quantum.ibm.com/api/migration-guides/qiskit-runtime-from-provider>`_.
You must run the experiment on a real IBM
backend and not a simulator to be able to save the experiment data. This is done by calling
:meth:`~.ExperimentData.save`:
.. jupyter-input::
from qiskit_ibm_runtime import QiskitRuntimeService
from qiskit_experiments.library.characterization import T1
import numpy as np
service = QiskitRuntimeService(channel="ibm_quantum")
backend = service.backend("ibm_osaka")
t1_delays = np.arange(1e-6, 600e-6, 50e-6)
exp = T1(physical_qubits=(0,), delays=t1_delays)
t1_expdata = exp.run(backend=backend).block_for_results()
t1_expdata.save()
.. jupyter-output::
You can view the experiment online at
https://quantum.ibm.com/experiments/10a43cb0-7cb9-41db-ad74-18ea6cf63704
Loading
~~~~~~~
Let's load a `previous T1
experiment <https://quantum.ibm.com/experiments/9640736e-d797-4321-b063-d503f8e98571>`__
(requires login to view), which we've made public by editing the ``Share level`` field:
.. jupyter-input::
from qiskit_experiments.framework import ExperimentData
load_expdata = ExperimentData.load("9640736e-d797-4321-b063-d503f8e98571", provider=service)
Now we can display the figure from the loaded experiment data:
.. jupyter-input::
load_expdata.figure(0)
.. image:: ./experiment_cloud_service/t1_loaded.png
The analysis results have been retrieved as well:
.. jupyter-input::
for result in load_expdata.analysis_results():
print(result)
.. jupyter-output::
AnalysisResult
- name: T1
- value: 0.0001040+/-0.0000028
- χ²: 0.8523786276663019
- quality: good
- extra: <1 items>
- device_components: ['Q0']
- verified: False
AnalysisResult
- name: @Parameters_T1Analysis
- value: CurveFitResult:
- fitting method: least_squares
- number of sub-models: 1
* F_exp_decay(x) = amp * exp(-x/tau) + base
- success: True
- number of function evals: 9
- degree of freedom: 9
- chi-square: 7.671407648996717
- reduced chi-square: 0.8523786276663019
- Akaike info crit.: 0.6311217041870707
- Bayesian info crit.: 2.085841653551072
- init params:
* amp = 0.923076923076923
* tau = 0.00016946294665316433
* base = 0.033466533466533464
- fit params:
* amp = 0.9266620487665083 ± 0.007096409569790425
* tau = 0.00010401411623191737 ± 2.767679521974391e-06
* base = 0.036302726197354626 ± 0.0037184540724124844
- correlations:
* (tau, base) = -0.6740808746060173
* (amp, base) = -0.4231810882291163
* (amp, tau) = 0.09302612202500576
- quality: good
- device_components: ['Q0']
- verified: False
Discussion
----------
Note that calling :meth:`~.ExperimentData.save` before the experiment is complete will
instantiate an experiment entry in the database, but it will not have
complete data. To fix this, you can call :meth:`~.ExperimentData.save` again once the
experiment is done running.
Sometimes the metadata of an experiment can be very large and cannot be stored directly in the database.
In this case, a separate ``metadata.json`` file will be stored along with the experiment. Saving and loading
this file is done automatically in :meth:`~.ExperimentData.save` and :meth:`~.ExperimentData.load`.
Auto-saving an experiment
~~~~~~~~~~~~~~~~~~~~~~~~~
The :meth:`~.ExperimentData.auto_save` feature automatically saves changes to the
:class:`.ExperimentData` object to the cloud service whenever it's updated.
.. jupyter-input::
exp = T1(physical_qubits=(0,), delays=t1_delays)
t1_expdata = exp.run(backend=backend, shots=1000)
t1_expdata.auto_save = True
t1_expdata.block_for_results()
.. jupyter-output::
You can view the experiment online at https://quantum.ibm.com/experiments/cdaff3fa-f621-4915-a4d8-812d05d9a9ca
<ExperimentData[T1], backend: ibm_osaka, status: ExperimentStatus.DONE, experiment_id: cdaff3fa-f621-4915-a4d8-812d05d9a9ca>
Setting ``auto_save = True`` works by triggering :meth:`.ExperimentData.save`.
When working with composite experiments, setting ``auto_save`` will propagate this
setting to the child experiments.
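As a rough sketch (assuming a composite experiment built with :class:`.ParallelExperiment`, which is
not otherwise covered in this guide), enabling ``auto_save`` on the parent is enough:

.. jupyter-input::

    from qiskit_experiments.framework import ParallelExperiment

    parallel_exp = ParallelExperiment(
        [T1(physical_qubits=(i,), delays=t1_delays) for i in range(2)]
    )
    parallel_data = parallel_exp.run(backend=backend)
    parallel_data.auto_save = True  # also propagated to the child experiment data
    parallel_data.block_for_results()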
Deleting an experiment
~~~~~~~~~~~~~~~~~~~~~~
Both figures and analysis results can be deleted. Note that unless you
have auto save on, the update has to be manually saved to the remote
database by calling :meth:`~.ExperimentData.save`. Because there are two analysis
results, one for the T1 parameter and one for the curve fitting results, we must
delete twice to fully remove the analysis results.
.. jupyter-input::
t1_expdata.delete_figure(0)
t1_expdata.delete_analysis_result(0)
t1_expdata.delete_analysis_result(0)
.. jupyter-output::
Are you sure you want to delete the experiment plot? [y/N]: y
Are you sure you want to delete the analysis result? [y/N]: y
Are you sure you want to delete the analysis result? [y/N]: y
Tagging and sharing experiments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tags and notes can be added to experiments to help identify specific experiments in the interface.
For example, an experiment can be tagged and made public with the following code.
.. jupyter-input::
t1_expdata.tags = ['tag1', 'tag2']
t1_expdata.share_level = "public"
t1_expdata.notes = "Example note."
Web interface
~~~~~~~~~~~~~
You can also view experiment results as well as change the tags and share level at the `IBM Quantum Experiments
pane <https://quantum.ibm.com/experiments?date_interval=last-90-days&owner=me>`__
on the cloud.
|
qiskit-experiments/docs/howtos/cloud_service.rst/0
|
{
"file_path": "qiskit-experiments/docs/howtos/cloud_service.rst",
"repo_id": "qiskit-experiments",
"token_count": 2270
}
| 99 |
Readout Mitigation
==================
Readout errors affect quantum computation during the measurement of the
qubits in a quantum device. By characterizing the readout errors, it is
possible to construct a *readout error mitigator* that is used both to
obtain a more accurate distribution of the outputs and to compute more
accurate expectation values of observables.
The readout mitigator is generated from an *assignment matrix*: a
:math:`2^n \times 2^n` matrix :math:`A` such that :math:`A_{y,x}` is the
probability of observing :math:`y` when the true outcome is
:math:`x`. The assignment matrix is used to compute the *mitigation
matrix* used in the readout error mitigation process itself.
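In the simplest case, when :math:`A` is invertible, the mitigation matrix is just its inverse,
applied to the vector of measured probabilities:

.. math::

    p_{\text{mitigated}} = A^{-1} p_{\text{noisy}}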
A *Local readout mitigator* works under the assumption that readout
errors are mostly *local*, meaning readout errors for different qubits
are independent of each other. In this case, the assignment matrix is
the tensor product of :math:`n` :math:`2 \times 2` matrices, one for
each qubit, making it practical to store the assignment matrix in
implicit form, by storing the individual :math:`2 \times 2` assignment
matrices. The corresponding class in Qiskit is the
:class:`~qiskit.result.LocalReadoutMitigator`.
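As an illustration only (the numbers below are made up rather than taken from a backend), the full
assignment matrix of a local mitigator is the Kronecker product of the per-qubit matrices, and its
inverse can be applied to a noisy probability vector:

.. code-block:: python

    import numpy as np

    # Hypothetical per-qubit assignment matrices A_q[y, x] = P(measure y | prepared x)
    a0 = np.array([[0.98, 0.05],
                   [0.02, 0.95]])
    a1 = np.array([[0.97, 0.08],
                   [0.03, 0.92]])

    # Local assumption: the two-qubit assignment matrix is a tensor product
    assignment = np.kron(a1, a0)

    # The mitigation matrix is its inverse; apply it to a noisy probability vector
    mitigation = np.linalg.inv(assignment)
    noisy_probs = np.array([0.50, 0.03, 0.04, 0.43])  # made-up probabilities for 00, 01, 10, 11
    mitigated_probs = mitigation @ noisy_probs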
A *Correlated readout mitigator* uses the full :math:`2^n \times 2^n`
assignment matrix, meaning it can only be used for small values of
:math:`n`. The corresponding class in Qiskit is the
:class:`~qiskit.result.CorrelatedReadoutMitigator`.
This notebook demonstrates the usage of both the local and correlated
experiments to generate the corresponding mitigators.
.. note::
This tutorial requires the :external+qiskit_aer:doc:`qiskit-aer <index>` and :external+qiskit_ibm_runtime:doc:`qiskit-ibm-runtime <index>`
packages to run simulations. You can install them with ``python -m pip
install qiskit-aer qiskit-ibm-runtime``.
.. jupyter-execute::
import numpy as np
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit
from qiskit.visualization import plot_distribution
from qiskit_experiments.library import LocalReadoutError, CorrelatedReadoutError
from qiskit_aer import AerSimulator
from qiskit_ibm_runtime.fake_provider import FakePerth
from qiskit.result.mitigation.utils import (
expval_with_stddev,
str2diag,
counts_probability_vector
)
backend = AerSimulator.from_backend(FakePerth())
.. jupyter-execute::
shots = 1024
qubits = [0,1,2,3]
num_qubits = len(qubits)
Standard mitigation experiment
------------------------------
The default mitigation experiment is *local*, meaning error probability
is measured individually for each qubit. The experiment generates two
circuits, one for all "0" and one for all "1" results.
.. jupyter-execute::
exp = LocalReadoutError(qubits)
for c in exp.circuits():
print(c)
.. jupyter-execute::
exp.analysis.set_options(plot=True)
result = exp.run(backend)
mitigator = result.analysis_results("Local Readout Mitigator").value
The resulting measurement matrix can be illustrated by comparing it to
the identity.
.. jupyter-execute::
result.figure(0)
Mitigation matrices
-------------------
The individual mitigation matrices can be read off the mitigator.
.. jupyter-execute::
for m in mitigator._mitigation_mats:
print(m)
print()
Mitigation example
------------------
.. jupyter-execute::
qc = QuantumCircuit(num_qubits)
qc.sx(0)
for i in range(1, num_qubits):
qc.cx(i - 1, i)
qc.measure_all()
.. jupyter-execute::
counts = backend.run(qc, shots=shots, seed_simulator=42, method="density_matrix").result().get_counts()
unmitigated_probs = {label: count / shots for label, count in counts.items()}
.. jupyter-execute::
mitigated_quasi_probs = mitigator.quasi_probabilities(counts)
mitigated_stddev = mitigated_quasi_probs._stddev_upper_bound
mitigated_probs = (mitigated_quasi_probs.nearest_probability_distribution().binary_probabilities())
Probabilities
~~~~~~~~~~~~~
.. jupyter-execute::
legend = ['Mitigated Probabilities', 'Unmitigated Probabilities']
plot_distribution([mitigated_probs, unmitigated_probs], legend=legend, sort="value_desc", bar_labels=False)
Expectation value
-----------------
.. jupyter-execute::
diagonal_labels = ["ZZZZ", "ZIZI", "IZII", "1ZZ0"]
ideal_expectation = []
diagonals = [str2diag(d) for d in diagonal_labels]
qubit_index = {i: i for i in range(num_qubits)}
unmitigated_probs_vector, _ = counts_probability_vector(unmitigated_probs, qubit_index=qubit_index)
unmitigated_expectation = [expval_with_stddev(d, unmitigated_probs_vector, shots) for d in diagonals]
mitigated_expectation = [mitigator.expectation_value(counts, d) for d in diagonals]
.. jupyter-execute::
mitigated_expectation_values, mitigated_stddev = zip(*mitigated_expectation)
unmitigated_expectation_values, unmitigated_stddev = zip(*unmitigated_expectation)
legend = ['Mitigated Expectation', 'Unmitigated Expectation']
fig, ax = plt.subplots()
X = np.arange(4)
ax.bar(X + 0.00, mitigated_expectation_values, yerr=mitigated_stddev, color='b', width = 0.25, label="Mitigated Expectation")
ax.bar(X + 0.25, unmitigated_expectation_values, yerr=unmitigated_stddev, color='g', width = 0.25, label="Unmitigated Expectation")
ax.set_xticks([0.125 + i for i in range(len(diagonals))])
ax.set_xticklabels(diagonal_labels)
ax.legend()
Correlated readout mitigation
-----------------------------
In correlated readout mitigation on :math:`n` qubits, a circuit is
generated for each of the possible :math:`2^n` combinations of "0" and
"1". This results in more accurate mitigation in the case where the
readout errors are correlated and not independent, but it requires a
large number of circuits and a large amount of storage space, and so is
infeasible for more than a few qubits.
.. jupyter-execute::
qubits = [0,3]
num_qubits = len(qubits)
exp = CorrelatedReadoutError(qubits)
for c in exp.circuits():
print(c)
See also
--------
* API documentation: :mod:`~qiskit_experiments.library.characterization.LocalReadoutError`,
:mod:`~qiskit_experiments.library.characterization.CorrelatedReadoutError`
* Qiskit Textbook: `Measurement Error Mitigation <https://github.com/Qiskit/textbook/blob/main/notebooks/quantum-hardware/measurement-error-mitigation.ipynb>`__
|
qiskit-experiments/docs/manuals/measurement/readout_mitigation.rst/0
|
{
"file_path": "qiskit-experiments/docs/manuals/measurement/readout_mitigation.rst",
"repo_id": "qiskit-experiments",
"token_count": 2196
}
| 100 |
============
Introduction
============
What is Qiskit Experiments?
===========================
Qiskit Experiments is a package for running device characterization and calibration
experiments on top of the core functionality of Qiskit.
An **experiment** comprises a series of circuits and associated metadata.
Once the experiment circuits are executed on a quantum backend, either
real or simulated, analysis is run automatically on the jobs and results
in the form of data, fit parameters, and figures are generated.
In addition to the experiment framework itself, Qiskit Experiments also has a rich
library of experiments for calibrating and characterizing qubits.
What Qiskit Experiments can do
==============================
* Run characterization and calibration experiments such as quantum
volume and randomized benchmarking
* Run built-in or customized experiments with all the options available in Qiskit
* Specify fit series and parameters in the analysis
* Transform the data through the data processor
* Visualize data with support for custom drawing backends
* Save and retrieve timestamped calibration parameters for physical backends
.. _primer:
A quick primer
==============
The Qiskit Experiments package consists of the experimental framework and the experiment
library. The framework itself consists of ``Experiment`` and ``Analysis`` classes, the
latter of which uses the Data Processor, Curve Analysis, and Visualization modules
to process the data, fit it to specified models, and plot the results, respectively.
.. figure:: images/experimentarch.png
:width: 400
:align: center
:class: no-scaled-link
Experiments start with an ``Experiment`` class, which instantiates the circuits that
will be run and also the metadata and options that will be used for the experiment,
transpilation, execution, and analysis. During execution, circuits are automatically
packaged into one or more jobs for the specified backend device.
Each ``Experiment`` class is tied to its corresponding ``Analysis`` class. Once jobs
complete execution, the ``Analysis`` class processes and analyzes raw data to output
an ``ExperimentData`` class that contains
the resulting analysis results, figures, metadata, as well as the original raw data.
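As a rough sketch of this flow (``backend`` here is a placeholder for any Qiskit backend object),
a typical run of a library experiment looks like:

.. code-block:: python

    import numpy as np
    from qiskit_experiments.library import T1

    # Instantiate the experiment: this defines the circuits, metadata, and options
    exp = T1(physical_qubits=(0,), delays=np.arange(1e-6, 600e-6, 50e-6))

    # Run on a backend; jobs are submitted and analysis runs automatically on completion
    exp_data = exp.run(backend=backend).block_for_results()

    # The ExperimentData container holds analysis results, figures, and the raw data
    print(exp_data.analysis_results(dataframe=True))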
|
qiskit-experiments/docs/tutorials/intro.rst/0
|
{
"file_path": "qiskit-experiments/docs/tutorials/intro.rst",
"repo_id": "qiskit-experiments",
"token_count": 502
}
| 101 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Base class of curve analysis.
"""
from abc import ABC, abstractmethod
from typing import Dict, List, Union
import lmfit
from qiskit_experiments.data_processing import DataProcessor
from qiskit_experiments.data_processing.processor_library import get_processor
from qiskit_experiments.framework import (
AnalysisResultData,
BaseAnalysis,
ExperimentData,
Options,
)
from qiskit_experiments.visualization import (
BasePlotter,
CurvePlotter,
MplDrawer,
)
from .curve_data import CurveFitResult, ParameterRepr
from .scatter_table import ScatterTable
PARAMS_ENTRY_PREFIX = "@Parameters_"
DATA_ENTRY_PREFIX = "@Data_"
class BaseCurveAnalysis(BaseAnalysis, ABC):
"""Abstract superclass of curve analysis base classes.
Note that this class doesn't define the :meth:`_run_analysis` method,
and no actual fitting protocol is implemented in this base class.
However, this class defines several common methods that can be reused.
A curve analysis subclass can construct a proper fitting protocol
by combining the following methods, i.e. subroutines.
See :ref:`curve_analysis_workflow` for how these subroutines are called.
A subclass must implement the following methods.
.. rubric:: _run_data_processing
This method performs data processing and returns the processed dataset.
Input data is a list of dictionaries, where each entry represents an outcome
of circuit sampling along with the metadata attached to it.
.. rubric:: _format_data
This method consumes the processed dataset and outputs the formatted dataset.
For example, this may include averaging Y values over the same X data points.
.. rubric:: _run_curve_fit
This method performs the fitting with the predefined fit models and the formatted dataset,
and internally calls the :meth:`_generate_fit_guesses` method to prepare initial parameter values.
This is a core functionality of the :meth:`_run_analysis` method
that creates fit result objects from the formatted dataset.
Optionally, a subclass may override the following methods.
These methods have default implementations as described below.
.. rubric:: _evaluate_quality
This method evaluates the quality of the fit based on the fit result.
The default implementation returns "good" when the reduced chi-squared is less than 3.0
and "bad" otherwise. A subclass can override this criterion.
.. rubric:: _create_analysis_results
This method creates analysis results for important fit parameters
that might be defined by analysis options ``result_parameters``.
.. rubric:: _create_figures
This method creates figures by consuming the scatter table data.
Figures are created when the analysis option ``plot`` is ``True``.
.. rubric:: _initialize
This method initializes analysis options against input experiment data.
Usually this method is called before other methods are called.
"""
@property
@abstractmethod
def parameters(self) -> List[str]:
"""Return parameters estimated by this analysis."""
@property
@abstractmethod
def name(self) -> str:
"""Return name of this analysis."""
@property
@abstractmethod
def models(self) -> List[lmfit.Model]:
"""Return fit models."""
@property
def plotter(self) -> BasePlotter:
"""A short-cut to the curve plotter instance."""
return self._options.plotter
@classmethod
def _default_options(cls) -> Options:
"""Return default analysis options.
Analysis Options:
plotter (BasePlotter): A curve plotter instance to visualize
the analysis result.
plot_raw_data (bool): Set ``True`` to draw processed data points,
dataset without formatting, on canvas. This is ``False`` by default.
plot_residuals (bool): Set ``True`` to draw the residuals data for the
fitting model. This is ``False`` by default.
plot (bool): Set ``True`` to create figure for fit result or ``False`` to
not create a figure. This overrides the behavior of ``generate_figures``.
return_fit_parameters (bool): (Deprecated) Set ``True`` to return all fit model parameters
with details of the fit outcome. Default to ``False``.
data_processor (Callable): A callback function to format experiment data.
This can be a :class:`.DataProcessor`
instance that defines the `self.__call__` method.
normalization (bool): Set ``True`` to normalize y values within range [-1, 1].
Default to ``False``.
average_method (Literal["sample", "iwv", "shots_weighted"]): Method
to average the y values when the same x values
appear multiple times. One of "sample", "iwv" (i.e. inverse
weighted variance), "shots_weighted". See :func:`.mean_xy_data`
for details. Default to "shots_weighted".
p0 (Dict[str, float]): Initial guesses for the fit parameters.
The dictionary is keyed on the fit parameter names.
bounds (Dict[str, Tuple[float, float]]): Boundary of fit parameters.
The dictionary is keyed on the fit parameter names and
values are the tuples of (min, max) of each parameter.
fit_method (str): Fit method that LMFIT minimizer uses.
Default to ``least_squares`` method which implements the
Trust Region Reflective algorithm to solve the minimization problem.
See LMFIT documentation for available options.
lmfit_options (Dict[str, Any]): Options that are passed to the
LMFIT minimizer. Acceptable options depend on fit_method.
x_key (str): Circuit metadata key representing a scanned value.
fit_category (str): Name of dataset in the scatter table to fit.
result_parameters (List[Union[str, ParameterRepr]]): Parameters reported in the
database as a dedicated entry. This is a list of parameter representation
which is either string or ParameterRepr object. If you provide more
information other than name, you can specify
``[ParameterRepr("alpha", "\u03B1", "a.u.")]`` for example.
The parameter name should be defined in the series definition.
Representation should be printable in standard output, i.e. no latex syntax.
extra (Dict[str, Any]): A dictionary that is appended to all database entries
as extra information.
fixed_parameters (Dict[str, Any]): Fitting model parameters that are fixed
during the curve fitting. This should be provided with default value
keyed on one of the parameter names in the series definition.
filter_data (Dict[str, Any]): Dictionary of experiment data metadata to filter.
Experiment outcomes with metadata that matches with this dictionary
are used in the analysis. If not specified, all experiment data are
input to the curve fitter. By default, no filtering condition is set.
data_subfit_map (Dict[str, Dict[str, Any]]): The mapping of experiment result data
to sub-fit models. This dictionary is keyed on the LMFIT model name,
and the value is a sorting key-value pair that filters the experiment results,
and the filtering is done based on the circuit metadata.
"""
options = super()._default_options()
options.plotter = CurvePlotter(MplDrawer())
options.plot_raw_data = False
options.plot_residuals = False
options.return_fit_parameters = True
options.data_processor = None
options.normalization = False
options.average_method = "shots_weighted"
options.x_key = "xval"
options.fit_category = "formatted"
options.result_parameters = []
options.extra = {}
options.fit_method = "least_squares"
options.lmfit_options = {}
options.p0 = {}
options.bounds = {}
options.fixed_parameters = {}
options.filter_data = {}
options.data_subfit_map = {}
# Set automatic validator for particular option values
options.set_validator(field="data_processor", validator_value=DataProcessor)
options.set_validator(field="plotter", validator_value=BasePlotter)
return options
@abstractmethod
def _run_data_processing(
self,
raw_data: List[Dict],
category: str = "raw",
) -> ScatterTable:
"""Perform data processing from the experiment result payload.
Args:
raw_data: Payload in the experiment data.
category: Category string of the output dataset.
Returns:
Processed data that will be sent to the formatter method.
"""
@abstractmethod
def _format_data(
self,
curve_data: ScatterTable,
category: str = "formatted",
) -> ScatterTable:
"""Postprocessing for preparing the fitting data.
Args:
curve_data: Processed dataset created from experiment results.
category: Category string of the output dataset.
Returns:
New scatter table instance including fit data.
"""
@abstractmethod
def _run_curve_fit(
self,
curve_data: ScatterTable,
) -> CurveFitResult:
"""Perform curve fitting on given data collection and fit models.
Args:
curve_data: Formatted data to fit.
Returns:
The best fitting outcome with minimum reduced chi-squared value.
"""
def _evaluate_quality(
self,
fit_data: CurveFitResult,
) -> Union[str, None]:
"""Evaluate quality of the fit result.
Args:
fit_data: Fit outcome.
Returns:
String that represents fit result quality. Usually "good" or "bad".
"""
if 0 < fit_data.reduced_chisq < 3.0:
return "good"
return "bad"
def _create_analysis_results(
self,
fit_data: CurveFitResult,
quality: str,
**metadata,
) -> List[AnalysisResultData]:
"""Create analysis results for important fit parameters.
Args:
fit_data: Fit outcome.
quality: Quality of fit outcome.
Returns:
List of analysis result data.
"""
outcomes = []
# Create entries for important parameters
for param_repr in self.options.result_parameters:
if isinstance(param_repr, ParameterRepr):
p_name = param_repr.name
p_repr = param_repr.repr or param_repr.name
unit = param_repr.unit
else:
p_name = param_repr
p_repr = param_repr
unit = None
if unit:
par_metadata = metadata.copy()
par_metadata["unit"] = unit
else:
par_metadata = metadata
outcome = AnalysisResultData(
name=p_repr,
value=fit_data.ufloat_params[p_name],
chisq=fit_data.reduced_chisq,
quality=quality,
extra=par_metadata,
)
outcomes.append(outcome)
return outcomes
# pylint: disable=unused-argument
def _create_curve_data(
self,
curve_data: ScatterTable,
**metadata,
) -> List[AnalysisResultData]:
"""Create analysis results for raw curve data.
Args:
curve_data: Formatted data that is used for the fitting.
Returns:
List of analysis result data.
"""
samples = []
for model_name, sub_data in list(curve_data.dataframe.groupby("model_name")):
raw_datum = AnalysisResultData(
name=DATA_ENTRY_PREFIX + self.__class__.__name__,
value={
"xdata": sub_data.xval.to_numpy(),
"ydata": sub_data.yval.to_numpy(),
"sigma": sub_data.yerr.to_numpy(),
},
extra={
"name": model_name,
**metadata,
},
)
samples.append(raw_datum)
return samples
def _create_figures(
self,
curve_data: ScatterTable,
) -> List["matplotlib.figure.Figure"]:
"""Create a list of figures from the curve data.
Args:
curve_data: Scatter data table containing all data points.
Returns:
A list of figures.
"""
return []
def _initialize(
self,
experiment_data: ExperimentData,
):
"""Initialize curve analysis with experiment data.
This method is called ahead of other processing.
Args:
experiment_data: Experiment data to analyze.
"""
# Initialize data processor
# TODO move this to base analysis in follow-up
data_processor = self.options.data_processor or get_processor(experiment_data, self.options)
if not data_processor.is_trained:
data_processor.train(data=experiment_data.data())
self.set_options(data_processor=data_processor)
|
qiskit-experiments/qiskit_experiments/curve_analysis/base_curve_analysis.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/curve_analysis/base_curve_analysis.py",
"repo_id": "qiskit-experiments",
"token_count": 5630
}
| 102 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
===========================================================
Data Processing (:mod:`qiskit_experiments.data_processing`)
===========================================================
.. currentmodule:: qiskit_experiments.data_processing
Data processing is the act of taking the data returned by the backend and
converting it into a format that can be analyzed.
It is implemented as a chain of data processing steps that transform various input data,
e.g. IQ data, into a desired format, e.g. population, which can be analyzed.
These data transformations may consist of multiple steps, such as kerneling and discrimination.
Each step is implemented by a :class:`~qiskit_experiments.data_processing.data_action.DataAction`
also called a `node`.
The data processor implements the :meth:`__call__` method. Once initialized, it
can thus be used as a standard python function:
.. code-block:: python
processor = DataProcessor("memory", [Node1(), Node2(), ...])
out_data = processor(in_data)
The data input to the processor is a sequence of dictionaries each representing the result
of a single circuit. The output of the processor is a numpy array whose shape and data type
depend on the combination of the nodes in the data processor.
Uncertainties that arise from quantum measurements or finite sampling can be taken into account
in the nodes: a standard error can be generated in a node and can be propagated
through the subsequent nodes in the data processor.
Correlation between computed values is also considered.
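As a minimal sketch (the count data below is made up for illustration), a processor that converts
count dictionaries into the probability of measuring "1", with propagated binomial uncertainty,
can be built from the nodes in this module:

.. code-block:: python

    from qiskit_experiments.data_processing import DataProcessor, Probability

    # Chain a single node that converts the "counts" entry of each datum into P("1")
    processor = DataProcessor("counts", [Probability(outcome="1")])

    data = [{"counts": {"0": 980, "1": 44}}, {"counts": {"0": 130, "1": 894}}]
    populations = processor(data)  # array of ufloat values with standard errors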
Classes
=======
.. autosummary::
:toctree: ../stubs/
DataProcessor
DataAction
TrainableDataAction
Data Processing Nodes
=====================
.. autosummary::
:toctree: ../stubs/
Probability
MarginalizeCounts
ToImag
ToReal
SVD
DiscriminatorNode
MemoryToCounts
AverageData
BasisExpectationValue
MinMaxNormalize
ShotOrder
RestlessNode
RestlessToCounts
RestlessToIQ
Discriminators
==============
.. autosummary::
:toctree: ../stubs/
BaseDiscriminator
SkLDA
SkQDA
"""
from .data_action import DataAction, TrainableDataAction
from .nodes import (
Probability,
MarginalizeCounts,
ToImag,
ToReal,
SVD,
DiscriminatorNode,
MemoryToCounts,
AverageData,
BasisExpectationValue,
MinMaxNormalize,
ShotOrder,
RestlessNode,
RestlessToCounts,
RestlessToIQ,
)
from .data_processor import DataProcessor
from .discriminator import BaseDiscriminator
from .sklearn_discriminators import SkLDA, SkQDA
|
qiskit-experiments/qiskit_experiments/data_processing/__init__.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/data_processing/__init__.py",
"repo_id": "qiskit-experiments",
"token_count": 904
}
| 103 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A table-like dataset for analysis results."""
from __future__ import annotations
import re
import threading
import uuid
import warnings
from typing import Any
import numpy as np
import pandas as pd
from qiskit_experiments.database_service.exceptions import ExperimentEntryNotFound
class AnalysisResultTable:
"""A table-like dataset for analysis results.
Default table columns are defined in the class attribute :attr:`.DEFAULT_COLUMNS`.
The table is automatically expanded when an extra key is included in the
input dictionary data. Missing columns in the input data are filled with a null value.
Table row index (i.e. entry ID) is created by truncating the result_id string which
is basically a UUID-4 string. A random unique ID is generated when the result_id
is missing in the input data.
Any operation on the table value via the instance methods guarantees thread safety.
"""
VALID_ID_REGEX = re.compile(r"\A(?P<short_id>\w{8})-\w{4}-\w{4}-\w{4}-\w{12}\Z")
DEFAULT_COLUMNS = [
"name",
"experiment",
"components",
"value",
"quality",
"experiment_id",
"result_id",
"tags",
"backend",
"run_time",
"created_time",
]
def __init__(self):
"""Create new dataset."""
self._data = pd.DataFrame(columns=self.DEFAULT_COLUMNS)
self._lock = threading.RLock()
@classmethod
def from_dataframe(cls, data: pd.DataFrame) -> "AnalysisResultTable":
"""Create new dataset with existing dataframe.
Args:
data: Bare dataframe object.
Returns:
A new AnalysisResults instance.
"""
instance = AnalysisResultTable()
instance._data = pd.concat([instance._data, data])
return instance
@property
def dataframe(self) -> pd.DataFrame:
"""Dataframe object of analysis results."""
with self._lock:
return self._data.copy(deep=False)
@property
def result_ids(self) -> list[str]:
"""Result IDs in current dataset."""
with self._lock:
return list(self._data.result_id)
@property
def columns(self) -> list[str]:
"""All columns in current dataset."""
with self._lock:
return list(self._data.columns)
def add_data(
self,
*,
result_id: str | None = None,
**data,
) -> str:
"""Add new data to this dataset.
Args:
result_id: A unique UUID-4 string for this data entry.
The full string is used to identify the data in the experiment service database,
and a short ID is created by truncating this string as a dataframe index.
data: Arbitrary key-value pairs representing a single data entry.
Missing values for default columns are filled with ``None``.
Returns:
Assigned analysis result ID.
"""
result_id = result_id or self._create_unique_hash()
if matched := re.match(self.VALID_ID_REGEX, result_id):
# A short unique index is generated from the result ID.
# Showing the full result ID would unnecessarily occupy horizontal space in the HTML table.
# This mechanism is inspired by the GitHub commit hash.
index = matched.group("short_id")
else:
warnings.warn(
f"Result ID of {result_id} is not a valid UUID-4 string. ",
UserWarning,
)
index = result_id[:8]
with self._lock:
if index in self._data.index:
raise ValueError(
f"Table entry index {index} already exists. "
"Please use another ID to avoid index collision."
)
# Add missing columns to the table
if missing := data.keys() - set(self._data.columns):
for k in data:
# Order sensitive
if k in missing:
loc = len(self._data.columns)
self._data.insert(loc, k, value=None)
# A hack to avoid unwanted dtype update. Appending new row with .loc indexer
# performs enlargement and implicitly changes dtype. This often induces a confusion of
# NaN (numeric container) and None (object container) for missing values.
# Filling a row with None values before assigning actual values can keep column dtype,
# but this behavior might change in future pandas version.
# https://github.com/pandas-dev/pandas/issues/6485
# Also see test.framework.test_data_table.TestBaseTable.test_type_*
self._data.loc[index, :] = [None] * len(self._data.columns)
template = dict.fromkeys(self.columns, None)
template["result_id"] = result_id
template.update(data)
self._data.loc[index, :] = pd.array(list(template.values()), dtype=object)
return index
def get_data(
self,
key: str | int | slice | None = None,
columns: str | list[str] = "default",
) -> pd.DataFrame:
"""Get matched entries from this dataset.
Args:
key: Identifier of the entry of interest.
columns: List of names or a policy (default, minimal, all)
of data columns included in the returned data frame.
Returns:
Matched entries in a single data frame or series.
"""
if key is None:
with self._lock:
out = self._data.copy()
else:
uids = self._resolve_key(key)
with self._lock:
out = self._data.filter(items=uids, axis=0)
if columns != "all":
valid_columns = self._resolve_columns(columns)
out = out[valid_columns]
return out
def del_data(
self,
key: str | int,
) -> list[str]:
"""Delete matched entries from this dataset.
Args:
key: Identifier of the entry of interest.
Returns:
Deleted analysis result IDs.
"""
uids = self._resolve_key(key)
with self._lock:
self._data.drop(uids, inplace=True)
return uids
def clear(self):
"""Clear all table entries."""
with self._lock:
self._data = pd.DataFrame(columns=self.DEFAULT_COLUMNS)
def copy(self):
"""Create new thread-safe instance with the same data.
.. note::
This returns a new object with shallow copied data frame.
"""
with self._lock:
# Hold the lock so that no data can be added
new_instance = self.__class__()
new_instance._data = self._data.copy(deep=False)
return new_instance
def _create_unique_hash(self) -> str:
with self._lock:
n = 0
while n < 1000:
tmp_id = str(uuid.uuid4())
if tmp_id[:8] not in self._data.index:
return tmp_id
raise RuntimeError(
"Unique result_id string cannot be prepared for this table within 1000 trials. "
"Reduce number of entries, or manually provide a unique result_id."
)
def _resolve_columns(self, columns: str | list[str]):
with self._lock:
extra_columns = [c for c in self._data.columns if c not in self.DEFAULT_COLUMNS]
if columns == "default":
return [
"name",
"experiment",
"components",
"value",
"quality",
"backend",
"run_time",
] + extra_columns
if columns == "minimal":
return [
"name",
"components",
"value",
"quality",
] + extra_columns
if not isinstance(columns, str):
out = []
for column in columns:
if column in self._data.columns:
out.append(column)
else:
warnings.warn(
f"Specified column {column} does not exist in this table.",
UserWarning,
)
return out
raise ValueError(
f"Column group {columns} is not valid name. Use either 'all', 'default', 'minimal'."
)
def _resolve_key(self, key: int | slice | str) -> list[str]:
with self._lock:
if isinstance(key, int):
if key >= len(self):
raise ExperimentEntryNotFound(f"Analysis result {key} not found.")
return [self._data.index[key]]
if isinstance(key, slice):
keys = list(self._data.index)[key]
if len(keys) == 0:
raise ExperimentEntryNotFound(f"Analysis result {key} not found.")
return keys
if isinstance(key, str):
if key in self._data.index:
return [key]
# This key is name of entry
loc = self._data["name"] == key
if not any(loc):
raise ExperimentEntryNotFound(f"Analysis result {key} not found.")
return list(self._data.index[loc])
raise TypeError(f"Invalid key type {type(key)}. The key must be either int, slice, or str.")
def __len__(self):
return len(self._data)
def __contains__(self, item):
return item in self._data.index
def __json_encode__(self) -> dict[str, Any]:
with self._lock:
return {
"class": "AnalysisResultTable",
"data": self._data.to_dict(orient="index"),
}
@classmethod
def __json_decode__(cls, value: dict[str, Any]) -> "AnalysisResultTable":
if not value.get("class", None) == "AnalysisResultTable":
raise ValueError("JSON decoded value for AnalysisResultTable is not valid class type.")
instance = object.__new__(cls)
instance._lock = threading.RLock()
instance._data = pd.DataFrame.from_dict(
data=value.get("data", {}),
orient="index",
).replace({np.nan: None})
return instance
def __getstate__(self):
state = self.__dict__.copy()
del state["_lock"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._lock = threading.RLock()
|
qiskit-experiments/qiskit_experiments/framework/analysis_result_table.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/framework/analysis_result_table.py",
"repo_id": "qiskit-experiments",
"token_count": 5154
}
| 104 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Matplotlib helper functions
"""
from matplotlib.figure import Figure
from matplotlib.backends.backend_svg import FigureCanvasSVG
default_figure_canvas = FigureCanvasSVG # pylint: disable=invalid-name
"""Matplotlib canvas to use when rendering a figure. This needs to be a
canvas for a `non-interactive backend
<https://matplotlib.org/stable/tutorials/introductory/usage.html#the-builtin-backends>`_.
The default is `FigureCanvasSVG`."""
def get_non_gui_ax():
"""Return a matplotlib axes that can be used in a child thread.
Analysis/plotting is done in a separate thread (so it doesn't block the
main thread), but matplotlib doesn't support GUI mode in a child thread.
This function creates a separate Figure and attaches a non-GUI
SVG canvas to it.
Returns:
matplotlib.axes.Axes: A matplotlib axes that can be used in a child thread.
"""
figure = Figure()
_ = default_figure_canvas(figure)
return figure.subplots(squeeze=False)[0, 0]
|
qiskit-experiments/qiskit_experiments/framework/matplotlib.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/framework/matplotlib.py",
"repo_id": "qiskit-experiments",
"token_count": 454
}
| 105 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Analysis Classes"""
from .drag_analysis import DragCalAnalysis
from .fine_amplitude_analysis import FineAmplitudeAnalysis
from .ramsey_xy_analysis import RamseyXYAnalysis
from .t2ramsey_analysis import T2RamseyAnalysis
from .t2hahn_analysis import T2HahnAnalysis
from .t1_analysis import T1Analysis, T1KerneledAnalysis
from .tphi_analysis import TphiAnalysis
from .cr_hamiltonian_analysis import CrossResonanceHamiltonianAnalysis
from .readout_angle_analysis import ReadoutAngleAnalysis
from .local_readout_error_analysis import LocalReadoutErrorAnalysis
from .correlated_readout_error_analysis import CorrelatedReadoutErrorAnalysis
from .resonator_spectroscopy_analysis import ResonatorSpectroscopyAnalysis
from .zz_ramsey_analysis import ZZRamseyAnalysis
from .multi_state_discrimination_analysis import MultiStateDiscriminationAnalysis
|
qiskit-experiments/qiskit_experiments/library/characterization/analysis/__init__.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/library/characterization/analysis/__init__.py",
"repo_id": "qiskit-experiments",
"token_count": 353
}
| 106 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Cross resonance Hamiltonian tomography.
"""
from typing import List, Tuple, Sequence, Optional, Type
import numpy as np
from qiskit import pulse, circuit, QuantumCircuit
from qiskit.circuit.parameterexpression import ParameterValueType
from qiskit.exceptions import QiskitError
from qiskit.providers import Backend
from qiskit_experiments.framework import (
BaseExperiment,
BackendTiming,
Options,
)
from qiskit_experiments.library.characterization.analysis import CrossResonanceHamiltonianAnalysis
class CrossResonanceHamiltonian(BaseExperiment):
r"""Cross resonance Hamiltonian tomography experiment.
# section: overview
This experiment assumes the two qubit Hamiltonian in the form
.. math::
H = \frac{I \otimes A}{2} + \frac{Z \otimes B}{2}
where :math:`A` and :math:`B` are linear combinations of
the Pauli operators :math:`\in {X, Y, Z}`.
The coefficient of each Pauli term in the Hamiltonian
can be estimated with this experiment.
This experiment is performed by stretching the pulse duration of a cross resonance pulse
and measuring the target qubit by projecting onto the x, y, and z bases.
The control qubit state dependent (controlled-) Rabi oscillation on the
target qubit is observed by repeating the experiment with the control qubit
both in the ground and excited states. The fit for the oscillations in the
three bases with the two control qubit preparations tomographically
reconstructs the Hamiltonian in the form shown above.
See Ref. [1] for more details.
More specifically, the following circuits are executed in this experiment.
.. parsed-literal::
(X measurement)

     ┌───┐┌────────────────────┐
q_0: ┤ P ├┤0                   ├────────────────────
     └───┘│  cr_tone(duration) │┌─────────┐┌────┐┌─┐
q_1: ─────┤1                   ├┤ Rz(π/2) ├┤ √X ├┤M├
          └────────────────────┘└─────────┘└────┘└╥┘
c: 1/══════════════════════════════════════════════╩═
                                                    0

(Y measurement)

     ┌───┐┌────────────────────┐
q_0: ┤ P ├┤0                   ├─────────
     └───┘│  cr_tone(duration) │┌────┐┌─┐
q_1: ─────┤1                   ├┤ √X ├┤M├
          └────────────────────┘└────┘└╥┘
c: 1/═══════════════════════════════════╩═
                                         0

(Z measurement)

     ┌───┐┌────────────────────┐
q_0: ┤ P ├┤0                   ├───
     └───┘│  cr_tone(duration) │┌─┐
q_1: ─────┤1                   ├┤M├
          └────────────────────┘└╥┘
c: 1/═════════════════════════════╩═
                                   0
The ``P`` gate on the control qubit (``q_0``) indicates the state preparation.
Since this experiment requires two sets of sub experiments with the control qubit in the
excited and ground state, ``P`` will become ``X`` gate or just be omitted, respectively.
Here ``cr_tone`` is implemented by a single cross resonance tone
driving the control qubit at the frequency of the target qubit.
The pulse envelope might be a flat-topped Gaussian implemented by the parametric pulse
:class:`~qiskit.pulse.library.parametric_pulses.GaussianSquare`.
This experiment scans the total duration of the cross resonance pulse
including the pulse ramps at both edges. The pulse shape is defined by the
:class:`~qiskit.pulse.library.parametric_pulses.GaussianSquare`, and
an effective length of these Gaussian ramps with :math:`\sigma` can be computed by
.. math::
\tau_{\rm edges}' = \sqrt{2 \pi} \sigma,
which is usually shorter than the actual edge duration of
.. math::
\tau_{\rm edges} = 2 r \sigma,
where the :math:`r` is the ratio of the actual edge duration to :math:`\sigma`.
This effect must be considered in the following curve analysis to estimate
interaction rates.
# section: analysis_ref
:class:`CrossResonanceHamiltonianAnalysis`
# section: reference
.. ref_arxiv:: 1 1603.04821
# section: manual
.. ref_website:: Qiskit Textbook 6.7,
https://github.com/Qiskit/textbook/blob/main/notebooks/quantum-hardware-pulses/hamiltonian-tomography.ipynb
"""
# Number of CR pulses. The flat top duration per pulse is divided by this number.
num_pulses = 1
class CRPulseGate(circuit.Gate):
"""A pulse gate of cross resonance. Definition should be provided via calibration."""
def __init__(self, width: ParameterValueType):
super().__init__("cr_gate", 2, [width])
def __init__(
self,
physical_qubits: Tuple[int, int],
backend: Optional[Backend] = None,
cr_gate: Optional[Type[circuit.Gate]] = None,
durations: Optional[Sequence[int]] = None,
**kwargs,
):
"""Create a new experiment.
Args:
physical_qubits: Two-value tuple of qubit indices on which to run tomography.
The first index stands for the control qubit.
backend: Optional, the backend to run the experiment on.
cr_gate: Optional, circuit gate class representing the cross resonance pulse.
Providing this object allows us to run this experiment with a circuit simulator,
which can be useful for testing, development of analysis protocols,
and educational purposes without needing to wait for hardware queueing.
Note that this instance must provide matrix representation, such as
unitary gate or Hamiltonian gate, and the class is expected to be instantiated
with a single parameter ``width`` in units of sec.
durations: Optional. The total duration of cross resonance pulse(s) including
rising and falling edges. The minimum number should be larger than the
total lengths of these ramps. If not provided, then ``num_durations`` evenly
spaced durations between ``min_durations`` and ``max_durations`` are
automatically generated from these experiment options. The default numbers
are chosen to have a good sensitivity for the Hamiltonian coefficient
of interest at the rate around 1 MHz.
This argument should be provided in units of sec.
kwargs: Pulse parameters. See :meth:`experiment_options` for details.
Raises:
QiskitError: When ``qubits`` length is not 2.
"""
if len(physical_qubits) != 2:
raise QiskitError(
"Length of qubits is not 2. Please provide index for control and target qubit."
)
self._gate_cls = cr_gate or self.CRPulseGate
self._backend_timing = None
super().__init__(
physical_qubits, analysis=CrossResonanceHamiltonianAnalysis(), backend=backend
)
self.set_experiment_options(durations=durations, **kwargs)
@classmethod
def _default_experiment_options(cls) -> Options:
"""Default experiment options.
Experiment Options:
durations (np.ndarray): The total duration of the cross resonance pulse(s) to scan,
in units of sec. Values should be longer than pulse ramps.
min_durations (int): The minimum default pulse duration in samples.
max_durations (int): The maximum default pulse duration in samples.
num_durations (int): The number of measured durations. The experiment automatically
creates durations of linear increment along with ``min_durations`` and
``max_durations`` when user doesn't explicitly provide ``durations``.
amp (complex): Amplitude of the cross resonance tone.
amp_t (complex): Amplitude of the cancellation or rotary drive on target qubit.
sigma (float): Sigma of Gaussian rise and fall edges, in units of dt.
risefall (float): Ratio of edge durations to sigma.
"""
options = super()._default_experiment_options()
options.durations = None
options.min_durations = 60e-9
options.max_durations = 1200e-9
options.num_durations = 48
options.amp = 0.5
options.amp_t = 0.0
options.sigma = 64
options.risefall = 2
return options
def _set_backend(self, backend: Backend):
"""Set the backend for the experiment with timing analysis."""
super()._set_backend(backend)
self._backend_timing = BackendTiming(backend)
def _get_dt(self) -> float:
"""A helper function to get finite dt.
Returns:
Backend dt value.
"""
if not self._backend or self._backend_timing.dt is None:
# When backend timing is not initialized or backend doesn't report dt.
return 1.0
return self._backend_timing.dt
def _get_width(self, duration: ParameterValueType) -> ParameterValueType:
"""A helper function to get flat top width.
Args:
duration: Cross resonance pulse duration in units of sec.
Returns:
A flat top widths of cross resonance pulse in units of sec.
"""
sigma_sec = self.experiment_options.sigma * self._get_dt()
return duration - 2 * sigma_sec * self.experiment_options.risefall
def _get_durations(self) -> np.ndarray:
"""Return cross resonance pulse durations in units of sec."""
opt = self.experiment_options
if opt.durations is None:
return np.linspace(opt.min_durations, opt.max_durations, opt.num_durations)
return np.asarray(opt.durations, dtype=float)
def _build_cr_circuit(self, pulse_gate: circuit.Gate) -> QuantumCircuit:
"""Single tone cross resonance.
Args:
pulse_gate: A pulse gate to represent a single cross resonance pulse.
Returns:
A circuit definition for the cross resonance pulse to measure.
"""
cr_circuit = QuantumCircuit(2)
cr_circuit.append(pulse_gate, [0, 1])
return cr_circuit
def _build_default_schedule(self) -> pulse.ScheduleBlock:
"""GaussianSquared cross resonance pulse.
Returns:
A schedule definition for the cross resonance pulse to measure.
"""
opt = self.experiment_options
duration = circuit.Parameter("duration")
cr_drive = self._backend_data.control_channel(self.physical_qubits)[0]
c_drive = self._backend_data.drive_channel(self.physical_qubits[0])
t_drive = self._backend_data.drive_channel(self.physical_qubits[1])
with pulse.build(default_alignment="left", name="cr") as cross_resonance:
# add cross resonance tone
pulse.play(
pulse.GaussianSquare(
duration=duration,
amp=opt.amp,
sigma=opt.sigma,
risefall_sigma_ratio=opt.risefall,
),
cr_drive,
)
# add cancellation tone
if not np.isclose(opt.amp_t, 0.0):
pulse.play(
pulse.GaussianSquare(
duration=duration,
amp=opt.amp_t,
sigma=opt.sigma,
risefall_sigma_ratio=opt.risefall,
),
t_drive,
)
else:
pulse.delay(duration, t_drive)
# Placeholder for empty drive channels. This is necessary due to a known pulse gate bug.
pulse.delay(duration, c_drive)
return cross_resonance
def circuits(self) -> List[QuantumCircuit]:
"""Return a list of experiment circuits.
Returns:
A list of :class:`QuantumCircuit`.
Raises:
QiskitError: When the backend is not set and cr gate is ``CRPulseGate`` type.
"""
if self._gate_cls is self.CRPulseGate:
if not self.backend:
# Backend is not set, but trying to provide CR gate as a pulse gate.
raise QiskitError(
"This experiment requires to have backend set to convert durations into samples "
"with backend reported dt value and also it requires the channel mapping from "
"the backend to build cross resonance pulse schedule. "
"Please provide valid backend object supporting 2Q pulse gate."
)
return self._pulse_gate_circuits()
return self._unitary_circuits()
def _pulse_gate_circuits(self):
"""Protocol to create circuits with pulse gate.
Pulse gate has backend timing constraints and duration should be in units of dt.
This method calls :meth:`_build_default_schedule` to generate actual schedule.
We assume backend has been set in this method call.
"""
schedule = self._build_default_schedule()
# Assume this parameter is in units of dt, because this controls pulse samples.
param_duration = next(iter(schedule.get_parameters("duration")))
# Gate duration will be shown in sec, which is more intuitive.
cr_gate = self._gate_cls(width=self._get_width(self._backend_timing.dt * param_duration))
# Create parameterized circuits with calibration.
tmp_circs = []
for control_state in (0, 1):
for meas_basis in ("x", "y", "z"):
tmp_qc = QuantumCircuit(2, 1)
if control_state:
tmp_qc.x(0)
tmp_qc.compose(
other=self._build_cr_circuit(cr_gate),
qubits=[0, 1],
inplace=True,
)
if meas_basis == "x":
tmp_qc.rz(np.pi / 2, 1)
if meas_basis in ("x", "y"):
tmp_qc.sx(1)
tmp_qc.measure(1, 0)
tmp_qc.metadata = {
"control_state": control_state,
"meas_basis": meas_basis,
}
tmp_qc.add_calibration(cr_gate, self.physical_qubits, schedule)
tmp_circs.append(tmp_qc)
circs = []
for duration in self._get_durations():
# Need to round pulse to satisfy hardware timing constraints.
# Convert into samples for assignment and validation.
valid_duration_dt = self._backend_timing.round_pulse(time=duration)
# Convert into sec to pass xval to analysis.
# Analysis expects xval of flat top widths in units of sec.
flat_top_width_sec = self._get_width(self._backend_timing.dt * valid_duration_dt)
if flat_top_width_sec < 0:
raise ValueError(
f"Input duration={duration} is less than pulse ramps lengths, resulting in "
f"a negative flat top length of {flat_top_width_sec} sec. "
f"This cross resonance schedule is invalid."
)
for circ in tmp_circs:
# Assign duration in dt to create pulse schedule.
assigned_circ = circ.assign_parameters(
{param_duration: valid_duration_dt},
inplace=False,
)
assigned_circ.metadata["xval"] = self.num_pulses * flat_top_width_sec
circs.append(assigned_circ)
return circs
def _unitary_circuits(self):
"""Protocol to create circuits with unitary gate.
Unitary gate has no timing constraints and accepts duration in sec.
Basically, this method doesn't require backend apart from conversion of
sigma in samples into sec.
"""
# Assume this parameter is in units of sec.
param_duration = circuit.Parameter("duration")
# Gate duration will be shown in sec, which is more intuitive.
cr_gate = self._gate_cls(width=self._get_width(param_duration))
# Create parameterized circuits without calibration.
tmp_circs = []
for control_state in (0, 1):
for meas_basis in ("x", "y", "z"):
tmp_qc = QuantumCircuit(2, 1)
if control_state:
tmp_qc.x(0)
tmp_qc.compose(
other=self._build_cr_circuit(cr_gate),
qubits=[0, 1],
inplace=True,
)
if meas_basis == "x":
tmp_qc.rz(np.pi / 2, 1)
if meas_basis in ("x", "y"):
tmp_qc.sx(1)
tmp_qc.measure(1, 0)
tmp_qc.metadata = {
"control_state": control_state,
"meas_basis": meas_basis,
}
tmp_circs.append(tmp_qc)
circs = []
for duration in self._get_durations():
flat_top_width_sec = self._get_width(duration)
if flat_top_width_sec < 0:
raise ValueError(
f"Input duration={duration} is less than pulse ramps lengths, resulting in "
f"a negative flat top length of {flat_top_width_sec} sec. "
f"This cross resonance schedule is invalid."
)
for circ in tmp_circs:
# Assign duration in sec since this is unitary gate.
assigned_circ = circ.assign_parameters(
{param_duration: duration},
inplace=False,
)
assigned_circ.metadata["xval"] = self.num_pulses * flat_top_width_sec
circs.append(assigned_circ)
return circs
def _finalize(self):
"""Set analysis option for initial guess that depends on experiment option values."""
edge_duration = np.sqrt(2 * np.pi) * self.experiment_options.sigma * self.num_pulses
for analysis in self.analysis.analyses():
init_guess = analysis.options.p0.copy()
if "t_off" in init_guess:
continue
init_guess["t_off"] = self._get_dt() * edge_duration
analysis.set_options(p0=init_guess)
def _metadata(self):
metadata = super()._metadata()
# Store measurement level and meas return if they have been
# set for the experiment
for run_opt in ["meas_level", "meas_return"]:
if hasattr(self.run_options, run_opt):
metadata[run_opt] = getattr(self.run_options, run_opt)
return metadata
class EchoedCrossResonanceHamiltonian(CrossResonanceHamiltonian):
r"""Echoed cross resonance Hamiltonian tomography experiment.
# section: overview
This is a variant of :class:`CrossResonanceHamiltonian`
for which the experiment framework is identical but the
cross resonance operation is realized as an echoed sequence
to remove unwanted single qubit rotations. The cross resonance
circuit looks like:
.. parsed-literal::
     ┌────────────────────┐  ┌───┐  ┌────────────────────┐
q_0: ┤0                   ├──┤ X ├──┤0                   ├──────────
     │  cr_tone(duration) │┌─┴───┴─┐│  cr_tone(duration) │┌────────┐
q_1: ┤1                   ├┤ Rz(π) ├┤1                   ├┤ Rz(-π) ├
     └────────────────────┘└───────┘└────────────────────┘└────────┘
Here two ``cr_tone`` are applied, where the latter one is with the
control qubit state flipped and with a phase flip of the target qubit frame.
This operation is equivalent to applying the ``cr_tone`` with a negative amplitude.
The Hamiltonian for this decomposition has no IX and ZI interactions,
and also a reduced IY interaction to some extent (not completely eliminated) [1].
Note that the CR Hamiltonian tomography experiment cannot detect the ZI term.
However, it is sensitive to the IX and IY terms.
# section: reference
.. ref_arxiv:: 1 2007.02925
"""
num_pulses = 2
def _build_cr_circuit(self, pulse_gate: circuit.Gate) -> QuantumCircuit:
"""Single tone cross resonance.
Args:
pulse_gate: A pulse gate to represent a single cross resonance pulse.
Returns:
A circuit definition for the cross resonance pulse to measure.
"""
cr_circuit = QuantumCircuit(2)
cr_circuit.append(pulse_gate, [0, 1])
cr_circuit.x(0)
cr_circuit.rz(np.pi, 1)
cr_circuit.append(pulse_gate, [0, 1])
cr_circuit.rz(-np.pi, 1)
return cr_circuit
|
qiskit-experiments/qiskit_experiments/library/characterization/cr_hamiltonian.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/library/characterization/cr_hamiltonian.py",
"repo_id": "qiskit-experiments",
"token_count": 9747
}
| 107 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
T2Hahn Echo Experiment class.
"""
from typing import List, Optional, Union, Sequence
import numpy as np
from qiskit import QuantumCircuit, QiskitError
from qiskit.circuit import Parameter
from qiskit.providers.backend import Backend
from qiskit_experiments.framework import BackendTiming, BaseExperiment, Options
from qiskit_experiments.library.characterization.analysis.t2hahn_analysis import T2HahnAnalysis
class T2Hahn(BaseExperiment):
r"""An experiment to measure the dephasing time insensitive to inhomogeneous
broadening using Hahn echoes.
# section: overview
This experiment is used to estimate the :math:`T_2` time of a single qubit.
:math:`T_2` is the dephasing time or the transverse relaxation time of the qubit
on the Bloch sphere as a result of both energy relaxation and pure dephasing in
the transverse plane. Unlike :math:`T_2^*`, which is measured by
:class:`.T2Ramsey`, :math:`T_2` is insensitive to inhomogeneous broadening.
This experiment consists of a series of circuits of the form
.. parsed-literal::
     ┌─────────┐┌──────────┐┌───────┐┌──────────┐┌─────────┐┌─┐
q_0: ┤ Rx(π/2) ├┤ DELAY(t) ├┤ RX(π) ├┤ DELAY(t) ├┤ RX(π/2) ├┤M├
     └─────────┘└──────────┘└───────┘└──────────┘└─────────┘└╥┘
c: 1/════════════════════════════════════════════════════════╩═
                                                              0
for each *t* from the user-specified delay times.
Each specified delay is the total delay of the circuit; it is split equally
across the delay gates surrounding the echo pulses, and the circuit metadata
records this total delay as ``xval``.
The circuits are run on the device or on a simulator backend.
# section: manual
:doc:`/manuals/characterization/t2hahn`
# section: analysis_ref
:class:`T2HahnAnalysis`
# section: example
.. jupyter-execute::
:hide-code:
# backend
from qiskit_experiments.test.t2hahn_backend import T2HahnBackend
conversion_factor = 1e-6
estimated_t2hahn = 20*conversion_factor
backend = T2HahnBackend(
t2hahn=[estimated_t2hahn],
frequency=[100100],
readout0to1 = [0.02],
readout1to0 = [0.02],
)
.. jupyter-execute::
import numpy as np
from qiskit_experiments.library.characterization.t2hahn import T2Hahn
delays = np.linspace(0, 50, 51)*1e-6
exp = T2Hahn(physical_qubits=(0, ),
delays=delays,
backend=backend)
exp.analysis.set_options(p0=None, plot=True)
exp_data = exp.run().block_for_results()
display(exp_data.figure(0))
exp_data.analysis_results(dataframe=True)
# section: reference
.. ref_arxiv:: 1 1904.06560
"""
@classmethod
def _default_experiment_options(cls) -> Options:
"""Default experiment options.
Experiment Options:
delays (Iterable[float]): Delay times of the experiments.
num_echoes (int): The number of echoes to perform.
"""
options = super()._default_experiment_options()
options.delays = None
options.num_echoes = 1
return options
def __init__(
self,
physical_qubits: Sequence[int],
delays: Union[List[float], np.array],
num_echoes: int = 1,
backend: Optional[Backend] = None,
):
"""
Initialize the T2 - Hahn Echo class.
Args:
physical_qubits: a single-element sequence containing the qubit whose T2 is to be
estimated.
delays: Total delay times of the experiments.
num_echoes: The number of echoes to perform.
backend: Optional, the backend to run the experiment on.
Raises:
QiskitError : Error for invalid input.
"""
# Initialize base experiment
super().__init__(physical_qubits, analysis=T2HahnAnalysis(), backend=backend)
# Set experiment options
self.set_experiment_options(delays=delays, num_echoes=num_echoes)
self._verify_parameters()
def _verify_parameters(self):
"""
Verify input correctness, raise QiskitError if needed.
Raises:
QiskitError : Error for invalid input.
"""
if any(delay < 0 for delay in self.experiment_options.delays):
raise QiskitError(
f"The delays list {self.experiment_options.delays} should only contain "
"non-negative elements."
)
def circuits(self) -> List[QuantumCircuit]:
"""
Return a list of experiment circuits.
Each circuit consists of an RX(π/2) gate followed by a sequence of a delay gate,
an RX(π) echo pulse, and another delay gate.
The sequence repeats for the number of echoes and terminates with RX(±π/2).
Returns:
The experiment circuits.
"""
timing = BackendTiming(self.backend)
delay_param = Parameter("delay")
num_echoes = self.experiment_options.num_echoes
# First X rotation in 90 degrees
template = QuantumCircuit(1, 1)
template.rx(np.pi / 2, 0) # Brings the qubit to the X Axis
if num_echoes == 0:
# if number of echoes is 0 then just apply the delay gate
template.delay(delay_param, 0, timing.delay_unit)
else:
for _ in range(num_echoes):
template.delay(delay_param, 0, timing.delay_unit)
template.rx(np.pi, 0)
template.delay(delay_param, 0, timing.delay_unit)
if num_echoes % 2 == 1:
template.rx(np.pi / 2, 0) # X90 again since the num of echoes is odd
else:
template.rx(-np.pi / 2, 0) # X(-90) again since the num of echoes is even
template.measure(0, 0) # measure
circuits = []
for delay in self.experiment_options.delays:
if num_echoes == 0:
single_delay = timing.delay_time(time=delay)
total_delay = single_delay
else:
# Equal delay is put before and after each echo, so each echo gets
# two delay gates. When there are multiple echoes, the total delay
# between echoes is 2 * single_delay, made up of two delay gates.
single_delay = timing.delay_time(time=delay / num_echoes / 2)
total_delay = single_delay * num_echoes * 2
assigned = template.assign_parameters(
{delay_param: timing.round_delay(time=single_delay)}, inplace=False
)
assigned.metadata = {"xval": total_delay}
circuits.append(assigned)
return circuits
def _metadata(self):
metadata = super()._metadata()
# Store measurement level and meas return if they have been
# set for the experiment
for run_opt in ["meas_level", "meas_return"]:
if hasattr(self.run_options, run_opt):
metadata[run_opt] = getattr(self.run_options, run_opt)
return metadata
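# Illustrative sketch (not part of the class; numbers are made up): how a requested
# total delay maps onto the individual delay gates built in circuits() above.
_total_delay = 30e-6                            # requested total delay t [s]
_num_echoes = 3                                 # echoes requested by the user
_single_delay = _total_delay / _num_echoes / 2  # duration of each of the 2 * num_echoes delay gates
assert abs(_single_delay * _num_echoes * 2 - _total_delay) < 1e-12  # the metadata "xval" stores this total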
|
qiskit-experiments/qiskit_experiments/library/characterization/t2hahn.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/library/characterization/t2hahn.py",
"repo_id": "qiskit-experiments",
"token_count": 3486
}
| 108 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Utilities for using the Clifford group in randomized benchmarking.
"""
import itertools
import os
from functools import lru_cache
from numbers import Integral
from typing import Optional, Union, Tuple, Sequence, Iterable
import numpy as np
from qiskit.circuit import CircuitInstruction, Qubit
from qiskit.circuit import Gate, Instruction
from qiskit.circuit import QuantumCircuit, QuantumRegister
from qiskit.circuit.library import SdgGate, HGate, SGate, XGate, YGate, ZGate
from qiskit.compiler import transpile
from qiskit.exceptions import QiskitError
from qiskit.quantum_info import Clifford
from qiskit.transpiler import CouplingMap, PassManager
from qiskit.transpiler.passes.synthesis.high_level_synthesis import HLSConfig, HighLevelSynthesis
DEFAULT_SYNTHESIS_METHOD = "rb_default"
_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "data")
_CLIFFORD_COMPOSE_1Q = np.load(f"{_DATA_FOLDER}/clifford_compose_1q.npz")["table"]
_CLIFFORD_INVERSE_1Q = np.load(f"{_DATA_FOLDER}/clifford_inverse_1q.npz")["table"]
_CLIFFORD_INVERSE_2Q = np.load(f"{_DATA_FOLDER}/clifford_inverse_2q.npz")["table"]
_clifford_compose_2q_data = np.load(f"{_DATA_FOLDER}/clifford_compose_2q_dense_selected.npz")
_CLIFFORD_COMPOSE_2Q_DENSE = _clifford_compose_2q_data["table"]
# valid indices for the columns of the _CLIFFORD_COMPOSE_2Q_DENSE table
_valid_sparse_indices = _clifford_compose_2q_data["valid_sparse_indices"]
# map a clifford number to the index of _CLIFFORD_COMPOSE_2Q_DENSE
_clifford_num_to_dense_index = {idx: ii for ii, idx in enumerate(_valid_sparse_indices)}
_CLIFFORD_TENSOR_1Q = np.load(f"{_DATA_FOLDER}/clifford_tensor_1q.npz")["table"]
# Transpilation utilities
def _transpile_clifford_circuit(
circuit: QuantumCircuit, physical_qubits: Sequence[int]
) -> QuantumCircuit:
# Simplified transpile that only decomposes Clifford circuits and creates the layout.
return _apply_qubit_layout(_decompose_clifford_ops(circuit), physical_qubits=physical_qubits)
def _decompose_clifford_ops(circuit: QuantumCircuit) -> QuantumCircuit:
# Simplified QuantumCircuit.decompose, which decomposes only Clifford ops
res = circuit.copy_empty_like()
if hasattr(circuit, "_parameter_table"):
res._parameter_table = circuit._parameter_table
for inst in circuit:
if inst.operation.name.startswith("Clifford"): # Decompose
rule = inst.operation.definition.data
if len(rule) == 1 and len(inst.qubits) == len(rule[0].qubits):
if inst.operation.definition.global_phase:
res.global_phase += inst.operation.definition.global_phase
res._data.append(
CircuitInstruction(
operation=rule[0].operation,
qubits=inst.qubits,
clbits=inst.clbits,
)
)
else:
_circuit_compose(res, inst.operation.definition, qubits=inst.qubits)
else: # Keep the original instruction
res._data.append(inst)
return res
def _apply_qubit_layout(circuit: QuantumCircuit, physical_qubits: Sequence[int]) -> QuantumCircuit:
# Mapping qubits in circuit to physical qubits (layout)
res = QuantumCircuit(1 + max(physical_qubits), name=circuit.name, metadata=circuit.metadata)
res.add_bits(circuit.clbits)
for reg in circuit.cregs:
res.add_register(reg)
_circuit_compose(res, circuit, qubits=physical_qubits)
if hasattr(circuit, "_parameter_table"):
res._parameter_table = circuit._parameter_table
return res
def _circuit_compose(
self: QuantumCircuit, other: QuantumCircuit, qubits: Sequence[Union[Qubit, int]]
) -> QuantumCircuit:
# Simplified QuantumCircuit.compose with clbits=None, front=False, inplace=True, wrap=False
# without any validation, parameter_table/calibrations updates and copy of operations
# The input circuit `self` is changed inplace.
qubit_map = {
other.qubits[i]: (self.qubits[q] if isinstance(q, int) else q) for i, q in enumerate(qubits)
}
for instr in other:
self._data.append(
CircuitInstruction(
operation=instr.operation,
qubits=[qubit_map[q] for q in instr.qubits],
clbits=instr.clbits,
),
)
self.global_phase += other.global_phase
return self
def _synthesize_clifford(
clifford: Clifford,
basis_gates: Optional[Tuple[str]],
coupling_tuple: Optional[Tuple[Tuple[int, int]]] = None,
synthesis_method: str = DEFAULT_SYNTHESIS_METHOD,
) -> QuantumCircuit:
"""Synthesize a circuit of a Clifford element. The resulting circuit contains only
``basis_gates`` and it complies with ``coupling_tuple``.
Args:
clifford: Clifford element to be converted
basis_gates: basis gates to use in the conversion
coupling_tuple: coupling map to use in the conversion in the form of tuple of edges
synthesis_method: conversion algorithm name
Returns:
Synthesized circuit
"""
qc = QuantumCircuit(clifford.num_qubits, name=str(clifford))
qc.append(clifford, qc.qubits)
return _synthesize_clifford_circuit(
qc,
basis_gates=basis_gates,
coupling_tuple=coupling_tuple,
synthesis_method=synthesis_method,
)
def _synthesize_clifford_circuit(
circuit: QuantumCircuit,
basis_gates: Optional[Tuple[str]],
coupling_tuple: Optional[Tuple[Tuple[int, int]]] = None,
synthesis_method: str = DEFAULT_SYNTHESIS_METHOD,
) -> QuantumCircuit:
"""Convert a Clifford circuit into one composed of ``basis_gates`` with
``coupling_tuple`` satisfied, using the specified synthesis method.
Args:
circuit: Clifford circuit to be converted
basis_gates: basis gates to use in the conversion
coupling_tuple: coupling map to use in the conversion in the form of tuple of edges
synthesis_method: name of Clifford synthesis algorithm to use
Returns:
Synthesized circuit
"""
if basis_gates:
basis_gates = list(basis_gates)
coupling_map = CouplingMap(coupling_tuple) if coupling_tuple else None
# special handling for 1q or 2q case for speed
if circuit.num_qubits <= 2:
if synthesis_method == DEFAULT_SYNTHESIS_METHOD:
return transpile(
circuit,
basis_gates=basis_gates,
coupling_map=coupling_map,
optimization_level=1,
)
else:
# Provided custom synthesis method, re-synthesize Clifford circuit
# convert the circuit back to a Clifford object and then call the synthesis plugin
new_circuit = QuantumCircuit(circuit.num_qubits, name=circuit.name)
new_circuit.append(Clifford(circuit), new_circuit.qubits)
circuit = new_circuit
# for 3q+ or custom synthesis method, synthesizes clifford circuit
hls_config = HLSConfig(clifford=[(synthesis_method, {"basis_gates": basis_gates})])
pm = PassManager([HighLevelSynthesis(hls_config=hls_config, coupling_map=coupling_map)])
circuit = pm.run(circuit)
return circuit
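# Illustrative usage sketch for _synthesize_clifford above (not part of the library;
# the basis gates and seed are arbitrary). The synthesized circuit is expected to
# contain only the requested basis gates.
from qiskit.quantum_info import random_clifford
_example_cliff = random_clifford(2, seed=7)
_example_qc = _synthesize_clifford(_example_cliff, basis_gates=("rz", "sx", "cx"))
assert {inst.operation.name for inst in _example_qc.data} <= {"rz", "sx", "cx"}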
@lru_cache(maxsize=256)
def _clifford_1q_int_to_instruction(
num: Integral,
basis_gates: Optional[Tuple[str]],
synthesis_method: str = DEFAULT_SYNTHESIS_METHOD,
) -> Instruction:
return CliffordUtils.clifford_1_qubit_circuit(
num, basis_gates=basis_gates, synthesis_method=synthesis_method
).to_instruction()
@lru_cache(maxsize=11520)
def _clifford_2q_int_to_instruction(
num: Integral,
basis_gates: Optional[Tuple[str]],
coupling_tuple: Optional[Tuple[Tuple[int, int]]],
synthesis_method: str = DEFAULT_SYNTHESIS_METHOD,
) -> Instruction:
return CliffordUtils.clifford_2_qubit_circuit(
num,
basis_gates=basis_gates,
coupling_tuple=coupling_tuple,
synthesis_method=synthesis_method,
).to_instruction()
def _hash_cliff(cliff):
return cliff.tableau.tobytes(), cliff.tableau.shape
def _dehash_cliff(cliff_hash):
tableau = np.frombuffer(cliff_hash[0], dtype=bool).reshape(cliff_hash[1])
return Clifford(tableau)
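# Illustrative sketch: the hash/dehash pair above is a lossless round trip, which is
# what makes Clifford objects usable as lru_cache keys. The sampled Clifford is arbitrary.
from qiskit.quantum_info import random_clifford
_cliff = random_clifford(2, seed=1234)
_key = _hash_cliff(_cliff)            # (bytes of the boolean tableau, its shape)
assert _dehash_cliff(_key) == _cliff  # reconstruction gives back an equal Clifford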
def _clifford_to_instruction(
clifford: Clifford,
basis_gates: Optional[Tuple[str]],
coupling_tuple: Optional[Tuple[Tuple[int, int]]],
synthesis_method: str = DEFAULT_SYNTHESIS_METHOD,
) -> Instruction:
return _cached_clifford_to_instruction(
_hash_cliff(clifford),
basis_gates=basis_gates,
coupling_tuple=coupling_tuple,
synthesis_method=synthesis_method,
)
@lru_cache(maxsize=256)
def _cached_clifford_to_instruction(
cliff_hash: Tuple[str, Tuple[int, int]],
basis_gates: Optional[Tuple[str]],
coupling_tuple: Optional[Tuple[Tuple[int, int]]],
synthesis_method: str = DEFAULT_SYNTHESIS_METHOD,
) -> Instruction:
return _synthesize_clifford(
_dehash_cliff(cliff_hash),
basis_gates=basis_gates,
coupling_tuple=coupling_tuple,
synthesis_method=synthesis_method,
).to_instruction()
# The classes VGate and WGate are not actually used in the code - we leave them here to give
# a better understanding of the composition of the layers for 2-qubit Cliffords.
class VGate(Gate):
"""V Gate used in Clifford synthesis."""
def __init__(self):
"""Create new V Gate."""
super().__init__("v", 1, [])
def _define(self):
"""V Gate definition."""
q = QuantumRegister(1, "q")
qc = QuantumCircuit(q)
qc.data = [(SdgGate(), [q[0]], []), (HGate(), [q[0]], [])]
self.definition = qc
class WGate(Gate):
"""W Gate used in Clifford synthesis."""
def __init__(self):
"""Create new W Gate."""
super().__init__("w", 1, [])
def _define(self):
"""W Gate definition."""
q = QuantumRegister(1, "q")
qc = QuantumCircuit(q)
qc.data = [(HGate(), [q[0]], []), (SGate(), [q[0]], [])]
self.definition = qc
class CliffordUtils:
"""Utilities for generating one- and two-qubit Clifford circuits and elements."""
NUM_CLIFFORD_1_QUBIT = 24
NUM_CLIFFORD_2_QUBIT = 11520
CLIFFORD_1_QUBIT_SIG = (2, 3, 4)
CLIFFORD_2_QUBIT_SIGS = [ # TODO: deprecate
(2, 2, 3, 3, 4, 4),
(2, 2, 3, 3, 3, 3, 4, 4),
(2, 2, 3, 3, 3, 3, 4, 4),
(2, 2, 3, 3, 4, 4),
]
@classmethod
@lru_cache(maxsize=24)
def clifford_1_qubit(cls, num):
"""Return the 1-qubit clifford element corresponding to `num`
where `num` is between 0 and 23.
"""
return Clifford(cls.clifford_1_qubit_circuit(num), validate=False)
@classmethod
@lru_cache(maxsize=11520)
def clifford_2_qubit(cls, num):
"""Return the 2-qubit clifford element corresponding to ``num``,
where ``num`` is between 0 and 11519.
"""
return Clifford(cls.clifford_2_qubit_circuit(num), validate=False)
@classmethod
@lru_cache(maxsize=24)
def clifford_1_qubit_circuit(
cls,
num,
basis_gates: Optional[Tuple[str, ...]] = None,
synthesis_method: str = DEFAULT_SYNTHESIS_METHOD,
):
"""Return the 1-qubit clifford circuit corresponding to ``num``,
where ``num`` is between 0 and 23.
"""
unpacked = cls._unpack_num(num, cls.CLIFFORD_1_QUBIT_SIG)
i, j, p = unpacked[0], unpacked[1], unpacked[2]
qc = QuantumCircuit(1, name=f"Clifford-1Q({num})")
if i == 1:
qc.h(0)
if j == 1:
qc.sxdg(0)
if j == 2:
qc.s(0)
if p == 1:
qc.x(0)
if p == 2:
qc.y(0)
if p == 3:
qc.z(0)
if basis_gates:
qc = _synthesize_clifford_circuit(qc, basis_gates, synthesis_method=synthesis_method)
return qc
@classmethod
@lru_cache(maxsize=11520)
def clifford_2_qubit_circuit(
cls,
num,
basis_gates: Optional[Tuple[str, ...]] = None,
coupling_tuple: Optional[Tuple[Tuple[int, int]]] = None,
synthesis_method: str = DEFAULT_SYNTHESIS_METHOD,
):
"""Return the 2-qubit clifford circuit corresponding to `num`
where `num` is between 0 and 11519.
"""
qc = QuantumCircuit(2, name=f"Clifford-2Q({num})")
for layer, idx in enumerate(_layer_indices_from_num(num)):
if basis_gates:
layer_circ = _transformed_clifford_layer(
layer, idx, basis_gates, coupling_tuple, synthesis_method=synthesis_method
)
else:
layer_circ = _CLIFFORD_LAYER[layer][idx]
_circuit_compose(qc, layer_circ, qubits=(0, 1))
return qc
@staticmethod
def _unpack_num(num, sig):
r"""Returns a tuple :math:`(a_1, \ldots, a_n)` where
:math:`0 \le a_i < \sigma_i`, where
``sig`` = :math:`(\sigma_1, \ldots, \sigma_n)` and ``num`` is the sequential
number of the tuple, i.e. :math:`num = a_1 + a_2\sigma_1 + a_3\sigma_1\sigma_2 + \cdots`
"""
res = []
for k in sig:
res.append(num % k)
num //= k
return res
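# Illustrative sketch of the mixed-radix unpacking above (example values are arbitrary):
# with CLIFFORD_1_QUBIT_SIG = (2, 3, 4) the 24 one-qubit Cliffords map to digit triples
# (i, j, p); e.g. num = 1 unpacks to [1, 0, 0], which corresponds to a single H gate.
assert CliffordUtils._unpack_num(13, (2, 3, 4)) == [1, 0, 2]  # 13 = 1 + 2*0 + (2*3)*2
assert CliffordUtils._unpack_num(1, (2, 3, 4)) == [1, 0, 0]
assert [inst.operation.name for inst in CliffordUtils.clifford_1_qubit_circuit(1)] == ["h"]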
# Constant mapping from 1Q single Clifford gate to 1Q Clifford numerical identifier.
# This table must be generated using `data.generate_clifford_data.gen_cliff_single_1q_gate_map`, or,
# equivalently, correspond to the ordering implicitly defined by CliffordUtils.clifford_1_qubit_circuit.
_CLIFF_SINGLE_GATE_MAP_1Q = {
("id", (0,)): 0,
("h", (0,)): 1,
("sxdg", (0,)): 2,
("s", (0,)): 4,
("x", (0,)): 6,
("sx", (0,)): 8,
("y", (0,)): 12,
("z", (0,)): 18,
("sdg", (0,)): 22,
}
# Constant mapping from 2Q single Clifford gate to 2Q Clifford numerical identifier.
# This table must be generated using `data.generate_clifford_data.gen_cliff_single_2q_gate_map`, or,
# equivalently, correspond to the ordering defined by _layer_indices_from_num and _CLIFFORD_LAYER.
_CLIFF_SINGLE_GATE_MAP_2Q = {
("id", (0,)): 0,
("id", (1,)): 0,
("h", (0,)): 5760,
("h", (1,)): 2880,
("sxdg", (0,)): 6720,
("sxdg", (1,)): 3200,
("s", (0,)): 7680,
("s", (1,)): 3520,
("x", (0,)): 4,
("x", (1,)): 1,
("sx", (0,)): 6724,
("sx", (1,)): 3201,
("y", (0,)): 8,
("y", (1,)): 2,
("z", (0,)): 12,
("z", (1,)): 3,
("sdg", (0,)): 7692,
("sdg", (1,)): 3523,
("cx", (0, 1)): 16,
("cx", (1, 0)): 2336,
("cz", (0, 1)): 368,
("cz", (1, 0)): 368,
}
########
# Functions for 1-qubit integer Clifford operations
def compose_1q(lhs: Integral, rhs: Integral) -> Integral:
"""Return the composition of 1-qubit clifford integers."""
return _CLIFFORD_COMPOSE_1Q[lhs, rhs]
def inverse_1q(num: Integral) -> Integral:
"""Return the inverse of a 1-qubit clifford integer."""
return _CLIFFORD_INVERSE_1Q[num]
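# Illustrative sketch (assumes integer 0 labels the identity Clifford, consistent with
# _CLIFF_SINGLE_GATE_MAP_1Q above): composing a Clifford with its inverse gives the identity.
_n_h = _CLIFF_SINGLE_GATE_MAP_1Q[("h", (0,))]   # integer label of the Hadamard Clifford
assert compose_1q(_n_h, inverse_1q(_n_h)) == 0  # H composed with its inverse is the identity
assert compose_1q(0, _n_h) == _n_h              # the identity is the neutral element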
def num_from_1q_circuit(qc: QuantumCircuit) -> Integral:
"""Convert a given 1-qubit Clifford circuit to the corresponding integer.
Note: The circuit must consist of gates in :const:`_CLIFF_SINGLE_GATE_MAP_1Q`,
RZGate, Delay and Barrier.
"""
num = 0
for inst in qc:
rhs = _num_from_1q_gate(op=inst.operation)
num = _CLIFFORD_COMPOSE_1Q[num, rhs]
return num
def _num_from_1q_gate(op: Instruction) -> int:
"""
Convert a given 1-qubit clifford operation to the corresponding integer.
Note that supported operations are limited to ones in :const:`_CLIFF_SINGLE_GATE_MAP_1Q` or Rz gate.
Args:
op: operation to be converted.
Returns:
An integer representing a Clifford consisting of a single operation.
Raises:
QiskitError: If the input instruction is not a Clifford instruction.
QiskitError: If rz is given with an angle that is not Clifford.
"""
if op.name in {"delay", "barrier"}:
return 0
try:
name = _deparameterized_name(op)
return _CLIFF_SINGLE_GATE_MAP_1Q[(name, (0,))]
except QiskitError as err:
raise QiskitError(
f"Parameterized instruction {op.name} could not be converted to integer Clifford"
) from err
except KeyError as err:
raise QiskitError(
f"Instruction {op.name} could not be converted to integer Clifford"
) from err
def _deparameterized_name(inst: Instruction) -> str:
if inst.name == "rz":
if np.isclose(inst.params[0], np.pi) or np.isclose(inst.params[0], -np.pi):
return "z"
elif np.isclose(inst.params[0], np.pi / 2):
return "s"
elif np.isclose(inst.params[0], -np.pi / 2):
return "sdg"
else:
raise QiskitError(f"Wrong param {inst.params[0]} for rz in clifford")
return inst.name
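# Illustrative sketch of the rz-angle mapping above (the listed angles are the Clifford cases).
from qiskit.circuit.library import RZGate
assert _deparameterized_name(RZGate(np.pi)) == "z"
assert _deparameterized_name(RZGate(np.pi / 2)) == "s"
assert _deparameterized_name(RZGate(-np.pi / 2)) == "sdg"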
########
# Functions for 2-qubit integer Clifford operations
def compose_2q(lhs: Integral, rhs: Integral) -> Integral:
"""Return the composition of 2-qubit clifford integers."""
num = lhs
for layer, idx in enumerate(_layer_indices_from_num(rhs)):
gate_numbers = _CLIFFORD_LAYER_NUMS[layer][idx]
for n in gate_numbers:
num = _CLIFFORD_COMPOSE_2Q_DENSE[num, _clifford_num_to_dense_index[n]]
return num
def inverse_2q(num: Integral) -> Integral:
"""Return the inverse of a 2-qubit clifford integer."""
return _CLIFFORD_INVERSE_2Q[num]
def num_from_2q_circuit(qc: QuantumCircuit) -> Integral:
"""Convert a given 2-qubit Clifford circuit to the corresponding integer.
Note: The circuit must consist of gates in :const:`_CLIFF_SINGLE_GATE_MAP_2Q`,
RZGate, Delay and Barrier.
"""
lhs = 0
for rhs in _clifford_2q_nums_from_2q_circuit(qc):
lhs = _CLIFFORD_COMPOSE_2Q_DENSE[lhs, _clifford_num_to_dense_index[rhs]]
return lhs
def _num_from_2q_gate(
op: Instruction, qubits: Optional[Union[Tuple[int, int], Tuple[int]]] = None
) -> int:
"""
Convert a given 1- or 2-qubit Clifford operation to the corresponding 2-qubit Clifford integer.
Note that supported operations are limited to ones in `_CLIFF_SINGLE_GATE_MAP_2Q` or Rz gate.
Args:
op: operation of instruction to be converted.
qubits: qubits to which the operation applies
Returns:
An integer representing a Clifford consisting of a single operation.
Raises:
QiskitError: If the input instruction is not a Clifford instruction.
QiskitError: If rz is given with an angle that is not Clifford.
"""
if op.name in {"delay", "barrier"}:
return 0
qubits = qubits or (0, 1)
try:
name = _deparameterized_name(op)
return _CLIFF_SINGLE_GATE_MAP_2Q[(name, qubits)]
except QiskitError as err:
raise QiskitError(
f"Parameterized instruction {op.name} could not be converted to integer Clifford"
) from err
except KeyError as err:
raise QiskitError(
f"Instruction {op.name} on {qubits} could not be converted to integer Clifford"
) from err
def _append_v_w(qc, vw0, vw1):
if vw0 == "v":
qc.sdg(0)
qc.h(0)
elif vw0 == "w":
qc.h(0)
qc.s(0)
if vw1 == "v":
qc.sdg(1)
qc.h(1)
elif vw1 == "w":
qc.h(1)
qc.s(1)
def _create_cliff_2q_layer_0():
"""Layer 0 consists of 0 or 1 H gates on each qubit, followed by 0/1/2 V gates on each qubit.
Number of Cliffords == 36."""
circuits = []
num_h = [0, 1]
v_w_gates = ["i", "v", "w"]
for h0, h1, v0, v1 in itertools.product(num_h, num_h, v_w_gates, v_w_gates):
qc = QuantumCircuit(2)
for _ in range(h0):
qc.h(0)
for _ in range(h1):
qc.h(1)
_append_v_w(qc, v0, v1)
circuits.append(qc)
return circuits
def _create_cliff_2q_layer_1():
"""Layer 1 consists of one of the following:
- nothing
- cx(0,1) followed by 0/1/2 V gates on each qubit
- cx(0,1), cx(1,0) followed by 0/1/2 V gates on each qubit
- cx(0,1), cx(1,0), cx(0,1)
Number of Cliffords == 20."""
circuits = [QuantumCircuit(2)] # identity at the beginning
v_w_gates = ["i", "v", "w"]
for v0, v1 in itertools.product(v_w_gates, v_w_gates):
qc = QuantumCircuit(2)
qc.cx(0, 1)
_append_v_w(qc, v0, v1)
circuits.append(qc)
for v0, v1 in itertools.product(v_w_gates, v_w_gates):
qc = QuantumCircuit(2)
qc.cx(0, 1)
qc.cx(1, 0)
_append_v_w(qc, v0, v1)
circuits.append(qc)
qc = QuantumCircuit(2) # swap at the end
qc.cx(0, 1)
qc.cx(1, 0)
qc.cx(0, 1)
circuits.append(qc)
return circuits
def _create_cliff_2q_layer_2():
"""Layer 2 consists of a Pauli gate on each qubit {Id, X, Y, Z}.
Number of Cliffords == 16."""
circuits = []
pauli = ("i", XGate(), YGate(), ZGate())
for p0, p1 in itertools.product(pauli, pauli):
qc = QuantumCircuit(2)
if p0 != "i":
qc.append(p0, [0])
if p1 != "i":
qc.append(p1, [1])
circuits.append(qc)
return circuits
_CLIFFORD_LAYER = (
_create_cliff_2q_layer_0(),
_create_cliff_2q_layer_1(),
_create_cliff_2q_layer_2(),
)
_NUM_LAYER_1 = 20
_NUM_LAYER_2 = 16
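# Illustrative consistency check for the layer construction above:
# 36 * 20 * 16 = 11520 two-qubit Cliffords.
assert len(_CLIFFORD_LAYER[0]) == 36
assert len(_CLIFFORD_LAYER[1]) == _NUM_LAYER_1 == 20
assert len(_CLIFFORD_LAYER[2]) == _NUM_LAYER_2 == 16
assert 36 * _NUM_LAYER_1 * _NUM_LAYER_2 == CliffordUtils.NUM_CLIFFORD_2_QUBIT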
def _clifford_2q_nums_from_2q_circuit(qc: QuantumCircuit) -> Iterable[Integral]:
"""Yield Clifford numbers that represents the 2Q Clifford circuit."""
for inst in qc:
qubits = tuple(qc.find_bit(q).index for q in inst.qubits)
yield _num_from_2q_gate(op=inst.operation, qubits=qubits)
# Construct mapping from Clifford layers to series of Clifford numbers
_CLIFFORD_LAYER_NUMS = [
[tuple(_clifford_2q_nums_from_2q_circuit(qc)) for qc in _CLIFFORD_LAYER[layer]]
for layer in [0, 1, 2]
]
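# Illustrative sketch (assumes integer 0 labels the identity, consistent with
# _CLIFF_SINGLE_GATE_MAP_2Q above): composing a 2-qubit Clifford with its inverse
# returns the identity label.
_n_cx = _CLIFF_SINGLE_GATE_MAP_2Q[("cx", (0, 1))]
assert compose_2q(_n_cx, inverse_2q(_n_cx)) == 0  # CX is self-inverse
assert inverse_2q(0) == 0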
@lru_cache(maxsize=256)
def _transformed_clifford_layer(
layer: int,
index: Integral,
basis_gates: Tuple[str, ...],
coupling_tuple: Optional[Tuple[Tuple[int, int]]],
synthesis_method: str = DEFAULT_SYNTHESIS_METHOD,
) -> QuantumCircuit:
# Return the index-th quantum circuit of the layer translated with the basis_gates.
# The result is cached for speed.
return _synthesize_clifford_circuit(
_CLIFFORD_LAYER[layer][index],
basis_gates=basis_gates,
coupling_tuple=coupling_tuple,
synthesis_method=synthesis_method,
)
def _num_from_layer_indices(triplet: Tuple[Integral, Integral, Integral]) -> Integral:
"""Return the clifford number corresponding to the input triplet."""
num = triplet[0] * _NUM_LAYER_1 * _NUM_LAYER_2 + triplet[1] * _NUM_LAYER_2 + triplet[2]
return num
def _layer_indices_from_num(num: Integral) -> Tuple[Integral, Integral, Integral]:
"""Return the triplet of layer indices corresponding to the input number."""
idx2 = num % _NUM_LAYER_2
num = num // _NUM_LAYER_2
idx1 = num % _NUM_LAYER_1
idx0 = num // _NUM_LAYER_1
return idx0, idx1, idx2
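# Illustrative round-trip sketch for the two helpers above: packing and unpacking
# layer indices are mutually inverse over the 11520 valid two-qubit Clifford labels.
for _num in (0, 1, 367, 11519):  # arbitrary sample of valid labels
    assert _num_from_layer_indices(_layer_indices_from_num(_num)) == _num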
def _tensor_1q_nums(first: Integral, second: Integral) -> Integral:
"""Return the 2-qubit Clifford integer that is the tensor product of 1-qubit Cliffords."""
return _CLIFFORD_TENSOR_1Q[first, second]
|
qiskit-experiments/qiskit_experiments/library/randomized_benchmarking/clifford_utils.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/library/randomized_benchmarking/clifford_utils.py",
"repo_id": "qiskit-experiments",
"token_count": 10460
}
| 109 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Fitter basis classes for tomography analysis.
"""
from abc import ABC, abstractmethod
from typing import Sequence, Tuple, Optional
import numpy as np
from qiskit import QuantumCircuit
class BaseBasis(ABC):
"""Abstract base class for a measurement and preparation bases."""
def __init__(self, name: str):
"""Initialize a basis.
Args:
name: the name for the basis.
"""
self._name = name
def __hash__(self):
return hash((type(self), self._name))
def __eq__(self, value):
tup1 = (type(self), self.name)
tup2 = (type(value), getattr(value, "name", None))
return tup1 == tup2
@property
def name(self) -> str:
"""Return the basis name"""
return self._name
@abstractmethod
def index_shape(self, qubits: Sequence[int]) -> Tuple[int, ...]:
"""Return the shape for the specified number of indices.
Args:
qubits: the basis subsystems to return the index shape for.
Returns:
The shape of allowed values for the index on the specified qubits.
"""
@abstractmethod
def circuit(
self, index: Sequence[int], qubits: Optional[Sequence[int]] = None
) -> QuantumCircuit:
"""Return the basis preparation circuit.
Args:
index: a list of basis elements to tensor together.
qubits: Optional, the physical qubit subsystems for the index.
If None this will be set to ``(0, ..., N-1)`` for a
length N index.
Returns:
The logical basis circuit for the specified index and qubits.
.. note::
This returns a logical circuit on the specified number of qubits
and should be remapped to the corresponding physical qubits
during experiment transpilation.
"""
class PreparationBasis(BaseBasis):
"""Abstract base class for a tomography preparation basis.
Subclasses should implement the following abstract methods to
define a preparation basis:
* The :meth:`circuit` method which returns the logical preparation
:class:`~qiskit.circuit.QuantumCircuit` for basis element index on the specified
qubits. This circuit should be a logical circuit on the specified
number of qubits and will be remapped to the corresponding physical
qubits during transpilation.
* The :meth:`matrix` method which returns the density matrix prepared
by the bases element index on the specified qubits.
* The :meth:`index_shape` method which returns the shape of allowed
basis indices for the specified qubits, and their values.
* The :meth:`~.PreparationBasis.matrix_shape` method which returns the shape of subsystem
dimensions of the density matrix state on the specified qubits.
"""
@abstractmethod
def matrix_shape(self, qubits: Sequence[int]) -> Tuple[int, ...]:
"""Return the shape of subsystem dimensions of the state :attr:`~matrix`.
Args:
qubits: the physical qubit subsystems.
Returns:
A tuple of subsystem dimensions for the specified qubits.
"""
@abstractmethod
def matrix(self, index: Sequence[int], qubits: Optional[Sequence[int]] = None) -> np.ndarray:
"""Return the density matrix data array for the index and qubits.
This state is used by tomography fitters for reconstruction and should
correspond to the target state for the corresponding preparation
:meth:`circuit`.
Args:
index: a list of subsystem basis indices.
qubits: Optional, the physical qubit subsystems for the index.
If None this will be set to ``(0, ..., N-1)`` for a
length N index.
Returns:
The density matrix prepared by the specified index and qubits.
"""
class MeasurementBasis(BaseBasis):
"""Abstract base class for a tomography measurement basis.
Subclasses should implement the following abstract methods to
define a measurement basis:
* The :meth:`circuit` method which returns the logical measurement
:class:`~qiskit.circuit.QuantumCircuit` for basis element index on the specified
physical qubits. This circuit should be a logical circuit on the
specified number of qubits and will be remapped to the corresponding
physical qubits during transpilation. It should include classical
bits and the measure instructions for the basis measurement storing
the outcome value in these bits.
* The :meth:`matrix` method which returns the POVM element corresponding to the
basis element index and measurement outcome on the specified qubits. This should
return either a :class:`~qiskit.quantum_info.Statevector` for a PVM element, or
:class:`~qiskit.quantum_info.DensityMatrix` for a general POVM element.
* The :meth:`index_shape` method which returns the shape of allowed
basis indices for the specified qubits, and their values.
* The :meth:`~.MeasurementBasis.matrix_shape` method which returns the shape of subsystem
dimensions of the POVM element matrices on the specified qubits.
* The :meth:`outcome_shape` method which returns the shape of allowed
outcome values for a measurement of specified qubits.
"""
@abstractmethod
def outcome_shape(self, qubits: Sequence[int]) -> Tuple[int, ...]:
"""Return the shape of allowed measurement outcomes on specified qubits.
Args:
qubits: the physical qubit subsystems.
Returns:
A tuple of the number of measurement outcomes for specified qubits.
"""
@abstractmethod
def matrix_shape(self, qubits: Sequence[int]) -> Tuple[int, ...]:
"""Return the shape of subsystem dimensions of a POVM :attr:`~matrix`.
Args:
qubits: the physical qubit subsystems.
Returns:
A tuple of subsystem dimensions for the specified qubits.
"""
@abstractmethod
def matrix(
self, index: Sequence[int], outcome: int, qubits: Optional[Sequence[int]] = None
) -> np.ndarray:
"""Return the POVM element for the basis index and outcome.
This POVM element is used by tomography fitters for reconstruction and
should correspond to the target measurement effect for the corresponding
measurement :meth:`circuit` and outcome.
Args:
index: a list of subsystem basis indices.
outcome: the composite system measurement outcome.
qubits: Optional, the physical qubit subsystems for the index.
If None this will be set to ``(0, ..., N-1)`` for a
length N index.
Returns:
The POVM matrix for the specified index and qubits.
"""
|
qiskit-experiments/qiskit_experiments/library/tomography/basis/base_basis.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/library/tomography/basis/base_basis.py",
"repo_id": "qiskit-experiments",
"token_count": 2600
}
| 110 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Quantum state tomography analysis
"""
from qiskit_experiments.framework import Options
from .basis import PauliMeasurementBasis
from .tomography_analysis import TomographyAnalysis
class StateTomographyAnalysis(TomographyAnalysis):
"""State tomography experiment analysis.
# section: overview
Fitter Functions
Built-in fitter functions may be selected using the following string
labels, refer to the corresponding functions documentation for additional
details on the fitters.
* ``"linear_inversion"``:
:func:`~qiskit_experiments.library.tomography.fitters.linear_inversion` (Default)
* ``"cvxpy_linear_lstsq"``:
:func:`~qiskit_experiments.library.tomography.fitters.cvxpy_linear_lstsq`
* ``"cvxpy_gaussian_lstsq"``:
:func:`~qiskit_experiments.library.tomography.fitters.cvxpy_gaussian_lstsq`
PSD Rescaling
For fitters that do not constrain the reconstructed state to be
`positive-semidefinite` (PSD) we construct the maximum-likelihood
nearest PSD state under the assumption of Gaussian measurement noise
using the rescaling method in Reference [1]. For fitters that already
support PSD constraints this option can be disabled by setting
``rescale_positive=False``.
# section: warning
The API for tomography fitters is still under development so may change
in future releases.
# section: note
Fitters starting with ``"cvxpy_*"`` require the optional CVXPY Python
package to be installed.
# section: reference
.. ref_arxiv:: 1 1106.5458
"""
@classmethod
def _default_options(cls) -> Options:
"""Default analysis options
Analysis Options:
measurement_basis (:class:`~qiskit_experiments.library.tomography.basis.MeasurementBasis`):
The measurement
:class:`~qiskit_experiments.library.tomography.basis.MeasurementBasis`
to use for tomographic state reconstruction.
fitter (str or Callable): The fitter function to use for reconstruction.
This can be a string to select one of the built-in fitters, or a callable to
supply a custom fitter function. See the `Fitter Functions` section for
additional information.
fitter_options (dict): Any additional keyword arguments to be supplied to the fitter
function. For documentation of available kwargs refer to the fitter function
documentation.
rescale_positive (bool): If True rescale the state returned by the fitter
to be positive-semidefinite. See the `PSD Rescaling` section for
additional information (Default: True).
rescale_trace (bool): If True rescale the state returned by the fitter
to have either trace 1 for :class:`~qiskit.quantum_info.DensityMatrix`,
or trace dim for :class:`~qiskit.quantum_info.Choi` matrices (Default: True).
measurement_qubits (Sequence[int]): Optional, the physical qubits with tomographic
measurements. If not specified will be set to ``[0, ..., N-1]`` for N-qubit
tomographic measurements.
target (str or :class:`~qiskit.quantum_info.DensityMatrix`
or :class:`~qiskit.quantum_info.Statevector`): Optional,
set a custom target quantum state for computing the
:func:`~qiskit.quantum_info.state_fidelity`
of the fitted state against (Default: None).
conditional_circuit_clbits (list[int]): Optional, the clbit indices in the
source circuit to be conditioned on when reconstructing the state.
Enabling this will return a list of reconstructed state components
conditional on the values of these clbit values. The integer value of the
conditioning clbits is stored in state analysis result extra field
`"conditional_circuit_outcome"`.
conditional_measurement_indices (list[int]): Optional, indices of tomography
measurement qubits to be used for conditional state reconstruction. Enabling
this will return a list of reconstructed state components conditioned on
the basis index and outcome value of these measurements, with the remaining
tomographic bases used for the reconstruction. The conditional measurement basis index and
integer value of the measurement outcome are stored in the state analysis result
extra fields `"conditional_measurement_index"` and
`"conditional_measurement_outcome"` respectively.
"""
options = super()._default_options()
options.measurement_basis = PauliMeasurementBasis()
return options
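# Hedged usage sketch (not part of this module): configuring the analysis options
# documented above on a state tomography experiment. The Bell-state circuit and the
# choice of fitter are placeholders.
from qiskit import QuantumCircuit
from qiskit.quantum_info import Statevector
from qiskit_experiments.library import StateTomography
_bell = QuantumCircuit(2)
_bell.h(0)
_bell.cx(0, 1)
_exp = StateTomography(_bell)
_exp.analysis.set_options(
    fitter="linear_inversion",  # one of the built-in fitters listed above
    rescale_positive=True,      # project to the nearest PSD state if needed
    target=Statevector(_bell),  # optional target for fidelity computation
)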
|
qiskit-experiments/qiskit_experiments/library/tomography/qst_analysis.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/library/tomography/qst_analysis.py",
"repo_id": "qiskit-experiments",
"token_count": 2007
}
| 111 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Drawer abstract class."""
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import numpy as np
from qiskit_experiments.framework import Options
from ..style import PlotStyle
from ..utils import ExtentTuple
SeriesName = Union[str, int, float]
class BaseDrawer(ABC):
"""Abstract class for the serializable Qiskit Experiments figure drawer.
# section: overview
A drawer may be implemented by different drawer backends such as matplotlib or
Plotly. Subclasses that wrap these backends must
implement the following abstract methods.
.. describe:: initialize_canvas
This method should implement a protocol to initialize a drawer canvas with user
input ``axis`` object. Note that ``drawer`` supports visualization of experiment
results in multiple canvases tiled into N (row) x M (column) inset grids, which
is specified in the option ``subplots``. By default, this is N=1, M=1 and thus
no inset grid will be initialized.
This method should first check the drawer options (:attr:`options`) for the axis
object and initialize the axis only when it is not provided by the options. Once
axis is initialized, this is set to the instance member ``self._axis``.
.. describe:: format_canvas
This method formats the appearance of the canvas. Typically, it updates axis and
tick labels. Note that the axis SI unit may be specified in the drawer
figure_options. In this case, axis numbers should be auto-scaled with the unit
prefix.
.. rubric:: Drawing Methods
.. describe:: scatter
This method draws scatter points on the canvas, like a scatter-plot, with
optional error-bars in both the X and Y axes.
.. describe:: line
This method plots a line from provided X and Y values.
.. describe:: filled_y_area
This method plots a shaped region bounded by upper and lower Y-values. This
method is typically called with interpolated x and a pair of y values that
represent the upper and lower bound within certain confidence interval. If this
is called multiple times, it may be necessary to set the transparency so that
overlapping regions can be distinguished.
.. describe:: filled_x_area
This method plots a shaped region bounded by upper and lower X-values, as a
function of Y-values. This method is a rotated analogue of
:meth:`filled_y_area`.
.. describe:: textbox
This method draws a text-box on the canvas, which is a rectangular region
containing some text.
.. rubric:: Legends
Legends are generated based off of drawn graphics and their labels or names. These
are managed by individual drawer subclasses, and generated when the
:meth:`format_canvas` method is called. Legend entries are created when any drawing
function is called with ``legend=True``. There are three parameters in drawing
functions that are relevant to legend generation: ``name``, ``label``, and
``legend``. If a user would like the graphics drawn onto a canvas to be used as the
graphical component of a legend entry; they should set ``legend=True``. The legend
entry label can be defined in three locations: the ``label`` parameter of drawing
functions, the ``"label"`` entry in ``series_params``, and the ``name`` parameter of
drawing functions. These three possible label variables have a search hierarchy
given by the order in the aforementioned list. If one of the label variables is
``None``, the next is used. If all are ``None``, a legend entry is not generated for
the given series.
The recommended way to customize the legend entries is as follows (a short sketch follows the list):
1. Set the labels in the ``series_params`` option, keyed on the series names.
2. Initialize the canvas.
3. Call relevant drawing methods to create the figure. When calling the drawing
method that creates the graphic you would like to use in the legend, set
``legend=True``. For example, ``drawer.scatter(...,legend=True)`` would use
the scatter points as the legend graphics for the given series.
4. Format the canvas and call :meth:`figure` to get the figure.
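For example, with the matplotlib-based drawer (a hedged sketch; the series name
``"series_A"`` and the data values are placeholders):
.. code-block:: python
from qiskit_experiments.visualization import MplDrawer
drawer = MplDrawer()
drawer.set_figure_options(series_params={"series_A": {"label": "Series A"}})
drawer.initialize_canvas()
drawer.scatter([0, 1, 2], [0.1, 0.4, 0.9], name="series_A", legend=True)
drawer.format_canvas()
figure = drawer.figure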
.. rubric:: Options and Figure Options
Drawers have both :attr:`options` and :attr:`figure_options` available to set
parameters that define how to draw and what is drawn, respectively.
:class:`BasePlotter` is similar in that it also has ``options`` and
``figure_options``. The former contains class-specific variables that define how an
instance behaves. The latter contains figure-specific variables that typically
contain values that are drawn on the canvas, such as text. For details on the
difference between the two sets of options, see the documentation for
:class:`BasePlotter`.
.. note::
If a drawer instance is used with a plotter, then there is the potential for
any figure option to be overwritten with their value from the plotter. This
means that the drawer instance would be modified indirectly when the
:meth:`BasePlotter.figure` method is called. This must be kept in mind when
creating subclasses of :class:`BaseDrawer`.
"""
def __init__(self):
"""Create a BaseDrawer instance."""
# Normal options. Which includes the drawer axis, subplots, and default style.
self._options = self._default_options()
# A set of changed options for serialization.
self._set_options = set()
# Figure options which are typically updated by a plotter instance. Figure options include the
# axis labels, figure title, and a custom style instance.
self._figure_options = self._default_figure_options()
# A set of changed figure options for serialization.
self._set_figure_options = set()
# The initialized axis/axes, set by `initialize_canvas`.
self._axis = None
@property
def options(self) -> Options:
"""Return the drawer options."""
return self._options
@property
def figure_options(self) -> Options:
"""Return the figure options.
These are typically updated by a plotter instance, and thus may change. It is
recommended to set figure options in a parent :class:`BasePlotter` instance that
contains the :class:`BaseDrawer` instance.
"""
return self._figure_options
@classmethod
def _default_options(cls) -> Options:
"""Return default drawer options.
Options:
axis (Any): Arbitrary object that can be used as a canvas.
subplots (Tuple[int, int]): Number of rows and columns when the experimental
result is drawn in the multiple windows.
default_style (PlotStyle): The default style for drawer. This must contain
all required style parameters for :class:`drawer`, as is defined in
:meth:`PlotStyle.default_style()`. Subclasses can add extra required
style parameters by overriding :meth:`_default_style`.
"""
return Options(
axis=None,
subplots=(1, 1),
default_style=cls._default_style(),
)
@classmethod
def _default_style(cls) -> PlotStyle:
return PlotStyle.default_style()
@classmethod
def _default_figure_options(cls) -> Options:
"""Return default figure options.
Figure Options:
xlabel (Union[str, List[str]]): X-axis label string of the output figure. If
there are multiple columns in the canvas, this could be a list of labels.
ylabel (Union[str, List[str]]): Y-axis label string of the output figure. If
there are multiple rows in the canvas, this could be a list of labels.
xlim (Union[Tuple[float, float], List[Tuple[float, float]]): Min and max value
of the horizontal axis. If not provided, it is automatically scaled based
on the input data points. If there are multiple columns in the canvas,
this could be a list of xlims.
ylim (Union[Tuple[float, float], List[Tuple[float, float]]): Min and max value
of the vertical axis. If not provided, it is automatically scaled based
on the input data points. If there are multiple rows in the canvas,
this could be a list of ylims.
xval_unit (Union[str, List[str]]): Unit of x values.
No scaling prefix is needed here as this is controlled by ``xval_unit_scale``.
If there are multiple columns in the canvas, this could be a list of xval_units.
yval_unit (Union[str, List[str]]): Unit of y values.
No scaling prefix is needed here as this is controlled by ``yval_unit_scale``.
If there are multiple rows in the canvas, this could be a list of yval_units.
xval_unit_scale (Union[bool, List[bool]]): Whether to add an SI unit prefix to
``xval_unit`` if needed. For example, when the x values represent time and
``xval_unit="s"``, ``xval_unit_scale=True`` adds an SI unit prefix to
``"s"`` based on X values of plotted data. In the output figure, the
prefix is automatically selected based on the maximum value in this
axis. If your x values are in [1e-3, 1e-4], they are displayed as [1 ms,
0.1 ms]. By default, this option is set to ``True``. If ``False`` is
provided, the axis numbers will be displayed in the scientific notation.
If there are multiple columns in the canvas, this could be a list of xval_unit_scale.
yval_unit_scale (Union[bool, List[bool]]): Whether to add an SI unit prefix to
``yval_unit`` if needed. See ``xval_unit_scale`` for details.
If there are multiple rows in the canvas, this could be a list of yval_unit_scale.
xscale (str): The scaling of the x-axis, such as ``log`` or ``linear``.
yscale (str): The scaling of the y-axis, such as ``log`` or ``linear``.
figure_title (str): Title of the figure. Defaults to None, i.e. nothing is
shown.
sharex (bool): Set True to share x-axis ticks among sub-plots.
sharey (bool): Set True to share y-axis ticks among sub-plots.
series_params (Dict[str, Dict[str, Any]]): A dictionary of parameters for
each series. This is keyed on the name for each series. Sub-dictionary
is expected to have the following three configurations, "canvas",
"color", "symbol" and "label"; "canvas" is the integer index of axis
(when multi-canvas plot is set), "color" is the color of the drawn
graphics, "symbol" is the series marker style for scatter plots, and
"label" is a user provided series label that appears in the legend.
custom_style (PlotStyle): The style definition to use when drawing. This
overwrites style parameters in ``default_style`` in :attr:`options`.
Defaults to an empty PlotStyle instance (i.e., ``PlotStyle()``).
"""
options = Options(
xlabel=None,
ylabel=None,
xlim=None,
ylim=None,
xval_unit=None,
yval_unit=None,
xval_unit_scale=True,
yval_unit_scale=True,
xscale=None,
yscale=None,
sharex=True,
sharey=True,
figure_title=None,
series_params={},
custom_style=PlotStyle(),
)
options.set_validator("xscale", ["linear", "log", "symlog", "logit", "quadratic", None])
options.set_validator("yscale", ["linear", "log", "symlog", "logit", "quadratic", None])
return options
def set_options(self, **fields):
"""Set the drawer options.
Args:
fields: The fields to update the options
Raises:
AttributeError: If an unknown options is encountered.
"""
for field in fields:
if not hasattr(self._options, field):
raise AttributeError(
f"Options field {field} is not valid for {type(self).__name__}"
)
self._options.update_options(**fields)
self._set_options = self._set_options.union(fields)
def set_figure_options(self, **fields):
"""Set the figure options.
Args:
fields: The fields to update the figure options
Raises:
AttributeError: If an unknown figure option is encountered.
"""
for field in fields:
if not hasattr(self._figure_options, field):
raise AttributeError(
f"Figure options field {field} is not valid for {type(self).__name__}"
)
self._figure_options.update_options(**fields)
self._set_figure_options = self._set_figure_options.union(fields)
@property
def style(self) -> PlotStyle:
"""The combined plot style for this drawer.
The returned style instance is a combination of :attr:`options.default_style`
and :attr:`figure_options.custom_style`. Style parameters set in
``custom_style`` override those set in ``default_style``. If ``custom_style`` is
not an instance of :class:`PlotStyle`, the returned style is equivalent to
``default_style``.
Returns:
The plot style for this drawer.
"""
if isinstance(self.figure_options.custom_style, PlotStyle):
return PlotStyle.merge(self.options.default_style, self.figure_options.custom_style)
return self.options.default_style
@abstractmethod
def initialize_canvas(self):
"""Initialize the drawer canvas."""
@abstractmethod
def format_canvas(self):
"""Final cleanup for the canvas appearance."""
def label_for(self, name: Optional[SeriesName], label: Optional[SeriesName]) -> Optional[str]:
"""Get the legend label for the given series, with optional overrides.
This method determines the legend label for a series, with optional overrides
``label`` and the ``"label"`` entry in the ``series_params`` option (see
:attr:`options`). ``label`` is returned if it is not ``None``, as this is the
override with the highest priority. If it is ``None``, then the drawer will look
for a ``"label"`` entry in ``series_params``, for the series identified by
``name``. If this entry doesn't exist, or is ``None``, then ``name`` is used as
the label. If all these options are ``None``, then ``None`` is returned;
signifying that a legend entry for the provided series should not be generated.
Note that :meth:`label_for` will convert ``name`` to ``str`` when it is
returned.
Args:
name: The name of the series.
label: Optional label override.
Returns:
The legend entry label, or ``None``.
"""
if label is not None:
return str(label)
if name is not None:
return self.figure_options.series_params.get(name, {}).get("label", str(name))
return None
@abstractmethod
def scatter(
self,
x_data: Sequence[float],
y_data: Sequence[float],
x_err: Optional[Sequence[float]] = None,
y_err: Optional[Sequence[float]] = None,
name: Optional[SeriesName] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Draw scatter points, with optional error-bars.
Args:
x_data: X values.
y_data: Y values.
x_err: Optional error for X values.
y_err: Optional error for Y values.
name: Name of this series.
label: Optional legend label to override ``name`` and ``series_params``.
legend: Whether the drawn area must have a legend entry. Defaults to False.
The series label in the legend will be ``label`` if it is not None. If
it is, then ``series_params`` is searched for a ``"label"`` entry for
the series identified by ``name``. If this is also ``None``, then
``name`` is used as the fallback. If no ``name`` is provided, then no
legend entry is generated.
options: Valid options for the drawer backend API.
"""
@abstractmethod
def line(
self,
x_data: Sequence[float],
y_data: Sequence[float],
name: Optional[SeriesName] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Draw a line.
Args:
x_data: X values.
y_data: Y values.
name: Name of this series.
label: Optional legend label to override ``name`` and ``series_params``.
legend: Whether the drawn area must have a legend entry. Defaults to False.
The series label in the legend will be ``label`` if it is not None. If
it is, then ``series_params`` is searched for a ``"label"`` entry for
the series identified by ``name``. If this is also ``None``, then
``name`` is used as the fallback. If no ``name`` is provided, then no
legend entry is generated.
options: Valid options for the drawer backend API.
"""
@abstractmethod
def hline(
self,
y_value: float,
name: Optional[SeriesName] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Draw a horizontal line.
Args:
y_value: Y value for line.
name: Name of this series.
label: Optional legend label to override ``name`` and ``series_params``.
legend: Whether the drawn area must have a legend entry. Defaults to False.
The series label in the legend will be ``label`` if it is not None. If
it is, then ``series_params`` is searched for a ``"label"`` entry for
the series identified by ``name``. If this is also ``None``, then
``name`` is used as the fallback. If no ``name`` is provided, then no
legend entry is generated.
options: Valid options for the drawer backend API.
"""
@abstractmethod
def filled_y_area(
self,
x_data: Sequence[float],
y_ub: Sequence[float],
y_lb: Sequence[float],
name: Optional[SeriesName] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Draw filled area as a function of x-values.
Args:
x_data: X values.
y_ub: The upper boundary of Y values.
y_lb: The lower boundary of Y values.
name: Name of this series.
label: Optional legend label to override ``name`` and ``series_params``.
legend: Whether the drawn area must have a legend entry. Defaults to False.
The series label in the legend will be ``label`` if it is not None. If
it is, then ``series_params`` is searched for a ``"label"`` entry for
the series identified by ``name``. If this is also ``None``, then
``name`` is used as the fallback. If no ``name`` is provided, then no
legend entry is generated.
options: Valid options for the drawer backend API.
"""
@abstractmethod
def filled_x_area(
self,
x_ub: Sequence[float],
x_lb: Sequence[float],
y_data: Sequence[float],
name: Optional[SeriesName] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Draw filled area as a function of y-values.
Args:
x_ub: The upper boundary of X values.
x_lb: The lower boundary of X values.
y_data: Y values.
name: Name of this series.
label: Optional legend label to override ``name`` and ``series_params``.
legend: Whether the drawn area must have a legend entry. Defaults to False.
The series label in the legend will be ``label`` if it is not None. If
it is, then ``series_params`` is searched for a ``"label"`` entry for
the series identified by ``name``. If this is also ``None``, then
``name`` is used as the fallback. If no ``name`` is provided, then no
legend entry is generated.
options: Valid options for the drawer backend API.
"""
@abstractmethod
def textbox(
self,
description: str,
rel_pos: Optional[Tuple[float, float]] = None,
**options,
):
"""Draw text box.
Args:
description: A string to be drawn inside a report box.
rel_pos: Relative position of the text-box. If None, the default
``textbox_rel_pos`` from the style is used.
options: Valid options for the drawer backend API.
"""
@abstractmethod
def image(
self,
data: np.ndarray,
extent: Optional[ExtentTuple] = None,
name: Optional[SeriesName] = None,
label: Optional[str] = None,
cmap: Optional[Union[str, Any]] = None,
cmap_use_series_colors: bool = False,
colorbar: bool = False,
**options,
):
"""Draw an image of numerical values, series names, or RGB/A values.
Args:
data: The two-/three-dimensional data defining an image. If
``data.ndim == 2``, then the pixel colors are determined by ``cmap`` and
``cmap_use_series_colors``. If ``data.ndim == 3``, then it is assumed that
``data`` contains either RGB or RGBA data; which requires the third
dimension to have length ``3`` or ``4`` respectively. For RGB/A data,
the elements of ``data`` must be floats or integers in the range 0-1 and
0-255 respectively. If the data is two-dimensional, there is no limit on
the range of the values if they are numerical. If
``cmap_use_series_colors=True``, then ``data`` contains series names;
which can be strings or numerical values, as long as they are
appropriate series names.
extent: An optional tuple ``(x_min, x_max, y_min, y_max)`` which defines a
rectangular region within which the values inside ``data`` should be
plotted. The units of ``extent`` are the same as those of the X and Y
axes for the axis. If None, the extent of the image is taken as ``(0,
data.shape[0], 0, data.shape[1])``. Default is None.
name: Name of this image. Used to lookup ``canvas`` and ``label`` in
``series_params``.
label: An optional label for the colorbar, if ``colorbar=True``.
cmap: Optional colormap for assigning colors to the image values, if
``data`` is not an RGB/A image. ``cmap`` must be a string or object
instance which is recognized by the drawer. Defaults to None.
cmap_use_series_colors: Whether to assign colors to the image based on
series colors, where the values inside ``data`` are series names. If
``cmap_use_series_colors=True``, ``cmap`` is ignored. This only works for
two-dimensional images as three-dimensional ``data`` contains explicit
colors as RGB/A values. If ``len(data.shape) == 3``,
``cmap_use_series_colors`` is ignored. Defaults to False.
colorbar: Whether to add a bar showing the color-value mapping for the
image. Defaults to False.
options: Valid options for the drawer backend API.
"""
@property
@abstractmethod
def figure(self):
"""Return figure object handler to be saved in the database."""
def config(self) -> Dict:
"""Return the config dictionary for this drawer."""
options = dict((key, getattr(self._options, key)) for key in self._set_options)
figure_options = dict(
(key, getattr(self._figure_options, key)) for key in self._set_figure_options
)
return {
"cls": type(self),
"options": options,
"figure_options": figure_options,
}
def __json_encode__(self):
return self.config()
@classmethod
def __json_decode__(cls, value):
instance = cls()
if "options" in value:
instance.set_options(**value["options"])
if "figure_options" in value:
instance.set_figure_options(**value["figure_options"])
return instance
|
qiskit-experiments/qiskit_experiments/visualization/drawers/base_drawer.py/0
|
{
"file_path": "qiskit-experiments/qiskit_experiments/visualization/drawers/base_drawer.py",
"repo_id": "qiskit-experiments",
"token_count": 10075
}
| 112 |
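The ``config``/``__json_encode__``/``__json_decode__`` hooks above imply a simple settings round-trip for any concrete drawer. A minimal sketch of that pattern, assuming the Matplotlib-based ``MplDrawer`` shipped in the same visualization package and its standard ``xlabel``/``ylabel`` figure options:
from qiskit_experiments.visualization import MplDrawer
drawer = MplDrawer()
drawer.set_figure_options(xlabel="Frequency (Hz)", ylabel="Signal")
# config() records only the explicitly set options, keyed exactly as in the code above
config = drawer.config()
restored = config["cls"]()  # equivalent to what __json_decode__ does internally
restored.set_figure_options(**config["figure_options"])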
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test parameter guess functions."""
# pylint: disable=invalid-name
from test.base import QiskitExperimentsTestCase
import numpy as np
from ddt import ddt, data, unpack
from qiskit_experiments.curve_analysis import guess
@ddt
class TestGuesses(QiskitExperimentsTestCase):
"""Test for initial guess functions."""
__tolerance_percent__ = 0.2
def assertAlmostEqualAbsolute(self, value: float, ref_value: float):
"""A helper validation function that assumes relative error tolerance."""
delta = TestGuesses.__tolerance_percent__ * np.abs(ref_value)
self.assertAlmostEqual(value, ref_value, delta=delta)
@data(1.1, 2.0, 1.6, -1.4, 4.5)
def test_frequency(self, freq: float):
"""Test for frequency guess."""
x = np.linspace(-1, 1, 101)
y = 0.3 * np.cos(2 * np.pi * freq * x + 0.5) + 1.2
freq_guess = guess.frequency(x, y)
self.assertAlmostEqualAbsolute(freq_guess, np.abs(freq))
@data(1.1, 2.0, 1.6, -1.4, 4.5)
def test_frequency_with_non_uniform_sampling(self, freq: float):
"""Test for frequency guess with non uniform x value."""
x = np.concatenate((np.linspace(-1, 0, 15), np.linspace(0.1, 1, 30)))
y = 0.3 * np.cos(2 * np.pi * freq * x + 0.5) + 1.2
freq_guess = guess.frequency(x, y)
self.assertAlmostEqualAbsolute(freq_guess, np.abs(freq))
@data(
[0.20928722, -0.40958197, 0.29898025, 0.45622079, -0.33379813],
[-0.41245894, -0.42868717, -0.17165843, -0.28708211, -0.25228829],
[0.01775771, 0.47539627, 0.1101062, 0.38296899, -0.22005228],
)
def test_max(self, test_values):
"""Test max value."""
max_guess, idx = guess.max_height(test_values)
ref_val = max(test_values)
ref_idx = np.argmax(test_values)
self.assertEqual(max_guess, ref_val)
self.assertEqual(idx, ref_idx)
max_guess, idx = guess.max_height(test_values, absolute=True)
ref_val = max(np.absolute(test_values))
ref_idx = np.argmax(np.absolute(test_values))
self.assertEqual(max_guess, ref_val)
self.assertEqual(idx, ref_idx)
max_guess, idx = guess.max_height(test_values, percentile=80)
ref_val = np.percentile(test_values, 80)
ref_idx = np.argmin(np.abs(test_values - ref_val))
self.assertEqual(max_guess, ref_val)
self.assertEqual(idx, ref_idx)
@data(
[0.20928722, -0.40958197, 0.29898025, 0.45622079, -0.33379813],
[-0.41245894, -0.42868717, -0.17165843, -0.28708211, -0.25228829],
[0.01775771, 0.47539627, 0.1101062, 0.38296899, -0.22005228],
)
def test_min(self, test_values):
"""Test min value."""
min_guess, idx = guess.min_height(test_values)
ref_val = min(test_values)
ref_idx = np.argmin(test_values)
self.assertEqual(min_guess, ref_val)
self.assertEqual(idx, ref_idx)
min_guess, idx = guess.min_height(test_values, absolute=True)
ref_val = min(np.absolute(test_values))
ref_idx = np.argmin(np.absolute(test_values))
self.assertEqual(min_guess, ref_val)
self.assertEqual(idx, ref_idx)
min_guess, idx = guess.min_height(test_values, percentile=20)
ref_val = np.percentile(test_values, 20)
ref_idx = np.argmin(np.abs(test_values - ref_val))
self.assertEqual(min_guess, ref_val)
self.assertEqual(idx, ref_idx)
@data(1.2, -0.6, 0.1, 3.5, -4.1, 3.0)
def test_exp_decay(self, alpha: float):
"""Test for exponential decay guess."""
x = np.linspace(0, 1, 100)
y = np.exp(alpha * x)
alpha_guess = guess.exp_decay(x, y)
self.assertAlmostEqualAbsolute(alpha_guess, alpha)
def test_exp_decay_with_invalid_y(self):
"""Test when invalid y data is input to exp curve init guess."""
x = np.array([9.0e-06, 1.9e-05, 2.9e-05, 3.9e-05])
y = np.array([0.16455749, 0.07045296, 0.02702439, -0.00135192])
# The last point is excluded. This point might be some artifact due to filtering.
alpha_guess = guess.exp_decay(x, y)
np.testing.assert_almost_equal(alpha_guess, -90326, decimal=0)
@data([1.2, 1.4], [-0.6, 2.5], [0.1, 2.3], [3.5, 1.1], [-4.1, 6.5], [3.0, 1.2])
@unpack
def test_exp_osci_decay(self, alpha, freq):
"""Test of exponential decay guess with oscillation."""
x = np.linspace(0, 1, 100)
y = np.exp(alpha * x) * np.cos(2 * np.pi * freq * x)
alpha_guess = guess.oscillation_exp_decay(x, y)
self.assertAlmostEqualAbsolute(alpha_guess, alpha)
@data(
[10, 1.0, 0.5],
[50, 1.2, 0.2],
[80, -1.2, 0.6],
[30, -0.2, 0.4],
[40, 3.2, 0.3],
[20, -0.4, 0.8],
)
@unpack
def test_linewidth_spect(self, idx, a, fwhm):
"""Test of linewidth of peaks."""
x = np.linspace(-1, 1, 100)
sigma = fwhm / np.sqrt(8 * np.log(2))
y = a * np.exp(-((x - x[idx]) ** 2) / (2 * sigma**2))
lw_guess = guess.full_width_half_max(x, y, idx)
self.assertAlmostEqual(fwhm, lw_guess, delta=0.1)
@data(
[0.1, 0.0, 1.0, 0.5],
[-0.3, 0.6, 1.2, 0.2],
[0.2, -0.8, -1.2, 0.6],
[0.9, 0.2, -0.2, 0.4],
[0.6, 0.1, 3.2, 0.3],
[-0.7, -0.4, -1.6, 0.8],
)
@unpack
def test_baseline_spect(self, b0, x0, a, fwhm):
"""Test of baseline of peaks."""
x = np.linspace(-1, 1, 100)
sigma = fwhm / np.sqrt(8 * np.log(2))
y = a * np.exp(-((x - x0) ** 2) / (2 * sigma**2)) + b0
b0_guess = guess.constant_spectral_offset(y)
self.assertAlmostEqual(b0, b0_guess, delta=0.1)
@data(
[0.1, 0.0, 1.0, 1.3],
[-0.3, 0.6, 1.2, 0.4],
[0.2, -0.8, -1.2, 3.6],
[0.9, 0.2, -0.2, 0.3],
[0.6, 0.1, 3.2, 0.8],
[-0.7, -0.4, -1.6, 1.2],
)
@unpack
def test_baseline_sinusoidal(self, b0, x0, a, freq):
"""Test of baseline of sinusoidal signal."""
x = np.linspace(-1, 1, 100)
y = a * np.cos(2 * np.pi * freq * (x - x0)) + b0
b0_guess = guess.constant_sinusoidal_offset(y)
self.assertAlmostEqual(b0, b0_guess, delta=0.1)
@data(
# typical 1Q
[0.5, 0.5, 0.99],
# typical 2Q
[0.25, 0.75, 0.97],
# alpha around equation switching
[0.48, 0.46, 0.85],
# bad limit
[0.20, 0.36, 0.72],
[0.55, 0.40, 0.65],
)
@unpack
def test_rb_decay(self, a, b, alpha):
"""Test of rb decay basis guess."""
x = np.arange(1, 100, 5)
y = a * alpha**x + b
alpha_guess = guess.rb_decay(x, y, b=b)
self.assertAlmostEqual(alpha, alpha_guess, delta=alpha * 0.1)
def test_rb_decay_with_very_bad_output(self):
"""Test if rb decay guess does not raise an error even for very bad outputs."""
x = np.array([1, 2, 3])
y = np.array([0.24, 0.22, 0.23]) # all are below b
out = guess.rb_decay(x=x, y=y, b=0.25)
self.assertEqual(out, 0.0)
|
qiskit-experiments/test/curve_analysis/test_guess.py/0
|
{
"file_path": "qiskit-experiments/test/curve_analysis/test_guess.py",
"repo_id": "qiskit-experiments",
"token_count": 3834
}
| 113 |
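The tests above drive the initial-guess helpers with synthetic data; the same pattern works standalone. A minimal sketch using only functions that appear in the tests (``guess.frequency`` and ``guess.constant_sinusoidal_offset``):
import numpy as np
from qiskit_experiments.curve_analysis import guess
x = np.linspace(-1, 1, 101)
y = 0.3 * np.cos(2 * np.pi * 2.0 * x + 0.5) + 1.2
print(guess.frequency(x, y))                # approximately 2.0
print(guess.constant_sinusoidal_offset(y))  # approximately 1.2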
# This code is part of Qiskit.
#
# (C) Copyright IBM 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test rough amplitude calibration experiment classes."""
from test.base import QiskitExperimentsTestCase
from qiskit import pulse
from qiskit.circuit import Parameter
from qiskit_experiments.exceptions import CalibrationError
from qiskit_experiments.calibration_management.basis_gate_library import FixedFrequencyTransmon
from qiskit_experiments.calibration_management import Calibrations
from qiskit_experiments.library import HalfAngleCal
from qiskit_experiments.test.pulse_backend import SingleTransmonTestBackend
class TestHalfAngleCal(QiskitExperimentsTestCase):
"""A class to test the half angle calibration experiments."""
def setUp(self):
"""Setup the tests."""
super().setUp()
library = FixedFrequencyTransmon()
self.backend = SingleTransmonTestBackend(noise=False, atol=1e-3)
self.cals = Calibrations.from_backend(self.backend, libraries=[library])
def test_amp_parameter_error(self):
"""Test that setting cal_parameter_name to amp raises an error"""
with self.assertRaises(CalibrationError):
HalfAngleCal([0], self.cals, cal_parameter_name="amp")
def test_angle_parameter_missing_error(self):
"""Test that default cal_parameter_name with no matching parameter raises an error"""
cals_no_angle = Calibrations()
dur = Parameter("dur")
amp = Parameter("amp")
        sigma = Parameter("σ")
        beta = Parameter("β")
drive = pulse.DriveChannel(Parameter("ch0"))
with pulse.build(name="sx") as sx:
pulse.play(pulse.Drag(dur, amp, sigma, beta), drive)
cals_no_angle.add_schedule(sx, num_qubits=1)
with self.assertRaises(CalibrationError):
HalfAngleCal([0], cals_no_angle)
def test_circuits_roundtrip_serializable(self):
"""Test circuits serialization of the experiment."""
exp = HalfAngleCal([0], self.cals, backend=self.backend)
self.assertRoundTripSerializable(exp._transpiled_circuits())
|
qiskit-experiments/test/library/calibration/test_half_angle.py/0
|
{
"file_path": "qiskit-experiments/test/library/calibration/test_half_angle.py",
"repo_id": "qiskit-experiments",
"token_count": 883
}
| 114 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test Tphi experiment.
"""
from test.base import QiskitExperimentsTestCase
from qiskit.exceptions import QiskitError
from qiskit_experiments.library import Tphi, T2Hahn, T2Ramsey
from qiskit_experiments.test.noisy_delay_aer_simulator import NoisyDelayAerBackend
from qiskit_experiments.library.characterization.analysis import (
TphiAnalysis,
T2RamseyAnalysis,
T2HahnAnalysis,
)
class TestTphi(QiskitExperimentsTestCase):
"""Test Tphi experiment."""
__tolerance__ = 0.1
def test_tphi_ramsey_end_to_end(self):
"""
Run a complete Tphi experiment with T2ramsey on a fake Tphi backend.
"""
delays_t1 = list(range(1, 40, 3))
delays_t2 = list(range(1, 51, 2))
exp = Tphi(
physical_qubits=[0],
delays_t1=delays_t1,
delays_t2=delays_t2,
t2type="ramsey",
osc_freq=0.1,
)
t1 = 20
t2ramsey = 25
backend = NoisyDelayAerBackend([t1], [t2ramsey])
expdata = exp.run(backend=backend, seed_simulator=1)
self.assertExperimentDone(expdata)
self.assertRoundTripSerializable(expdata)
self.assertRoundTripPickle(expdata)
result = expdata.analysis_results("T_phi")
estimated_tphi = 1 / ((1 / t2ramsey) - (1 / (2 * t1)))
self.assertAlmostEqual(
result.value.nominal_value,
estimated_tphi,
delta=TestTphi.__tolerance__ * result.value.nominal_value,
)
self.assertEqual(result.quality, "good", "Result quality bad")
def test_tphi_with_changing_params(self):
"""
Run Tphi experiment, then set new delay values in set_experiment_options, and check
that the new experiment has the correct delay values.
"""
delays_t1 = list(range(1, 40, 3))
delays_t2 = list(range(1, 50, 2))
exp = Tphi(
physical_qubits=[0],
delays_t1=delays_t1,
delays_t2=delays_t2,
t2type="ramsey",
osc_freq=0.1,
)
t1 = 20
t2ramsey = 25
backend = NoisyDelayAerBackend([t1], [t2ramsey])
expdata = exp.run(backend=backend, seed_simulator=1)
self.assertExperimentDone(expdata)
# Extract x values from metadata
x_values_t1 = []
x_values_t2 = []
for datum in expdata.data():
metadata = datum["metadata"]
xval = metadata["composite_metadata"][0]["xval"]
if metadata["composite_index"][0] == 0:
x_values_t1.append(xval)
else:
x_values_t2.append(xval)
self.assertListEqual(x_values_t1, delays_t1, "Incorrect delays_t1")
self.assertListEqual(x_values_t2, delays_t2, "Incorrect delays_t2")
new_delays_t1 = list(range(1, 45, 3))
new_delays_t2 = list(range(1, 55, 2))
new_osc_freq = 0.2
exp.set_experiment_options(
delays_t1=new_delays_t1, delays_t2=new_delays_t2, osc_freq=new_osc_freq
)
expdata = exp.run(backend=backend, seed_simulator=1)
self.assertExperimentDone(expdata)
# Extract x values from metadata
x_values_t1 = []
x_values_t2 = []
new_freq_t2 = expdata.metadata["component_metadata"][1]["osc_freq"]
for datum in expdata.data():
metadata = datum["metadata"]
xval = metadata["composite_metadata"][0]["xval"]
if metadata["composite_index"][0] == 0:
x_values_t1.append(xval)
else:
x_values_t2.append(xval)
self.assertListEqual(x_values_t1, new_delays_t1, "Incorrect delays_t1")
self.assertListEqual(x_values_t2, new_delays_t2, "Incorrect delays_t2")
self.assertEqual(new_freq_t2, new_osc_freq, "Option osc_freq not set correctly")
def test_tphi_t2_option(self):
"""Test that Tphi switches between T2Ramsey and T2Hahn correctly."""
delays_t1 = list(range(1, 40, 3))
delays_t2 = list(range(1, 50, 2))
exp = Tphi(physical_qubits=[0], delays_t1=delays_t1, delays_t2=delays_t2, t2type="ramsey")
self.assertTrue(isinstance(exp.component_experiment(1), T2Ramsey))
self.assertTrue(isinstance(exp.analysis.component_analysis(1), T2RamseyAnalysis))
with self.assertRaises(QiskitError): # T2Ramsey should not allow a T2Hahn option
exp.set_experiment_options(num_echoes=1)
exp = Tphi(physical_qubits=[0], delays_t1=delays_t1, delays_t2=delays_t2)
self.assertTrue(isinstance(exp.component_experiment(1), T2Hahn))
self.assertTrue(isinstance(exp.analysis.component_analysis(1), T2HahnAnalysis))
with self.assertRaises(QiskitError): # T2Hahn should not allow a T2ramsey option
exp.set_experiment_options(osc_freq=0.0)
def test_roundtrip_serializable(self):
"""Test round trip JSON serialization"""
exp = Tphi([0], [1], [2])
self.assertRoundTripSerializable(exp)
exp = Tphi([0], [1], [2], "hahn", 3)
self.assertRoundTripSerializable(exp)
exp = Tphi([0], [1], [2], "ramsey", 0)
self.assertRoundTripSerializable(exp)
def test_circuits_roundtrip_serializable(self):
"""Test round trip JSON serialization"""
exp = Tphi([0], [1], [2])
self.assertRoundTripSerializable(exp._transpiled_circuits())
exp = Tphi([0], [1], [2], "hahn", 3)
self.assertRoundTripSerializable(exp._transpiled_circuits())
exp = Tphi([0], [1], [2], "ramsey", 0)
self.assertRoundTripSerializable(exp._transpiled_circuits())
def test_analysis_config(self):
"""Test converting analysis to and from config works"""
analysis = TphiAnalysis()
loaded_analysis = analysis.from_config(analysis.config())
self.assertNotEqual(analysis, loaded_analysis)
self.assertEqual(analysis.config(), loaded_analysis.config())
|
qiskit-experiments/test/library/characterization/test_tphi.py/0
|
{
"file_path": "qiskit-experiments/test/library/characterization/test_tphi.py",
"repo_id": "qiskit-experiments",
"token_count": 2937
}
| 115 |
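For reference, the ``estimated_tphi`` expression used in the end-to-end test above is the standard pure-dephasing relation 1/T_phi = 1/T_2 - 1/(2*T_1). A short worked example with the same backend values as the test:
t1, t2_ramsey = 20, 25
t_phi = 1 / ((1 / t2_ramsey) - (1 / (2 * t1)))
print(t_phi)  # about 66.7, the reference value the fitted T_phi is compared against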
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Mock drawer for testing.
"""
from typing import Any, Optional, Sequence, Tuple, Union
from qiskit_experiments.visualization import BaseDrawer, PlotStyle
from qiskit_experiments.visualization.utils import ExtentTuple
class MockDrawer(BaseDrawer):
"""Mock drawer for visualization tests.
Most methods of this class do nothing.
"""
@property
def figure(self):
"""Does nothing."""
pass
@classmethod
def _default_style(cls) -> PlotStyle:
"""Default style.
Style Param:
overwrite_param: A test style parameter to be overwritten by a test.
"""
style = super()._default_style()
style["overwrite_param"] = "overwrite_param"
return style
def initialize_canvas(self):
"""Does nothing."""
pass
def format_canvas(self):
"""Does nothing."""
pass
def scatter(
self,
x_data: Sequence[float],
y_data: Sequence[float],
x_err: Optional[Sequence[float]] = None,
y_err: Optional[Sequence[float]] = None,
name: Optional[str] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Does nothing."""
pass
def line(
self,
x_data: Sequence[float],
y_data: Sequence[float],
name: Optional[str] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Does nothing."""
pass
def hline(
self,
y_value: float,
name: Optional[str] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Does nothing."""
pass
def filled_y_area(
self,
x_data: Sequence[float],
y_ub: Sequence[float],
y_lb: Sequence[float],
name: Optional[str] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Does nothing."""
pass
def filled_x_area(
self,
x_ub: Sequence[float],
x_lb: Sequence[float],
y_data: Sequence[float],
name: Optional[str] = None,
label: Optional[str] = None,
legend: bool = False,
**options,
):
"""Does nothing."""
pass
def textbox(
self,
description: str,
rel_pos: Optional[Tuple[float, float]] = None,
**options,
):
"""Does nothing."""
pass
def image(
self,
data: "numpy.ndarray",
extent: Optional[ExtentTuple] = None,
name: Optional[str] = None,
label: Optional[str] = None,
cmap: Optional[Union[str, Any]] = None,
cmap_use_series_colors: bool = False,
colorbar: bool = False,
**options,
):
"""Does nothing."""
pass
|
qiskit-experiments/test/visualization/mock_drawer.py/0
|
{
"file_path": "qiskit-experiments/test/visualization/mock_drawer.py",
"repo_id": "qiskit-experiments",
"token_count": 1488
}
| 116 |
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
- family-names: "The Qiskit Nature developers and contributors"
title: "Qiskit Nature"
version: 0.6.0
doi: 10.5281/zenodo.7828767
date-released: 2023-04-14
url: "https://github.com/qiskit-community/qiskit-nature"
|
qiskit-nature/CITATION.cff/0
|
{
"file_path": "qiskit-nature/CITATION.cff",
"repo_id": "qiskit-nature",
"token_count": 110
}
| 117 |
{% if referencefile %}
.. include:: {{ referencefile }}
{% endif %}
{{ objname }}
{{ underline }}
.. currentmodule:: {{ module }}
.. autoclass:: {{ objname }}
:show-inheritance:
:no-members:
:no-inherited-members:
:no-special-members:
{% block attributes_summary %}
{% if attributes %}
.. rubric:: Attributes
{% for item in all_attributes %}
{%- if item not in inherited_members %}
{%- if not item.startswith('_') %}
.. autoattribute:: {{ name }}.{{ item }}
{%- endif -%}
{%- endif %}
{%- endfor %}
{% endif %}
{% endblock %}
{% block methods_summary %}
{% if methods %}
.. rubric:: Methods
{% for item in all_methods %}
{%- if item not in inherited_members %}
{%- if not item.startswith('_') %}
.. automethod:: {{ name }}.{{ item }}
{%- endif -%}
{%- endif %}
{%- endfor %}
{% endif %}
{% endblock %}
|
qiskit-nature/docs/_templates/autosummary/class_no_inherited_members.rst/0
|
{
"file_path": "qiskit-nature/docs/_templates/autosummary/class_no_inherited_members.rst",
"repo_id": "qiskit-nature",
"token_count": 386
}
| 118 |
Watson
=====================================
.. automodule:: qiskit_nature.second_q.formats.watson
:no-members:
:no-inherited-members:
:no-special-members:
|
qiskit-nature/docs/apidocs/qiskit_nature.second_q.formats.watson.rst/0
|
{
"file_path": "qiskit-nature/docs/apidocs/qiskit_nature.second_q.formats.watson.rst",
"repo_id": "qiskit-nature",
"token_count": 62
}
| 119 |
Lattice Model Problems with v0.5
================================
The lattice models have only received some minor updates in this release
of Qiskit Nature.
One **important** change is that since all lattice models now implement
the new ``Hamiltonian`` interface rather than the old ``Property`` one,
you obtain the actual operator via the ``second_q_op()`` method instead
of ``second_q_ops()`` (Note the trailing ``s``).
Overview
~~~~~~~~
Most notably, the imports have changed as follows:
+----------------------------------------------------------------------------+---------------------------------------------------------+
| Legacy location | New location |
+============================================================================+=========================================================+
| ``qiskit_nature.problems.second_quantization.lattice.models`` | ``qiskit_nature.second_q.hamiltonians`` |
+----------------------------------------------------------------------------+---------------------------------------------------------+
| ``qiskit_nature.problems.second_quantization.lattice.lattices`` | ``qiskit_nature.second_q.hamiltonians.lattices`` |
+----------------------------------------------------------------------------+---------------------------------------------------------+
| ``qiskit_nature.problems.second_quantization.lattice.LatticeModelProblem`` | ``qiskit_nature.second_q.problems.LatticeModelProblem`` |
+----------------------------------------------------------------------------+---------------------------------------------------------+
Furthermore, the factory methods for ``Lattice`` objects defined on the
``LatticeModel`` classes were migrated as follows:
+------------------------------------------+-----------------------------------+
| Legacy method | New method |
+==========================================+===================================+
| ``FermiHubbardModel.uniform_parameters`` | ``Lattice.uniform_parameters`` |
+------------------------------------------+-----------------------------------+
| ``FermiHubbardModel.from_parameters`` | ``Lattice.from_adjacency_matrix`` |
+------------------------------------------+-----------------------------------+
| ``IsingModel.uniform_parameters`` | ``Lattice.uniform_parameters`` |
+------------------------------------------+-----------------------------------+
| ``IsingModel.from_parameters`` | ``Lattice.from_adjacency_matrix`` |
+------------------------------------------+-----------------------------------+
Further Resources
~~~~~~~~~~~~~~~~~
Be sure to check out the following tutorials for how to use the new
code:
- `Lattice Models <../tutorials/10_lattice_models.ipynb>`__
``FermiHubbardModel.uniform_parameters``
----------------------------------------
Previously
~~~~~~~~~~
.. code:: ipython3
from qiskit_nature.problems.second_quantization.lattice.lattices import LineLattice
from qiskit_nature.problems.second_quantization.lattice.models import FermiHubbardModel
line = LineLattice(2)
fermi = FermiHubbardModel.uniform_parameters(line, 2.0, 4.0, 3.0)
print(fermi.second_q_ops()) # Note: the trailing `s`
.. parsed-literal::
Fermionic Operator
register length=4, number terms=10
(2+0j) * ( +_0 -_2 )
+ (-2+0j) * ( -_0 +_2 )
+ (4+0j) * ( +_0 -_0 )
+ (4+0j) * ( +_2 -_2 )
+ (2+0j) * ( +_1 -_3 )
+ (-2+0j) * ( -_1 +_3 )
+ (4+0j) * ( +_1 -_1 )
+ (4+0j) * ( +_3 -_3 )
+ (3+0j) * ...
New
~~~
.. code:: ipython3
from qiskit_nature.second_q.hamiltonians.lattices import LineLattice
from qiskit_nature.second_q.hamiltonians import FermiHubbardModel
line = LineLattice(2)
fermi = FermiHubbardModel(line.uniform_parameters(2.0, 4.0), 3.0)
print(fermi.second_q_op()) # Note: NO trailing `s`
.. parsed-literal::
Fermionic Operator
number spin orbitals=4, number terms=10
2.0 * ( +_0 -_2 )
+ -2.0 * ( -_0 +_2 )
+ 4.0 * ( +_0 -_0 )
+ 4.0 * ( +_2 -_2 )
+ 2.0 * ( +_1 -_3 )
+ -2.0 * ( -_1 +_3 )
+ 4.0 * ( +_1 -_1 )
+ 4.0 * ( +_3 -_3 )
+ 3.0 * ( +_0 -_0 +_1 -_1 )
+ 3.0 * ( +_2 -_2 +_3 -_3 )
``FermiHubbardModel.from_parameters``
-------------------------------------
Previously
~~~~~~~~~~
.. code:: ipython3
import numpy as np
from qiskit_nature.problems.second_quantization.lattice.models import FermiHubbardModel
interaction = np.array([[4.0, 2.0], [2.0, 4.0]])
fermi = FermiHubbardModel.from_parameters(interaction, 3.0)
print(fermi.second_q_ops()) # Note: the trailing `s`
.. parsed-literal::
Fermionic Operator
register length=4, number terms=10
(4+0j) * ( +_0 -_0 )
+ (2+0j) * ( +_0 -_2 )
+ (-2+0j) * ( -_0 +_2 )
+ (4+0j) * ( +_2 -_2 )
+ (4+0j) * ( +_1 -_1 )
+ (2+0j) * ( +_1 -_3 )
+ (-2+0j) * ( -_1 +_3 )
+ (4+0j) * ( +_3 -_3 )
+ (3+0j) * ...
New
~~~
.. code:: ipython3
import numpy as np
from qiskit_nature.second_q.hamiltonians.lattices import Lattice
from qiskit_nature.second_q.hamiltonians import FermiHubbardModel
interaction = np.array([[4.0, 2.0], [2.0, 4.0]])
lattice = Lattice.from_adjacency_matrix(interaction)
fermi = FermiHubbardModel(lattice, 3.0)
print(fermi.second_q_op()) # Note: NO trailing `s`
.. parsed-literal::
Fermionic Operator
number spin orbitals=4, number terms=10
4.0 * ( +_0 -_0 )
+ 2.0 * ( +_0 -_2 )
+ -2.0 * ( -_0 +_2 )
+ 4.0 * ( +_2 -_2 )
+ 4.0 * ( +_1 -_1 )
+ 2.0 * ( +_1 -_3 )
+ -2.0 * ( -_1 +_3 )
+ 4.0 * ( +_3 -_3 )
+ 3.0 * ( +_0 -_0 +_1 -_1 )
+ 3.0 * ( +_2 -_2 +_3 -_3 )
``IsingModel.uniform_parameters``
---------------------------------
Previously
~~~~~~~~~~
.. code:: ipython3
from qiskit_nature.problems.second_quantization.lattice.lattices import LineLattice
from qiskit_nature.problems.second_quantization.lattice.models import IsingModel
line = LineLattice(2)
ising = IsingModel.uniform_parameters(line, 2.0, 4.0)
print(ising.second_q_ops()) # Note: the trailing `s`
.. parsed-literal::
Z_0 Z_1 * (2+0j)
+ X_0 * (4+0j)
+ X_1 * (4+0j)
New
~~~
.. code:: ipython3
from qiskit_nature.second_q.hamiltonians.lattices import LineLattice
from qiskit_nature.second_q.hamiltonians import IsingModel
line = LineLattice(2)
ising = IsingModel(line.uniform_parameters(2.0, 4.0))
print(ising.second_q_op()) # Note: NO trailing `s`
.. parsed-literal::
Spin Operator
spin=1/2, number spins=2, number terms=3
2.0 * ( Z_0 Z_1 )
+ 4.0 * ( X_0 )
+ 4.0 * ( X_1 )
``IsingModel.from_parameters``
------------------------------
Previously
~~~~~~~~~~
.. code:: ipython3
import numpy as np
from qiskit_nature.problems.second_quantization.lattice.models import IsingModel
interaction = np.array([[4.0, 2.0], [2.0, 4.0]])
ising = IsingModel.from_parameters(interaction)
print(ising.second_q_ops()) # Note: the trailing `s`
.. parsed-literal::
X_0 * (4+0j)
+ Z_0 Z_1 * (2+0j)
+ X_1 * (4+0j)
New
~~~
.. code:: ipython3
import numpy as np
from qiskit_nature.second_q.hamiltonians.lattices import Lattice
from qiskit_nature.second_q.hamiltonians import IsingModel
interaction = np.array([[4.0, 2.0], [2.0, 4.0]])
lattice = Lattice.from_adjacency_matrix(interaction)
ising = IsingModel(lattice)
print(ising.second_q_op()) # Note: NO trailing `s`
.. parsed-literal::
Spin Operator
spin=1/2, number spins=2, number terms=3
4.0 * ( X_0 )
+ 2.0 * ( Z_0 Z_1 )
+ 4.0 * ( X_1 )
|
qiskit-nature/docs/migration/0.5_e_lattice_models.rst/0
|
{
"file_path": "qiskit-nature/docs/migration/0.5_e_lattice_models.rst",
"repo_id": "qiskit-nature",
"token_count": 3102
}
| 120 |
<jupyter_start><jupyter_text>QCSchemaThe [QCSchema](https://github.com/MolSSI/QCSchema) is a standard data format for quantum chemistry. The current version of it which has been adopted by multiple classical chemistry codes, only supports serialization via `JSON`, but their docs clearly indicate that `HDF5` may also be used. Thus, in Qiskit Nature, we have opted for implementing support for both hierarchical data formats.Now, why do we have a tutorial specifically about this format you may wonder? The reason is fairly simple: this is the data format which our drivers use internally to transfer data between the classical computation and Qiskit Nature. Thus, this tutorial will explain a few concepts, in case you want to get a bit more elaborate with your driver interaction.> **Note:** the support for electronic-repulsion integrals as part of the QCSchema is not yet part of the official specification and, thus, custom to Qiskit Nature. But we are working with the QCSchema authors to make this integration official!For the purposes of this tutorial, we are using the `PySCFDriver`, but most discussion points should apply to the other electronic structure drivers, too.First, let us construct a `PySCFDriver` and run it:<jupyter_code>from qiskit_nature.second_q.drivers import PySCFDriver
driver = PySCFDriver()
problem = driver.run()
print(problem)<jupyter_output><qiskit_nature.second_q.problems.electronic_structure_problem.ElectronicStructureProblem object at 0x155041be1250><jupyter_text>This is basically short for the following:<jupyter_code>from qiskit_nature.second_q.problems import ElectronicBasis
driver.run_pyscf()
problem = driver.to_problem(basis=ElectronicBasis.MO, include_dipole=True)
print(problem.basis)<jupyter_output>ElectronicBasis.MO<jupyter_text>There are two things to note here:- the problem is specifically requested in the MO basis- dipole integrals are handled separately (because the current QCSchema standard does not support these coefficients)What this means for you as an end-user, is that you can also request the problem in another basis like so:<jupyter_code>ao_problem = driver.to_problem(basis=ElectronicBasis.AO)
print(ao_problem.basis)<jupyter_output>ElectronicBasis.AO<jupyter_text>If you now want to transform an AO problem into the MO basis, you need to use the `BasisTransformer` which is explained in [a separate tutorial](05_problem_transformers.ipynb).This is the point, where you need to understand that the `to_problem` method actually relies on the `to_qcschema` method internally:<jupyter_code>from qiskit_nature.second_q.formats.qcschema_translator import qcschema_to_problem
qcschema = driver.to_qcschema()
ao_problem = qcschema_to_problem(qcschema, basis=ElectronicBasis.AO)<jupyter_output><empty_output><jupyter_text>Specifically extracting the QCSchema object from the driver allows you to later extract a `BasisTransformer` from it, without having to manually dig out the AO-2-MO transformation coefficients from the depths of the driver object:<jupyter_code>from qiskit_nature.second_q.formats.qcschema_translator import get_ao_to_mo_from_qcschema
basis_transformer = get_ao_to_mo_from_qcschema(qcschema)
mo_problem = basis_transformer.transform(ao_problem)
print(mo_problem.basis)
import tutorial_magics
%qiskit_version_table
%qiskit_copyright<jupyter_output><empty_output>
|
qiskit-nature/docs/tutorials/08_qcschema.ipynb/0
|
{
"file_path": "qiskit-nature/docs/tutorials/08_qcschema.ipynb",
"repo_id": "qiskit-nature",
"token_count": 976
}
| 121 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2018, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
r"""
=============================================
Qiskit Nature module (:mod:`qiskit_nature`)
=============================================
.. currentmodule:: qiskit_nature
Qiskit Nature provides functions to experiment with quantum computing for natural
science problems, such as in chemistry and physics, for example computing the ground
state energy or excited state energies of molecules.
The top-level classes and submodules of qiskit_nature are:
.. autosummary::
:toctree: ../stubs/
:nosignatures:
QiskitNatureError
UnsupportMethodError
Globals
=======
.. autosummary::
:toctree:
logging
settings
Submodules
==========
.. autosummary::
:toctree:
second_q
testing
utils
"""
from qiskit_algorithms.list_or_dict import ListOrDict as ListOrDictType
from .exceptions import QiskitNatureError, UnsupportMethodError
from .logging import logging
from .settings import settings
from .version import __version__
__all__ = [
"__version__",
"ListOrDictType",
"QiskitNatureError",
"UnsupportMethodError",
"logging",
"settings",
]
|
qiskit-nature/qiskit_nature/__init__.py/0
|
{
"file_path": "qiskit-nature/qiskit_nature/__init__.py",
"repo_id": "qiskit-nature",
"token_count": 476
}
| 122 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2020, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The ground state calculation interface."""
from __future__ import annotations
from abc import ABC, abstractmethod
from qiskit.quantum_info import SparsePauliOp
from qiskit_nature.second_q.operators import SparseLabelOp
from qiskit_nature.second_q.mappers import QubitMapper
from qiskit_nature.second_q.problems import BaseProblem
from qiskit_nature.second_q.problems import EigenstateResult
class GroundStateSolver(ABC):
"""The ground state calculation interface."""
def __init__(
self,
qubit_mapper: QubitMapper,
) -> None:
# pylint: disable=unused-argument
"""
Args:
qubit_mapper: The :class:`~qiskit_nature.second_q.mappers.QubitMapper` instance that
converts a second quantized operator to qubit operators.
"""
self._qubit_mapper = qubit_mapper
@abstractmethod
def solve(
self,
problem: BaseProblem,
aux_operators: dict[str, SparseLabelOp | SparsePauliOp] | None = None,
) -> EigenstateResult:
"""Compute the ground state energy of the molecule that was supplied via the driver.
Args:
problem: A class encoding a problem to be solved.
aux_operators: Additional auxiliary operators to evaluate.
Returns:
An interpreted :class:`~.EigenstateResult`. For more information see also
:meth:`~.BaseProblem.interpret`.
"""
raise NotImplementedError
@abstractmethod
def get_qubit_operators(
self,
problem: BaseProblem,
aux_operators: dict[str, SparseLabelOp | SparsePauliOp] | None = None,
) -> tuple[SparsePauliOp, dict[str, SparsePauliOp] | None]:
"""Gets the operator and auxiliary operators, and transforms the provided auxiliary operators
using a ``QubitMapper``.
If the user-provided ``aux_operators`` contain a name which clashes with an internally
constructed auxiliary operator, then the corresponding internal operator will be overridden by
the user-provided operator.
Args:
problem: A class encoding a problem defining the qubit operators.
aux_operators: Additional auxiliary operators to transform.
Returns:
A tuple with the main operator (hamiltonian) and a dictionary of auxiliary default and
custom operators.
"""
@abstractmethod
def supports_aux_operators(self) -> bool:
"""Returns whether the eigensolver supports auxiliary operators."""
raise NotImplementedError
@property
def qubit_mapper(self):
"""Returns the qubit mapper."""
return self._qubit_mapper
@property
@abstractmethod
def solver(self):
"""Returns the solver."""
|
qiskit-nature/qiskit_nature/second_q/algorithms/ground_state_solvers/ground_state_solver.py/0
|
{
"file_path": "qiskit-nature/qiskit_nature/second_q/algorithms/ground_state_solvers/ground_state_solver.py",
"repo_id": "qiskit-nature",
"token_count": 1198
}
| 123 |
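The abstract interface above is implemented by concrete solvers elsewhere in the package. A minimal usage sketch, assuming the ``GroundStateEigensolver`` implementation from the same algorithms package, the ``JordanWignerMapper``, and a locally installed PySCF for the driver:
from qiskit_algorithms import NumPyMinimumEigensolver
from qiskit_nature.second_q.algorithms import GroundStateEigensolver
from qiskit_nature.second_q.drivers import PySCFDriver
from qiskit_nature.second_q.mappers import JordanWignerMapper
problem = PySCFDriver().run()  # default H2 molecule
solver = GroundStateEigensolver(JordanWignerMapper(), NumPyMinimumEigensolver())
result = solver.solve(problem)  # an interpreted EigenstateResult
print(result.total_energies)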
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2021, 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
These utility methods are used by the :class:`~.UCC` Ansatz in order to construct its excitation
operators.
"""
from __future__ import annotations
from typing import Iterator
import itertools
import logging
logger = logging.getLogger(__name__)
def get_alpha_excitations(
num_spatial_orbitals: int,
num_alpha: int,
*,
generalized: bool = False,
) -> list[tuple[int, int]]:
"""Generates all possible single alpha-electron excitations.
This method assumes block-ordered spin-orbitals.
Args:
num_alpha: the number of alpha electrons.
num_spatial_orbitals: the number of spatial-orbitals.
generalized: boolean flag whether or not to use generalized excitations, which ignore the
occupation of the spin orbitals. As such, the set of generalized excitations is only
determined from the number of spin orbitals and independent from the number of alpha
electrons.
Returns:
The list of excitations encoded as tuples. Each tuple is a pair. The first entry contains
the occupied spin orbital index and the second entry the unoccupied one.
"""
if generalized:
return list(itertools.combinations(range(num_spatial_orbitals), 2))
alpha_occ = range(num_alpha)
alpha_unocc = range(num_alpha, num_spatial_orbitals)
return list(itertools.product(alpha_occ, alpha_unocc))
def get_beta_excitations(
num_spatial_orbitals: int,
num_beta: int,
*,
generalized: bool = False,
) -> list[tuple[int, int]]:
"""Generates all possible single beta-electron excitations.
This method assumes block-ordered spin-orbitals.
Args:
num_beta: the number of beta electrons.
num_spatial_orbitals: the total number of spatial-orbitals.
generalized: boolean flag whether or not to use generalized excitations, which ignore the
occupation of the spin orbitals. As such, the set of generalized excitations is only
determined from the number of spin orbitals and independent from the number of beta
electrons.
Returns:
The list of excitations encoded as tuples. Each tuple is a pair. The first entry contains
the occupied spin orbital index and the second entry the unoccupied one.
"""
num_spin_orbitals = 2 * num_spatial_orbitals
if generalized:
return list(itertools.combinations(range(num_spatial_orbitals, num_spin_orbitals), 2))
beta_index_offset = num_spatial_orbitals
beta_occ = range(beta_index_offset, beta_index_offset + num_beta)
beta_unocc = range(beta_index_offset + num_beta, num_spin_orbitals)
return list(itertools.product(beta_occ, beta_unocc))
def generate_fermionic_excitations(
num_excitations: int,
num_spatial_orbitals: int,
num_particles: tuple[int, int],
*,
alpha_spin: bool = True,
beta_spin: bool = True,
max_spin_excitation: int | None = None,
generalized: bool = False,
preserve_spin: bool = True,
) -> list[tuple[tuple[int, ...], tuple[int, ...]]]:
# pylint: disable=line-too-long
"""Generates all possible excitations with the given number of excitations for the specified
number of particles distributed among the given number of spatial orbitals.
The method must be called for each type of excitation (singles, doubles, etc.) that is to be
considered in the Ansatz. Excitations will be produced based on an initial `Hartree-Fock`
occupation by default unless `generalized` is set to `True`, in which case the excitations are
only determined based on the number of spatial orbitals and are independent from
the number of particles.
This method assumes block-ordered spin-orbitals.
Args:
num_excitations: number of excitations per operator (1 means single excitations, etc.).
num_spatial_orbitals: number of spatial-orbitals.
num_particles: number of alpha and beta particles.
alpha_spin: boolean flag whether to include alpha-spin excitations.
beta_spin: boolean flag whether to include beta-spin excitations.
max_spin_excitation: the largest number of excitations within a spin. E.g. you can set
this to 1 and `num_excitations` to 2 in order to obtain only
mixed-spin double excitations (alpha,beta) but no pure-spin double
excitations (alpha,alpha or beta,beta).
generalized: boolean flag whether or not to use generalized excitations, which ignore the
occupation of the spin orbitals. As such, the set of generalized excitations is only
determined from the number of spin orbitals and independent from the number of
particles.
preserve_spin: boolean flag whether or not to preserve the particle spins.
Returns:
The list of excitations encoded as tuples of tuples. Each tuple in the list is a pair. The
first tuple contains the occupied spin orbital indices whereas the second one contains the
indices of the unoccupied spin orbitals.
Examples:
Generate excitations with basic inputs.
>>> from qiskit_nature.second_q.circuit.library.ansatzes.utils.fermionic_excitation_generator import generate_fermionic_excitations
>>> generate_fermionic_excitations(num_excitations=1, num_spatial_orbitals=3, num_particles=(1,1))
[((0,), (1,)), ((0,), (2,)), ((3,), (4,)), ((3,), (5,))]
Generate generalized excitations.
>>> generate_fermionic_excitations(1, 3, (1, 1), generalized=True)
[((0,), (1,)), ((0,), (2,)), ((1,), (2,)), ((3,), (4,)), ((3,), (5,)), ((4,), (5,))]
"""
num_spin_orbitals = 2 * num_spatial_orbitals
alpha_excitations: list[tuple[int, int]] = []
beta_excitations: list[tuple[int, int]] = []
if preserve_spin:
if alpha_spin:
alpha_excitations = get_alpha_excitations(
num_spatial_orbitals, num_particles[0], generalized=generalized
)
logger.debug("Generated list of single alpha excitations: %s", alpha_excitations)
if beta_spin:
beta_excitations = get_beta_excitations(
num_spatial_orbitals, num_particles[1], generalized=generalized
)
logger.debug("Generated list of single beta excitations: %s", beta_excitations)
else:
if generalized:
# Combining generalized=True with preserve_spin=False results in all possible
# excitations, regardless of both, orbital occupancy and orbital spin species.
# This effectively amounts to all permutations of available orbitals. However,
# this does _not_ include de-excitations, which need to be filtered!
# First, we get the generalized alpha-spin single excitations
single_excitations = get_alpha_excitations(
num_spatial_orbitals, sum(num_particles), generalized=True
)
# We can now obtain the alpha excitations by complementing the previously generated list
# of single excitations with the non-spin-preserving excitations.
alpha_excitations = sorted(
itertools.chain.from_iterable(
itertools.starmap(
lambda i, a: [(i, a), (i, a + num_spatial_orbitals)], single_excitations
)
)
)
# The beta excitations are identical but starting from beta-spin indices
beta_excitations = sorted(
itertools.chain.from_iterable(
itertools.starmap(
lambda i, a: [
(i + num_spatial_orbitals, a),
(i + num_spatial_orbitals, a + num_spatial_orbitals),
],
single_excitations,
)
)
)
else:
# preserve_spin=False doesn't distinguish between alpha and beta spin species. This is
# effectively the same scenario as a single spin species for a system of double the
# actual size up to a reordering of the orbitals. We can reuse single spin species
# excitation generator if we reorder the orbitals afterwards. The first num_particles[0]
# orbitals in the output are fine, but the next num_particles[1] orbitals have to be
# reordered to start at index num_spatial_orbitals
single_excitations = get_alpha_excitations(
num_spin_orbitals, sum(num_particles), generalized=False
)
def reorder_index(index: int) -> int:
# Alpha spins already at correct index
if index < num_particles[0]:
return index
# Cyclically permute remaining (num_spin_orbitals - num_particles[0]) orbitals to
# get Beta spins at correct index
else:
offset = num_particles[0]
period = num_spin_orbitals - offset
shift = num_spatial_orbitals - offset
return (index - offset + shift) % period + offset
for occ_idx, unocc_idx in single_excitations:
# we map from interleaved to blocked spin orbital indices
reordered_occ_idx = reorder_index(occ_idx)
reordered_unocc_idx = reorder_index(unocc_idx)
reordered_excitation = (reordered_occ_idx, reordered_unocc_idx)
if reordered_occ_idx < num_spatial_orbitals:
alpha_excitations.append(reordered_excitation)
else:
beta_excitations.append(reordered_excitation)
# NOTE: we sort the lists to ensure that non-spin flipped variants take higher precedence
alpha_excitations = sorted(alpha_excitations)
beta_excitations = sorted(beta_excitations)
if not alpha_excitations and not beta_excitations:
# nothing to do, let's return early
return []
# we can find the actual list of excitations by doing the following:
# 1. combine the single alpha- and beta-spin excitations
# 2. find all possible combinations of length `num_excitations`
pool: Iterator[tuple[tuple[int, int], ...]] = itertools.combinations(
alpha_excitations + beta_excitations, num_excitations
)
# if max_spin_excitation is set, we need to filter the pool of excitations
if max_spin_excitation is not None:
logger.info(
"The maximum number of excitations within each spin species was set to %s",
max_spin_excitation,
)
        # first, remove all those excitations in which more than max_spin_excitation alpha
        # excitations are performed at once
if alpha_excitations: # False if empty list
alpha_exc_set = set(alpha_excitations)
pool = itertools.filterfalse(
lambda exc: len(set(exc) & alpha_exc_set) > max_spin_excitation, pool
)
# then, do the same for beta
if beta_excitations: # False if empty list
beta_exc_set = set(beta_excitations)
pool = itertools.filterfalse(
lambda exc: len(set(exc) & beta_exc_set) > max_spin_excitation, pool
)
excitations: list[tuple[tuple[int, ...], tuple[int, ...]]] = []
visited_excitations = set()
for exc in pool:
# validate an excitation by asserting that all indices are unique:
# 1. get the frozen set of indices in the excitation
exc_set = frozenset(itertools.chain.from_iterable(exc))
# 2. all indices must be unique (size of set equals 2 * num_excitations)
# 3. and we also don't want to include permuted variants of identical excitations
if len(exc_set) == num_excitations * 2 and exc_set not in visited_excitations:
visited_excitations.add(exc_set)
occ: tuple[int, ...]
unocc: tuple[int, ...]
occ, unocc = zip(*exc)
exc_tuple = (occ, unocc)
excitations.append(exc_tuple)
logger.debug("Added the excitation: %s", exc_tuple)
return excitations
|
qiskit-nature/qiskit_nature/second_q/circuit/library/ansatzes/utils/fermionic_excitation_generator.py/0
|
{
"file_path": "qiskit-nature/qiskit_nature/second_q/circuit/library/ansatzes/utils/fermionic_excitation_generator.py",
"repo_id": "qiskit-nature",
"token_count": 5081
}
| 124 |
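A small sketch, separate from the module above, of the ``max_spin_excitation`` behavior described in the docstring: with two alpha and two beta electrons in four spatial orbitals, requesting double excitations with ``max_spin_excitation=1`` drops the pure-spin (alpha,alpha and beta,beta) doubles and keeps only the mixed-spin ones:
from qiskit_nature.second_q.circuit.library.ansatzes.utils.fermionic_excitation_generator import (
    generate_fermionic_excitations,
)
all_doubles = generate_fermionic_excitations(2, 4, (2, 2))
mixed_doubles = generate_fermionic_excitations(2, 4, (2, 2), max_spin_excitation=1)
# the second list is the subset of the first without the pure-spin doubles
print(len(all_doubles), len(mixed_doubles))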
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2020, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
FCIDump (:mod:`qiskit_nature.second_q.formats.fcidump`)
=============================================================
Contains tools to parse and dump FCIDump files.
.. currentmodule:: qiskit_nature.second_q.formats.fcidump
.. autosummary::
:toctree: ../stubs/
:nosignatures:
FCIDump
"""
from .fcidump import FCIDump
__all__ = ["FCIDump"]
|
qiskit-nature/qiskit_nature/second_q/formats/fcidump/__init__.py/0
|
{
"file_path": "qiskit-nature/qiskit_nature/second_q/formats/fcidump/__init__.py",
"repo_id": "qiskit-nature",
"token_count": 264
}
| 125 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The QCSchema wavefunction dataclass."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Sequence, cast
import h5py
from .qc_base import _QCBase
from .qc_basis_set import QCBasisSet
# TODO: directly support np.ndarray instead of always converting to flat lists
@dataclass
class QCWavefunction(_QCBase):
"""A dataclass to store any additional computed wavefunction properties.
Matrix quantities are stored as flat, column-major arrays.
For more information refer to
[here](https://molssi-qc-schema.readthedocs.io/en/latest/auto_wf.html#wavefunction-schema).
"""
basis: str | QCBasisSet
"""The basis set used during the computation. This can be either a simple string or a full
:class:`QCBasisSet` specification.
Note: This deviates slightly from the official QCSchema spec, where a QCBasisSet instance is
required. However, since this is data not relevant in the routines of Qiskit Nature, we are a
bit more lenient here, in order to enable a migration path from legacy HDF5 files generated by
Qiskit Nature.
"""
overlap: str | None = None
"""The name of the overlap matrix in the AO basis."""
orbitals_a: str | None = None
"""The name of the alpha-spin orbitals in the AO basis."""
orbitals_b: str | None = None
"""The name of the beta-spin orbitals in the AO basis."""
density_a: str | None = None
"""The name of the alpha-spin density in the AO basis."""
density_b: str | None = None
"""The name of the beta-spin density in the AO basis."""
density_mo_a: str | None = None
"""The name of the alpha-spin density in the MO basis."""
density_mo_b: str | None = None
"""The name of the beta-spin density in the MO basis."""
fock_a: str | None = None
"""The name of the alpha-spin Fock matrix in the AO basis."""
fock_b: str | None = None
"""The name of the beta-spin Fock matrix in the AO basis."""
fock_mo_a: str | None = None
"""The name of the alpha-spin Fock matrix in the MO basis."""
fock_mo_b: str | None = None
"""The name of the beta-spin Fock matrix in the MO basis."""
eigenvalues_a: str | None = None
"""The name of the alpha-spin orbital eigenvalues."""
eigenvalues_b: str | None = None
"""The name of the beta-spin orbital eigenvalues."""
occupations_a: str | None = None
"""The name of the alpha-spin orbital occupations."""
occupations_b: str | None = None
"""The name of the beta-spin orbital occupations."""
eri: str | None = None
"""The name of the electron-repulsion integrals in the AO basis."""
eri_mo_aa: str | None = None
"""The name of the alpha-alpha electron-repulsion integrals in the MO basis."""
eri_mo_ab: str | None = None
"""The name of the alpha-beta electron-repulsion integrals in the MO basis."""
eri_mo_ba: str | None = None
"""The name of the beta-alpha electron-repulsion integrals in the MO basis."""
eri_mo_bb: str | None = None
"""The name of the beta-beta electron-repulsion integrals in the MO basis."""
dipole_x: str | None = None
"""The name of x-axis dipole moment integrals in the AO basis."""
dipole_y: str | None = None
"""The name of y-axis dipole moment integrals in the AO basis."""
dipole_z: str | None = None
"""The name of z-axis dipole moment integrals in the AO basis."""
dipole_mo_x_a: str | None = None
"""The name of alpha-spin x-axis dipole moment integrals in the MO basis."""
dipole_mo_y_a: str | None = None
"""The name of alpha-spin y-axis dipole moment integrals in the MO basis."""
dipole_mo_z_a: str | None = None
"""The name of alpha-spin z-axis dipole moment integrals in the MO basis."""
dipole_mo_x_b: str | None = None
"""The name of beta-spin x-axis dipole moment integrals in the MO basis."""
dipole_mo_y_b: str | None = None
"""The name of beta-spin y-axis dipole moment integrals in the MO basis."""
dipole_mo_z_b: str | None = None
"""The name of beta-spin z-axis dipole moment integrals in the MO basis."""
scf_overlap: Sequence[float] | None = None
"""The SCF overlap matrix in the AO basis."""
scf_orbitals_a: Sequence[float] | None = None
"""The SCF alpha-spin orbitals in the AO basis."""
scf_orbitals_b: Sequence[float] | None = None
"""The SCF beta-spin orbitals in the AO basis."""
scf_density_a: Sequence[float] | None = None
"""The SCF alpha-spin density in the AO basis."""
scf_density_b: Sequence[float] | None = None
"""The SCF beta-spin density in the AO basis."""
scf_density_mo_a: Sequence[float] | None = None
"""The SCF alpha-spin density in the MO basis."""
scf_density_mo_b: Sequence[float] | None = None
"""The SCF beta-spin density in the MO basis."""
scf_fock_a: Sequence[float] | None = None
"""The SCF alpha-spin Fock matrix in the AO basis."""
scf_fock_b: Sequence[float] | None = None
"""The SCF beta-spin Fock matrix in the AO basis."""
scf_fock_mo_a: Sequence[float] | None = None
"""The SCF alpha-spin Fock matrix in the MO basis."""
scf_fock_mo_b: Sequence[float] | None = None
"""The SCF beta-spin Fock matrix in the MO basis."""
scf_coulomb_a: Sequence[float] | None = None
"""The SCF alpha-spin Coulomb matrix in the AO basis."""
scf_coulomb_b: Sequence[float] | None = None
"""The SCF beta-spin Coulomb matrix in the AO basis."""
scf_exchange_a: Sequence[float] | None = None
"""The SCF alpha-spin Exchange matrix in the AO basis."""
scf_exchange_b: Sequence[float] | None = None
"""The SCF beta-spin Exchange matrix in the AO basis."""
scf_eigenvalues_a: Sequence[float] | None = None
"""The SCF alpha-spin orbital eigenvalues."""
scf_eigenvalues_b: Sequence[float] | None = None
"""The SCF beta-spin orbital eigenvalues."""
scf_occupations_a: Sequence[float] | None = None
"""The SCF alpha-spin orbital occupations."""
scf_occupations_b: Sequence[float] | None = None
"""The SCF beta-spin orbital occupations."""
scf_eri: Sequence[float] | None = None
"""The SCF electron-repulsion integrals in the AO basis."""
scf_eri_mo_aa: Sequence[float] | None = None
"""The SCF alpha-alpha electron-repulsion integrals in the MO basis."""
scf_eri_mo_ab: Sequence[float] | None = None
"""The SCF alpha-beta electron-repulsion integrals in the MO basis."""
scf_eri_mo_ba: Sequence[float] | None = None
"""The SCF beta-alpha electron-repulsion integrals in the MO basis."""
scf_eri_mo_bb: Sequence[float] | None = None
"""The SCF beta-beta electron-repulsion integrals in the MO basis."""
scf_dipole_x: Sequence[float] | None = None
"""The SCF x-axis dipole moment integrals in the AO basis."""
scf_dipole_y: Sequence[float] | None = None
"""The SCF y-axis dipole moment integrals in the AO basis."""
scf_dipole_z: Sequence[float] | None = None
"""The SCF z-axis dipole moment integrals in the AO basis."""
scf_dipole_mo_x_a: Sequence[float] | None = None
"""The SCF alpha-spin x-axis dipole moment integrals in the MO basis."""
scf_dipole_mo_y_a: Sequence[float] | None = None
"""The SCF alpha-spin y-axis dipole moment integrals in the MO basis."""
scf_dipole_mo_z_a: Sequence[float] | None = None
"""The SCF alpha-spin z-axis dipole moment integrals in the MO basis."""
scf_dipole_mo_x_b: Sequence[float] | None = None
"""The SCF beta-spin x-axis dipole moment integrals in the MO basis."""
scf_dipole_mo_y_b: Sequence[float] | None = None
"""The SCF beta-spin y-axis dipole moment integrals in the MO basis."""
scf_dipole_mo_z_b: Sequence[float] | None = None
"""The SCF beta-spin z-axis dipole moment integrals in the MO basis."""
localized_orbitals_a: Sequence[float] | None = None
"""The localized alpha-spin orbitals. All `nmo` orbitals are included, even if only a subset
were localized."""
localized_orbitals_b: Sequence[float] | None = None
"""The localized beta-spin orbitals. All `nmo` orbitals are included, even if only a subset were
localized."""
localized_fock_a: Sequence[float] | None = None
"""The alpha-spin Fock matrix in the localized basis. All `nmo` orbitals are included, even if
only a subset were localized."""
localized_fock_b: Sequence[float] | None = None
"""The beta-spin Fock matrix in the localized basis. All `nmo` orbitals are included, even if
only a subset were localized."""
h_core_a: Sequence[float] | None = None
"""The alpha-spin core (one-electron) Hamiltonian matrix in the AO basis."""
h_core_b: Sequence[float] | None = None
"""The beta-spin core (one-electron) Hamiltonian matrix in the AO basis."""
h_effective_a: Sequence[float] | None = None
"""The effective alpha-spin core (one-electron) Hamiltonian matrix in the AO basis."""
h_effective_b: Sequence[float] | None = None
"""The effective beta-spin core (one-electron) Hamiltonian matrix in the AO basis."""
restricted: bool | None = None
"""Whether the computation used restricted spin orbitals."""
@classmethod
def from_dict(cls, data: dict[str, Any]) -> QCWavefunction:
basis: str | dict[str, Any] | QCBasisSet = data.pop("basis")
if isinstance(basis, dict):
basis = QCBasisSet.from_dict(basis)
return cls(**data, basis=basis)
def to_hdf5(self, group: h5py.Group) -> None:
for key, value in self.__dict__.items():
if value is None:
continue
if key == "restricted":
group.attrs["restricted"] = self.restricted
elif key == "basis":
if isinstance(self.basis, QCBasisSet):
basis_group = group.require_group("basis")
self.basis.to_hdf5(basis_group)
else:
group.attrs["basis"] = self.basis
elif hasattr(value, "to_hdf5"):
inner_group = group.require_group(key)
value.to_hdf5(inner_group)
else:
group.create_dataset(key, data=value)
@classmethod
def _from_hdf5_group(cls, h5py_group: h5py.Group) -> QCWavefunction:
data = dict(h5py_group.attrs.items())
for key, value in h5py_group.items():
if key == "basis":
basis: str | QCBasisSet
if "basis" in h5py_group.keys():
basis = cast(QCBasisSet, QCBasisSet.from_hdf5(h5py_group["basis"]))
else:
basis = h5py_group.attrs["basis"]
data["basis"] = basis
else:
data[key] = value[...]
return cls(**data)
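# A minimal HDF5 round-trip sketch (illustrative file and group names; assumes ``h5py``
# is installed and that ``wfn`` is an existing QCWavefunction instance):
#
#   import h5py
#
#   with h5py.File("wavefunction.h5", "w") as file:
#       wfn.to_hdf5(file.require_group("wavefunction"))
#
#   with h5py.File("wavefunction.h5", "r") as file:
#       restored = QCWavefunction._from_hdf5_group(file["wavefunction"])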
|
qiskit-nature/qiskit_nature/second_q/formats/qcschema/qc_wavefunction.py/0
|
{
"file_path": "qiskit-nature/qiskit_nature/second_q/formats/qcschema/qc_wavefunction.py",
"repo_id": "qiskit-nature",
"token_count": 4251
}
| 126 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2023, 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The kagome lattice"""
from __future__ import annotations
from dataclasses import asdict
from itertools import product
import numpy as np
from rustworkx import PyGraph
from .boundary_condition import BoundaryCondition
from .lattice import Lattice, LatticeDrawStyle
class KagomeLattice(Lattice):
r"""The two-dimensional kagome lattice.
The kagome lattice is a two-dimensional Bravais lattice formed by tiling together
equilateral triangles and regular hexagons in an alternating pattern. The lattice is
spanned by the primitive translation vectors :math:`2\vec{a}_{1}` and :math:`2\vec{a}_{2}`,
where :math:`\vec{a}_{1} = (1, 0)^{\top}` and :math:`\vec{a}_{2} = (1/2, \sqrt{3}/2)^{\top}`,
with each unit cell consisting of three lattice sites located at :math:`\vec{r}_0 = \mathbf{0}`,
:math:`\vec{r}_1 = \vec{a}_{1}` and :math:`\vec{r}_2 = \vec{a}_{2}`, respectively.
This class allows for the simple construction of kagome lattices. For example,
.. code-block:: python
from qiskit_nature.second_q.hamiltonians.lattices import (
BoundaryCondition,
KagomeLattice,
)
kagome = KagomeLattice(
5,
4,
edge_parameter = 1.0,
onsite_parameter = 2.0,
boundary_condition = BoundaryCondition.PERIODIC
)
instantiates a kagome lattice with 5 and 4 unit cells in the x and y direction,
respectively, which has weights 1.0 on all edges and weights 2.0 on self-loops.
The boundary conditions are periodic for the entire lattice.
References:
- `Kagome Lattice @ wikipedia <https://en.wikipedia.org/wiki/Trihexagonal_tiling>`_
- `Bravais Lattice @ wikipedia <https://en.wikipedia.org/wiki/Bravais_lattice>`_
"""
# Dimension of lattice
_dim = 2
# Number of sites in a unit cell
_num_sites_per_cell = 3
# Relative positions (relative to site 0) of sites in a unit cell
_cell_positions = np.array([[0, 0], [1, 0], [1 / 2, np.sqrt(3) / 2]])
# Primitive translation vectors in each direction
_basis = np.array([[2, 0], [1, np.sqrt(3)]])
def __init__(
self,
rows: int,
cols: int,
edge_parameter: complex = 1.0,
onsite_parameter: complex = 0.0,
boundary_condition: (
BoundaryCondition | tuple[BoundaryCondition, BoundaryCondition]
) = BoundaryCondition.OPEN,
) -> None:
"""
Args:
rows: Number of unit cells in the x direction.
cols: Number of unit cells in the y direction.
edge_parameter: Weight on all the edges, specified as a single value.
Defaults to 1.0.
onsite_parameter: Weight on the self-loops, which are edges connecting a node to itself.
Defaults to 0.0.
boundary_condition: Boundary condition for each direction.
The available boundary conditions are:
:attr:`.BoundaryCondition.OPEN`, :attr:`.BoundaryCondition.PERIODIC`.
Defaults to :attr:`.BoundaryCondition.OPEN`.
Raises:
ValueError: When ``boundary_condition`` is given as a tuple whose length does not
match the dimension of the lattice (2).
"""
self._rows = rows
self._cols = cols
self._size = (rows, cols)
self._edge_parameter = edge_parameter
self._onsite_parameter = onsite_parameter
if isinstance(boundary_condition, BoundaryCondition):
boundary_condition = (boundary_condition, boundary_condition)
elif isinstance(boundary_condition, tuple):
if len(boundary_condition) != self._dim:
raise ValueError(
"size mismatch, "
f"`boundary_condition`: {len(boundary_condition)}, `size`: {self._dim}."
"The length of `boundary_condition` must be the same as that of size."
)
self._boundary_condition = boundary_condition
graph: PyGraph = PyGraph(multigraph=False)
graph.add_nodes_from(range(self._num_sites_per_cell * np.prod(self._size)))
# add edges excluding the boundary edges
bulk_edges = self._bulk_edges()
graph.add_edges_from(bulk_edges)
# add self-loops
self_loop_list = self._self_loops()
graph.add_edges_from(self_loop_list)
# add edges that cross the boundaries
boundary_edge_list = self._boundary_edges()
graph.add_edges_from(boundary_edge_list)
# a list of edges that depend on the boundary condition
self.boundary_edges = [(edge[0], edge[1]) for edge in boundary_edge_list]
super().__init__(graph)
# default position
self.pos = self._default_position()
def _coordinate_to_index(self, coord: np.ndarray) -> int:
"""Convert the coordinate of a lattice point to an integer for labeling.
When self.size=(l0, l1), then a coordinate (x0, x1) is converted as
x0 + x1*l0.
Args:
coord: Input coordinate to be converted.
Returns:
Return x0 + x1*l0 where coord=np.array([x0, x1]) and self.size=(l0, l1).
"""
base = np.array([np.prod(self._size[:i]) for i in range(self._dim)], dtype=int)
return np.dot(coord, base).item()
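# For example, with self.size = (5, 4) the coordinate np.array([2, 3]) maps to the
# index 2 + 3 * 5 = 17.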
def _self_loops(self) -> list[tuple[int, int, complex]]:
"""Return a list consisting of the self-loops on all the nodes.
Returns:
A list of the self-loops.
"""
onsite_parameter = self._onsite_parameter
num_nodes = self._num_sites_per_cell * np.prod(self._size)
return [(node_a, node_a, onsite_parameter) for node_a in range(num_nodes)]
def _bulk_edges(self) -> list[tuple[int, int, complex]]:
"""Return a list consisting of the edges in the bulk, which don't cross the boundaries.
Returns:
A list of weighted edges that do not cross the boundaries.
"""
edge_parameter = self._edge_parameter
num_sites_per_cell = self._num_sites_per_cell
list_of_edges = []
unit_cell_coordinates = list(product(*map(range, self._size)))
for x, y in unit_cell_coordinates:
# each cell is indexed by its leftmost lattice site
cell_a_idx = self._coordinate_to_index(np.array([x, y]))
# indices of sites within unit cell
cell_a_0 = num_sites_per_cell * cell_a_idx
cell_a_1 = num_sites_per_cell * cell_a_idx + 1
cell_a_2 = num_sites_per_cell * cell_a_idx + 2
# connect sites within a unit cell
list_of_edges.append((cell_a_0, cell_a_1, edge_parameter))
list_of_edges.append((cell_a_1, cell_a_2, edge_parameter))
list_of_edges.append((cell_a_2, cell_a_0, edge_parameter))
# one cell east if not at the east boundary
if x != self._rows - 1:
cell_b_idx = self._coordinate_to_index(np.array([x, y]) + np.array([1, 0]))
cell_b_0 = num_sites_per_cell * cell_b_idx
list_of_edges.append((cell_a_1, cell_b_0, edge_parameter))
# one cell north if not at the north boundary
if y != self._cols - 1:
cell_b_idx = self._coordinate_to_index(np.array([x, y]) + np.array([0, 1]))
cell_b_0 = num_sites_per_cell * cell_b_idx
list_of_edges.append((cell_a_2, cell_b_0, edge_parameter))
# one cell to the west and north if not at the west or north boundary
if x != 0 and y != self._cols - 1:
cell_b_idx = self._coordinate_to_index(np.array([x, y]) + np.array([-1, 1]))
cell_b_1 = num_sites_per_cell * cell_b_idx + 1
list_of_edges.append((cell_a_2, cell_b_1, edge_parameter))
return list_of_edges
def _boundary_edges(self) -> list[tuple[int, int, complex]]:
"""Return a list consisting of the edges that cross the boundaries
depending on the boundary conditions.
Raises:
ValueError: Given boundary condition is invalid values.
Returns:
A list of weighted edges that cross the boundaries.
"""
list_of_edges = []
edge_parameter = self._edge_parameter
num_sites_per_cell = self._num_sites_per_cell
boundary_condition = self._boundary_condition
is_x_periodic = boundary_condition[0] == BoundaryCondition.PERIODIC
is_y_periodic = boundary_condition[1] == BoundaryCondition.PERIODIC
# add edges when the boundary condition is periodic.
# The periodic boundary condition in the x direction.
# It makes sense only when rows is greater than 1.
if is_x_periodic and self._rows > 1:
for y in range(self._cols):
cell_a_idx = self._coordinate_to_index(np.array([self._rows - 1, y]))
cell_a_1 = num_sites_per_cell * cell_a_idx + 1
cell_b_idx = self._coordinate_to_index(np.array([0, y]))
cell_b_0 = num_sites_per_cell * cell_b_idx
list_of_edges.append((cell_a_1, cell_b_0, edge_parameter.conjugate()))
# The periodic boundary condition in the y direction.
# It makes sense only when cols is greater than 1.
if is_y_periodic and self._cols > 1:
for x in range(self._rows):
cell_a_idx = self._coordinate_to_index(np.array([x, self._cols - 1]))
cell_a_2 = num_sites_per_cell * cell_a_idx + 2
cell_b_idx = self._coordinate_to_index(np.array([x, 0]))
cell_b_0 = num_sites_per_cell * cell_b_idx
list_of_edges.append((cell_a_2, cell_b_0, edge_parameter.conjugate()))
if is_x_periodic and is_y_periodic:
# The periodic boundary condition in the diagonal directions.
for x in range(1, self._rows):
cell_a_idx = self._coordinate_to_index(np.array([x, self._cols - 1]))
cell_a_2 = num_sites_per_cell * cell_a_idx + 2
cell_b_idx = self._coordinate_to_index(np.array([(x - 1) % self._rows, 0]))
cell_b_1 = num_sites_per_cell * cell_b_idx + 1
list_of_edges.append((cell_a_2, cell_b_1, edge_parameter.conjugate()))
for y in range(self._cols - 1):
cell_a_idx = self._coordinate_to_index(np.array([0, y]))
cell_a_2 = num_sites_per_cell * cell_a_idx + 2
cell_b_idx = self._coordinate_to_index(
np.array([self._rows - 1, (y + 1) % self._cols])
)
cell_b_1 = num_sites_per_cell * cell_b_idx + 1
list_of_edges.append((cell_a_2, cell_b_1, edge_parameter.conjugate()))
# isolating x = 0, y = cols - 1 to prevent duplicating edges
cell_a_idx = self._coordinate_to_index(np.array([0, self._cols - 1]))
cell_a_2 = num_sites_per_cell * cell_a_idx + 2
cell_b_idx = self._coordinate_to_index(np.array([self._rows - 1, 0]))
cell_b_1 = num_sites_per_cell * cell_b_idx + 1
list_of_edges.append((cell_a_2, cell_b_1, edge_parameter.conjugate()))
for i in range(self._dim):
if not isinstance(boundary_condition[i], BoundaryCondition):
raise ValueError(
f"Invalid `boundary condition` {boundary_condition[i]} is given."
"`boundary condition` must be "
+ " or ".join(str(bc) for bc in BoundaryCondition)
)
return list_of_edges
def _default_position(self) -> dict[int, list[float]]:
"""Return a dictionary of default positions for visualization of a two-dimensional lattice.
Returns:
The keys are the labels of lattice points,
and the values are two-dimensional coordinates.
"""
boundary_condition = self._boundary_condition
num_sites_per_cell = self._num_sites_per_cell
pos = {}
width = np.array([0.0, 0.0])
for i in (0, 1):
if boundary_condition[i] == BoundaryCondition.PERIODIC:
# the positions are shifted along the y-direction
# when the boundary condition in the x-direction is periodic and vice versa.
# The width of the shift is fixed to 0.2.
width[(i + 1) % 2] = 0.2
for cell_idx in range(np.prod(self._size)):
# maps a cell index to a two-dimensional coordinate
# the positions are shifted so that the edges between boundaries can be seen
# for the periodic cases.
cell_coord = np.array(divmod(cell_idx, self._size[0])[::-1]) + width * np.cos(
np.pi
* (np.array(divmod(cell_idx, self._size[0])))
/ (np.array(self._size)[::-1] - 1)
)
for i in range(num_sites_per_cell):
node_i = num_sites_per_cell * cell_idx + i
pos[node_i] = (np.dot(cell_coord, self._basis) + self._cell_positions[i]).tolist()
return pos
def _style_pos(self) -> dict[int, list[float]]:
"""Return a dictionary of positions for visualization of a two-dimensional lattice without
boundaries.
Returns:
The keys are the labels of lattice points,
and the values are two-dimensional coordinates.
"""
num_sites_per_cell = self._num_sites_per_cell
basis = self._basis
cell_positions = self._cell_positions
pos = {}
for cell_idx in range(np.prod(self._size)):
# maps a cell index to a two-dimensional coordinate without applying any boundary
# shift, since the boundary edges are not drawn in this style
cell_coord = np.array(divmod(cell_idx, self._size[0])[::-1])
for i in range(num_sites_per_cell):
node_i = num_sites_per_cell * cell_idx + i
pos[node_i] = (np.dot(cell_coord, basis) + cell_positions[i]).tolist()
return pos
@property
def rows(self) -> int:
"""Number of unit cells in the x direction.
Returns:
The number of rows of the lattice.
"""
return self._rows
@property
def cols(self) -> int:
"""Number of unit cells in the y direction.
Returns:
The number of columns of the lattice.
"""
return self._cols
@property
def size(self) -> tuple[int, int]:
"""Number of unit cells in the x and y direction, respectively.
Returns:
The size of the lattice.
"""
return self._size
@property
def edge_parameter(self) -> complex:
"""Weights on all edges.
Returns:
The parameter for the edges.
"""
return self._edge_parameter
@property
def onsite_parameter(self) -> complex:
"""Weight on the self-loops.
Returns:
The parameter for the self-loops.
"""
return self._onsite_parameter
@property
def boundary_condition(self) -> BoundaryCondition | tuple[BoundaryCondition, BoundaryCondition]:
"""Boundary condition for the entire lattice.
Returns:
The boundary condition.
"""
return self._boundary_condition
def draw_without_boundary(
self,
*,
self_loop: bool = False,
style: LatticeDrawStyle | None = None,
):
r"""Draw the lattice with no edges between the boundaries.
Args:
self_loop: Draw self-loops in the lattice. Defaults to False.
style : Styles for rustworkx.visualization.mpl_draw.
Please see
https://www.rustworkx.org/apiref/rustworkx.visualization.mpl_draw.html
for details.
"""
graph = self.graph
if style is None:
style = LatticeDrawStyle()
elif not isinstance(style, LatticeDrawStyle):
style = LatticeDrawStyle(**style)
if style.pos is None:
style.pos = self._default_position()
graph.remove_edges_from(self.boundary_edges)
self._mpl(
graph=graph,
self_loop=self_loop,
**asdict(style),
)
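# A minimal usage sketch (mirrors the class docstring; drawing assumes matplotlib is
# available):
#
#   kagome = KagomeLattice(2, 2, edge_parameter=1.0, onsite_parameter=0.0)
#   print(kagome.graph.num_nodes())  # 3 sites per unit cell * 4 cells = 12
#   kagome.draw()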
|
qiskit-nature/qiskit_nature/second_q/hamiltonians/lattices/kagome_lattice.py/0
|
{
"file_path": "qiskit-nature/qiskit_nature/second_q/hamiltonians/lattices/kagome_lattice.py",
"repo_id": "qiskit-nature",
"token_count": 7697
}
| 127 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""PolynomialTensor class"""
from __future__ import annotations
from collections.abc import Callable, Mapping
from itertools import product
from numbers import Number
from typing import Iterator, Sequence, Type, cast
import numpy as np
from qiskit.quantum_info.operators.mixins import (
LinearMixin,
GroupMixin,
TolerancesMixin,
)
from qiskit_nature.settings import settings
import qiskit_nature.optionals as _optionals
from qiskit_nature.utils import get_einsum
from .tensor import Tensor
if _optionals.HAS_SPARSE:
# pylint: disable=import-error
from sparse import SparseArray, COO, DOK, GCXS
else:
class COO: # type: ignore
"""Empty COO class
Replacement if sparse.COO is not present.
"""
pass
class DOK: # type: ignore
"""Empty DOK class
Replacement if sparse.DOK is not present.
"""
pass
class GCXS: # type: ignore
"""Empty GCXS class
Replacement if sparse.GCXS is not present.
"""
pass
class SparseArray: # type: ignore
"""Empty SparseArray class
Replacement if sparse.SparseArray is not present.
"""
pass
class PolynomialTensor(LinearMixin, GroupMixin, TolerancesMixin, Mapping):
"""A container class to store arbitrary operator coefficients.
This class generalizes the storing of operator coefficients in tensor format. Actual operators
can be extracted from it using the
:meth:`qiskit_nature.second_q.operators.SparseLabelOp.from_polynomial_tensor` method on the
respective subclasses of the ``SparseLabelOp``.
Internally, this class stores tensors as instances of
:class:`~qiskit_nature.second_q.operators.Tensor`. Refer to its documentation for more details.
The storage format maps from string keys to these ``Tensor`` objects. By design, **no**
assumptions are made about the *contents* of the keys. However, the length of each key
determines the dimension of the tensor to which it maps. For example (using numpy arrays for
the sake of simplicity):
.. code-block:: python
import numpy as np
data = {}
# the empty string maps to a 0-dimensional matrix, i.e. a single number
data[""] = 1.0
# a string of length 1, must map to a 1-dimensional array
data["+"] = np.array([1, 2])
# a string of length 2, must map to a 2-dimensional array
data["+-"] = np.array([[1, 2], [3, 4]])
# ... and so on
In general, the idea is that each character in a key will be associated with the corresponding
axis of the tensor, when an operator gets built from the ``PolynomialTensor`` instance. This
means that the previous example would expand like so:
.. code-block:: python
from qiskit_nature.second_q.operators import FermionicOp, PolynomialTensor
tensor = PolynomialTensor(data)
operator = FermionicOp.from_polynomial_tensor(tensor)
print(operator)
# Fermionic Operator
# number spin orbitals=2, number terms=7
# 1.0
# + 1 * ( +_0 )
# + 2 * ( +_1 )
# + 1 * ( +_0 -_0 )
# + 2 * ( +_0 -_1 )
# + 3 * ( +_1 -_0 )
# + 4 * ( +_1 -_1 )
**Algebra**
This class supports the following basic arithmetic operations: addition, subtraction, scalar
multiplication, and operator multiplication.
For example,
Addition
.. code-block:: python
matrix = np.array([[0, 1], [2, 3]], dtype=float)
0.5 * PolynomialTensor({"+-": matrix}) + PolynomialTensor({"+-": matrix})
Operator multiplication
.. code-block:: python
tensor = PolynomialTensor({"+-": matrix})
print(tensor @ tensor)
Tensor multiplication
.. code-block:: python
print(tensor ^ tensor)
You can also implement more advanced arithmetic via the :meth:`apply` and :meth:`einsum`
methods.
.. code-block:: python
print(PolynomialTensor.apply(np.transpose, tensor))
print(PolynomialTensor.apply(np.conjugate, 1j * tensor))
print(PolynomialTensor.apply(np.kron, tensor, tensor))
print(PolynomialTensor.einsum({"ij,ji": ("+-", "+-", "")}, tensor, tensor))
**Sparse Arrays**
Furthermore, since the ``PolynomialTensor`` is building on top of the
:class:`~qiskit_nature.second_q.operators.Tensor` class, it supports both dense numpy arrays and
sparse arrays. Since it needs to support more than 2-dimensional arrays, we rely on the
`sparse <https://sparse.pydata.org/en/stable/index.html>`_ library.
.. code-block:: python
import sparse as sp
sparse_matrix = sp.as_coo(matrix)
print(PolynomialTensor({"+-": sparse_matrix}))
One can convert between dense and sparse representation of the same tensor via the
:meth:`to_dense` and :meth:`to_sparse` methods, respectively.
"""
def __init__(
self,
data: Mapping[str, np.ndarray | SparseArray | complex | Tensor],
*,
validate: bool = True,
) -> None:
"""
Args:
data: mapping of string-based operator keys to coefficient tensor values. If the values
are not already of type :class:`~qiskit_nature.second_q.operators.Tensor`, they will
automatically be wrapped into one.
validate: when set to False the ``data`` will not be validated. Disable this setting
with care!
Raises:
ValueError: when length of operator key does not match dimensions of value matrix.
ValueError: when value matrix does not have consistent dimensions.
ValueError: when some or all value matrices in ``data`` have different dimensions.
"""
copy_dict: dict[str, Tensor] = {}
dim: int | None = None
for key, value in data.items():
if not isinstance(value, Tensor):
value = Tensor(value)
if validate and len(value.shape) != len(key):
raise ValueError(
f"Data key {key} of length {len(key)} does not match "
f"data value matrix of dimensions {value.shape}"
)
dims = set(value.shape)
if validate and len(dims) > 1:
raise ValueError(
f"For key {key}: dimensions of value matrix are not identical {value.shape}"
)
if dim is None and dims:
# we use the length of the first axis as the dimension of this tensor
dim = value.shape[0]
if validate and len(dims) == 1 and dims.pop() != dim:
raise ValueError(
"Dimensions of value matrices in data dictionary do not all agree with each "
f"other. The inferred dimension is {dim}, violating the shape {value.shape} of "
f"key '{key}'."
)
copy_dict[key] = value
self._data = copy_dict
@property
def register_length(self) -> int | None:
"""The size of the operator that can be generated from this ``PolynomialTensor``."""
for key in self._data:
if key == "":
continue
return self[key].shape[0]
return None
def __repr__(self) -> str:
data_str = f"{dict(self.items())}"
return "PolynomialTensor(" f"{data_str})"
def __str__(self) -> str:
pre = "Polynomial Tensor\n"
ret = " " + "\n ".join([f'"{label}":\n{str(coeff)}' for label, coeff in self.items()])
return pre + ret
@classmethod
def empty(cls) -> PolynomialTensor:
"""Constructs an empty tensor.
Returns:
The empty tensor.
"""
return PolynomialTensor({})
def is_empty(self) -> bool:
"""Returns whether this tensor is empty or not."""
return len(self) == 0
@_optionals.HAS_SPARSE.require_in_call
def is_sparse(self) -> bool:
"""Returns whether all matrices in this tensor are sparse."""
return all(self[key].is_sparse() for key in self if key != "")
def is_dense(self) -> bool:
"""Returns whether all matrices in this tensor are dense."""
return all(self[key].is_dense() for key in self if key != "")
def __getitem__(self, __k: str) -> Tensor:
"""Gets the value from the ``PolynomialTensor``.
Args:
__k: operator key string in the ``PolynomialTensor``.
Returns:
Value corresponding to the operator key ``__k``.
"""
item = self._data.__getitem__(__k)
return item
def __len__(self) -> int:
"""Returns the length of the ``PolynomialTensor``."""
return self._data.__len__()
def __iter__(self) -> Iterator[str]:
"""Returns an iterator of the ``PolynomialTensor``."""
return self._data.__iter__()
def to_dense(self) -> PolynomialTensor:
"""Returns a new instance where all matrices are now dense tensors.
If the instance on which this method was called already fulfilled this requirement, it is
returned unchanged.
"""
if self.is_dense():
return self
_optionals.HAS_SPARSE.require_now("SparseArray")
dense_dict: dict[str, Tensor] = {}
for key, value in self._data.items():
dense_dict[key] = value.to_dense()
return PolynomialTensor(dense_dict, validate=False)
# TODO: change the following type-hint if/when SparseArray dictates the existence of from_numpy
@_optionals.HAS_SPARSE.require_in_call
def to_sparse(
self, *, sparse_type: Type[COO] | Type[DOK] | Type[GCXS] = COO
) -> PolynomialTensor:
"""Returns a new instance where all matrices are now sparse tensors.
If the instance on which this method was called already fulfilled this requirement, it is
returned unchanged.
Args:
sparse_type: the type to use for the conversion to sparse matrices. Note that this will
only be applied to matrices which were previously dense tensors. Sparse arrays of
another type will not be explicitly converted.
Returns:
A new ``PolynomialTensor`` with all its matrices converted to the requested sparse array
type.
"""
if self.is_sparse():
return self
sparse_dict: dict[str, Tensor] = {}
for key, value in self._data.items():
sparse_dict[key] = value.to_sparse(sparse_type=sparse_type)
return PolynomialTensor(sparse_dict, validate=False)
def _multiply(self, other: complex) -> PolynomialTensor:
"""Scalar multiplication of a PolynomialTensor with a scalar.
Args:
other: scalar to be multiplied with the ``PolynomialTensor``.
Returns:
The new ``PolynomialTensor`` product object.
Raises:
TypeError: if ``other`` is not a number.
"""
if not isinstance(other, Number):
raise TypeError(f"other {other} must be a number")
prod_dict: dict[str, Tensor] = {}
for key, matrix in self._data.items():
prod_dict[key] = other * matrix
return PolynomialTensor(prod_dict, validate=False)
def _add(self, other: PolynomialTensor, qargs=None) -> PolynomialTensor:
"""Addition of ``PolynomialTensor`` instances.
Args:
other: second ``PolynomialTensor`` object to be added to the first.
Returns:
The new summed ``PolynomialTensor``.
Raises:
TypeError: when ``other`` is not a ``PolynomialTensor``.
ValueError: when values corresponding to keys in ``other`` and the first
``PolynomialTensor`` object do not match.
"""
if not isinstance(other, PolynomialTensor):
raise TypeError("Incorrect argument type: other should be PolynomialTensor")
sum_dict = {key: value + other._data.get(key, 0) for key, value in self._data.items()}
other_unique = {key: other._data[key] for key in other._data.keys() - self._data.keys()}
sum_dict.update(other_unique)
return PolynomialTensor(sum_dict, validate=False)
def __eq__(self, other: object) -> bool:
"""Check equality of ``PolynomialTensor`` instances.
.. note::
This check only asserts the internal matrix elements for equality but ignores the type
of the matrices. As such, it will indicate equality of two ``PolynomialTensor``
instances even if one contains sparse and the other dense numpy arrays, as long as their
elements are identical.
Args:
other: the second ``PolynomialTensor`` object to be compared with the first.
Returns:
True when the ``PolynomialTensor`` objects are equal, False when unequal.
"""
if not isinstance(other, PolynomialTensor):
return False
if self._data.keys() != other._data.keys():
return False
for key, value in self._data.items():
other_value = other._data[key]
if value != other_value:
return False
return True
def equiv(self, other: object) -> bool:
"""Check equivalence of ``PolynomialTensor`` instances.
.. note::
This check only asserts the internal matrix elements for equivalence but ignores the
type of the matrices. As such, it will indicate equivalence of two ``PolynomialTensor``
instances even if one contains sparse and the other dense numpy arrays, as long as their
elements match.
Args:
other: the second ``PolynomialTensor`` object to be compared with the first.
Returns:
True when the ``PolynomialTensor`` objects are equivalent, False when not.
"""
if not isinstance(other, PolynomialTensor):
return False
if self._data.keys() != other._data.keys():
return False
for key, value in self._data.items():
other_value = other._data[key]
if not value.equiv(other_value):
return False
return True
def compose(
self, other: PolynomialTensor, qargs: None = None, front: bool = False
) -> PolynomialTensor:
r"""Returns the matrix multiplication with another ``PolynomialTensor``.
Args:
other: the other PolynomialTensor.
qargs: UNUSED.
front: If ``True``, composition uses right matrix multiplication, otherwise left
multiplication is used (the default).
Raises:
NotImplementedError: when the two tensors do not have the same :attr:`register_length`.
Returns:
The tensor resulting from the composition.
.. note::
Composition (``&``) by default is defined as `left` matrix multiplication for operators,
while ``@`` (equivalent to :meth:`dot`) is defined as `right` matrix multiplication.
This means that ``A & B == A.compose(B)`` is equivalent to ``B @ A == B.dot(A)`` when
``A`` and ``B`` are of the same type.
Setting the ``front=True`` keyword argument changes this to `right` matrix
multiplication which is equivalent to the :meth:`dot` method
``A.dot(B) == A.compose(B, front=True)``.
"""
a = self if front else other
b = other if front else self
if a.register_length != b.register_length:
raise NotImplementedError()
new_data: dict[str, Tensor] = {}
for akey, bkey in product(a, b):
new_key = akey + bkey
atensor = a[akey]
btensor = b[bkey]
outer = atensor.compose(btensor, qargs=qargs, front=True)
if new_key in new_data:
new_data[new_key] = new_data[new_key] + outer
else:
new_data[new_key] = outer
return PolynomialTensor(new_data)
def tensor(self, other: PolynomialTensor) -> PolynomialTensor:
r"""Returns the tensor product with another ``PolynomialTensor``.
Args:
other: the other PolynomialTensor.
Raises:
NotImplementedError: when the two tensors do not have the same :attr:`register_length`.
Returns:
The tensor resulting from the tensor product, :math:`self \otimes other`.
.. note::
The tensor product can be obtained using the ``^`` binary operator.
Hence ``a.tensor(b)`` is equivalent to ``a ^ b``.
.. note::
Tensor uses reversed operator ordering to :meth:`expand`.
For two tensors of the same type ``a.tensor(b) = b.expand(a)``.
"""
return self._tensor(self, other)
def expand(self, other: PolynomialTensor) -> PolynomialTensor:
r"""Returns the reverse-order tensor product with another ``PolynomialTensor``.
Args:
other: the other PolynomialTensor.
Raises:
NotImplementedError: when the two tensors do not have the same :attr:`register_length`.
Returns:
The tensor resulting from the tensor product, :math:`other \otimes self`.
.. note::
Expand is the opposite operator ordering to :meth:`tensor`.
For two tensors of the same type ``a.expand(b) = b.tensor(a)``.
"""
return self._tensor(other, self)
@classmethod
def _tensor(cls, a: PolynomialTensor, b: PolynomialTensor) -> PolynomialTensor:
if a.register_length != b.register_length:
raise NotImplementedError()
new_data: dict[str, Tensor] = {}
for akey, bkey in product(a, b):
atensor = a[akey]
btensor = b[bkey]
einsum = atensor.tensor(btensor)
new_key = akey + bkey
if new_key in new_data:
new_data[new_key] = new_data[new_key] + einsum
else:
new_data[new_key] = einsum
return PolynomialTensor(new_data)
@classmethod
def apply(
cls,
function: Callable[..., np.ndarray | SparseArray | complex],
*operands: PolynomialTensor,
multi: bool = False,
validate: bool = True,
) -> PolynomialTensor | list[PolynomialTensor]:
"""Applies the provided function to the common set of keys of the provided tensors.
The usage of this method is best explained by some examples:
.. code-block:: python
import numpy as np
from qiskit_nature.second_q.operators import PolynomialTensor
rand_a = np.random.random((2, 2))
rand_b = np.random.random((2, 2))
a = PolynomialTensor({"+-": rand_a})
b = PolynomialTensor({"+": np.random.random(2), "+-": rand_b})
# transpose
a_transpose = PolynomialTensor.apply(np.transpose, a)
print(a_transpose == PolynomialTensor({"+-": rand_a.transpose()})) # True
# conjugate
a_complex = 1j * a
a_conjugate = PolynomialTensor.apply(np.conjugate, a_complex)
print(a_conjugate == PolynomialTensor({"+-": -1j * rand_a})) # True
# kronecker product
ab_kron = PolynomialTensor.apply(np.kron, a, b)
print(ab_kron == PolynomialTensor({"+-": np.kron(rand_a, rand_b)})) # True
# Note that ab_kron does NOT contain the "+" key (nor any cross-term such as "+-+") although b contained the
# "+" key. That is because the function only gets applied to the keys which are common
# to all tensors passed to it.
# computing eigenvectors
hermi_a = np.array([[1, -2j], [2j, 5]])
a = PolynomialTensor({"+-": hermi_a})
_, eigenvectors = PolynomialTensor.apply(np.linalg.eigh, a, multi=True, validate=False)
print(eigenvectors == PolynomialTensor({"+-": np.linalg.eigh(hermi_a)[1]})) # True
.. note::
The provided function will only be applied to the internal arrays of the common keys of
all provided ``PolynomialTensor`` instances. That means that no cross-products will be
generated.
Args:
function: the function to apply to the internal arrays of the provided operands. This
function must take numpy (or sparse) arrays as its positional arguments. The number
of arguments must match the number of provided operands.
operands: a sequence of ``PolynomialTensor`` instances on which to operate.
multi: when set to True this indicates that the provided numpy function will return
multiple new numpy arrays which will each be wrapped into a ``PolynomialTensor``
instance separately.
validate: when set to False the ``data`` will not be validated. Disable this setting
with care!
Returns:
A new ``PolynomialTensor`` instance with the resulting arrays.
"""
common_keys = set.intersection(*(set(op) for op in operands))
new_tensors: list[dict[str, Tensor]] = [{}]
for key in common_keys:
results = cast(Tensor, function(*(op[key] for op in operands)))
if multi:
for idx, res in enumerate(results):
if idx >= len(new_tensors):
new_tensors.append({})
new_tensors[idx][key] = res
else:
new_tensors[0][key] = results
if multi:
return [cls(tensor, validate=validate) for tensor in new_tensors]
return cls(new_tensors[0], validate=validate)
@classmethod
def stack(
cls,
function: Callable[..., np.ndarray | SparseArray | Number],
operands: Sequence[PolynomialTensor],
*,
validate: bool = True,
) -> PolynomialTensor:
"""Stacks the provided sequence of tensors using the given numpy stacking function.
The usage of this method is best explained by some examples:
.. code-block:: python
import numpy as np
from qiskit_nature.second_q.operators import PolynomialTensor
rand_a = np.random.random((2, 2))
rand_b = np.random.random((2, 2))
a = PolynomialTensor({"+-": rand_a})
b = PolynomialTensor({"+": np.random.random(2), "+-": rand_b})
# np.hstack
ab_hstack = PolynomialTensor.stack(np.hstack, [a, b], validate=False)
print(ab_hstack == PolynomialTensor({"+-": np.hstack([rand_a, rand_b])}, validate=False)) # True
# np.vstack
ab_vstack = PolynomialTensor.stack(np.vstack, [a, b], validate=False)
print(ab_vstack == PolynomialTensor({"+-": np.vstack([rand_a, rand_b])}, validate=False)) # True
.. note::
The provided function will only be applied to the internal arrays of the common keys of
all provided ``PolynomialTensor`` instances. That means that no cross-products will be
generated.
.. note::
When stacking arrays this will likely lead to array shapes which would fail the shape
validation check (as you can see from the examples above where we explicitly disable
them). This is considered an advanced use case which is why the user is left to disable
this check themselves, to ensure they know what they are doing.
Args:
function: the stacking function to apply to the internal arrays of the provided
operands. This function must take a sequence of numpy (or sparse) arrays as its
first argument. You should use :code:`functools.partial` if you need to provide
keyword arguments (e.g. :code:`partial(np.stack, axis=-1)`). Common methods to use
here are :func:`numpy.hstack` and :func:`numpy.vstack`.
operands: a sequence of ``PolynomialTensor`` instances on which to operate.
validate: when set to False the ``data`` will not be validated. Disable this setting
with care!
Returns:
A new ``PolynomialTensor`` instance with the resulting arrays.
"""
common_keys = set.intersection(*(set(op) for op in operands))
new_data: dict[str, Tensor | Number] = {}
for key in common_keys:
new_data[key] = cast(Tensor, function([*(op[key] for op in operands)]))
return cls(new_data, validate=validate)
def split(
self,
function: Callable[..., np.ndarray | SparseArray | Number],
indices_or_sections: int | Sequence[int],
*,
validate: bool = True,
) -> list[PolynomialTensor]:
"""Splits the acted on tensor instance using the given numpy splitting function.
The usage of this method is best explained by some examples:
.. code-block:: python
import numpy as np
from qiskit_nature.second_q.operators import PolynomialTensor
rand_ab = np.random.random((4, 4))
ab = PolynomialTensor({"+-": rand_ab})
# np.hsplit
a, b = ab.split(np.hsplit, [2], validate=False)
print(a == PolynomialTensor({"+-": np.hsplit(ab, [2])[0], validate=False)})) # True
print(b == PolynomialTensor({"+-": np.hsplit(ab, [2])[1], validate=False)})) # True
# np.vsplit
a, b = ab.split(np.vsplit, [2], validate=False)
print(a == PolynomialTensor({"+-": np.vsplit(ab, [2])[0], validate=False)})) # True
print(b == PolynomialTensor({"+-": np.vsplit(ab, [2])[1], validate=False)})) # True
.. note::
When splitting arrays this will likely lead to array shapes which would fail the shape
validation check (as you can see from the examples above where we explicitly disable
them). This is considered an advanced use case which is why the user is left to disable
this check themselves, to ensure they know what they are doing.
Args:
function: the splitting function to use. This function must take a single numpy (or
sparse) array as its first input followed by a sequence of indices to split on.
You should use :code:`functools.partial` if you need to provide keyword arguments
(e.g. :code:`partial(np.split, axis=-1)`). Common methods to use here are
:func:`numpy.hsplit` and :func:`numpy.vsplit`.
indices_or_sections: a single index or sequence of indices to split on.
validate: when set to False the ``data`` will not be validated. Disable this setting
with care!
Returns:
New ``PolynomialTensor`` instances containing the split arrays.
"""
new_tensors: list[dict[str, Tensor | Number]] = []
for key, arr in self._data.items():
for idx, new_arr in enumerate(
function(arr, indices_or_sections) # type: ignore[arg-type]
):
if idx < len(new_tensors):
new_tensors[idx][key] = new_arr
else:
new_tensors.append({key: new_arr})
return [self.__class__(new_data, validate=validate) for new_data in new_tensors]
@classmethod
def einsum(
cls,
einsum_map: dict[str, tuple[str, ...]],
*operands: PolynomialTensor,
validate: bool = True,
) -> PolynomialTensor:
"""Applies the various Einsum convention operations to the provided tensors.
This method wraps the :meth:`numpy.einsum` function, allowing very complex operations to be
applied efficiently to the matrices stored inside the provided ``PolynomialTensor``
operands.
As an example, let us compute the exact exchange term of an
:class:`qiskit_nature.second_q.hamiltonians.ElectronicEnergy` hamiltonian:
.. code-block:: python
# a PolynomialTensor containing the two-body terms of an ElectronicEnergy hamiltonian
two_body = PolynomialTensor({"++--": ...})
# an electronic density:
density = PolynomialTensor({"+-": ...})
# computes the ElectronicEnergy.exchange operator
exchange = PolynomialTensor.einsum(
{"pqrs,qs->pr": ("++--", "+-", "+-")},
two_body,
density,
)
# result will be contained in exchange["+-"]
Another example is the mapping from the AO to MO basis, as implemented by the
:class:`qiskit_nature.second_q.transformers.BasisTransformer`.
.. code-block:: python
# the one- and two-body integrals of a hamiltonian
hamiltonian = PolynomialTensor({"+-": ..., "++--": ...})
# the AO-to-MO transformation coefficients
mo_coeff = PolynomialTensor({"+-": ...})
einsum_map = {
"jk,ji,kl->il": ("+-", "+-", "+-", "+-"),
"prsq,pi,qj,rk,sl->iklj": ("++--", "+-", "+-", "+-", "+-", "++--"),
}
transformed = PolynomialTensor.einsum(
einsum_map, hamiltonian, mo_coeff, mo_coeff, mo_coeff, mo_coeff
)
# results will be contained in transformed["+-"] and transformed["++--"], respectively
.. note::
:class:`sparse.SparseArray` supports ``opt_einsum.contract`` if ``opt_einsum`` is installed.
It does not support ``numpy.einsum``. In this case, the resultant
``PolynomialTensor`` will contain all dense numpy arrays. If a user would like to work
with a sparse array instead, they should install ``opt_einsum`` or
they should convert it explicitly using the :meth:`to_sparse` method.
Args:
einsum_map: a dictionary, mapping from :meth:`numpy.einsum` subscripts to a tuple of
strings. These strings correspond to the keys of matrices to be extracted from the
provided ``PolynomialTensor`` operands. The last string in this tuple indicates the
key under which to store the result in the returned ``PolynomialTensor``.
operands: a sequence of ``PolynomialTensor`` instances on which to operate.
validate: when set to False the ``data`` will not be validated. Disable this setting
with care!
Returns:
A new ``PolynomialTensor``.
"""
einsum_func, uses_sparse = get_einsum()
operand_list = list(operands) if uses_sparse else [op.to_dense() for op in operands]
new_data: dict[str, Tensor] = {}
for einsum, terms in einsum_map.items():
*inputs, output = terms
try:
ops = []
for idx, term in enumerate(inputs):
op = operand_list[idx]._data[term]
ops.append(op)
result = einsum_func(einsum, *ops, optimize=settings.optimize_einsum)
except KeyError:
continue
if output in new_data:
new_data[output] = new_data[output] + result
else:
new_data[output] = result
return cls(new_data, validate=validate)
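# A minimal round-trip sketch combining ``split`` and ``stack`` (mirrors the docstring
# examples above; validation is disabled because the intermediate arrays are non-square):
#
#   import numpy as np
#
#   ab = PolynomialTensor({"+-": np.random.random((4, 4))})
#   a, b = ab.split(np.hsplit, [2], validate=False)
#   restored = PolynomialTensor.stack(np.hstack, [a, b], validate=False)
#   print(restored.equiv(ab))  # True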
|
qiskit-nature/qiskit_nature/second_q/operators/polynomial_tensor.py/0
|
{
"file_path": "qiskit-nature/qiskit_nature/second_q/operators/polynomial_tensor.py",
"repo_id": "qiskit-nature",
"token_count": 13683
}
| 128 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Lattice Model Problem class."""
from __future__ import annotations
from typing import cast, Union
from qiskit_algorithms import EigensolverResult, MinimumEigensolverResult
from qiskit_nature.second_q.hamiltonians import LatticeModel
from qiskit_nature.second_q.properties import Interpretable
from .base_problem import BaseProblem
from .lattice_model_result import LatticeModelResult
from .lattice_properties_container import LatticePropertiesContainer
from .eigenstate_result import EigenstateResult
class LatticeModelProblem(BaseProblem):
"""A lattice model problem.
This class specifically deals with handling of :class:`.LatticeModel` type hamiltonians.
The following attributes can be read and updated once the ``LatticeModelProblem`` object has
been constructed.
Attributes:
properties (LatticePropertiesContainer): a container for additional observable operator
factories.
"""
def __init__(self, hamiltonian: LatticeModel) -> None:
"""
Args:
hamiltonian: A lattice model class to create second quantized operators.
Raises:
TypeError: if the provided ``hamiltonian`` is not of type :class:`.LatticeModel`.
"""
super().__init__(hamiltonian)
self.properties: LatticePropertiesContainer = LatticePropertiesContainer()
@property
def hamiltonian(self) -> LatticeModel:
"""Returns the hamiltonian wrapped by this problem."""
return cast(LatticeModel, self._hamiltonian)
def interpret(
self,
raw_result: Union[EigenstateResult, EigensolverResult, MinimumEigensolverResult],
) -> LatticeModelResult:
"""Interprets a raw result in the context of this transformation.
Args:
raw_result: a raw result to be interpreted
Returns:
A lattice model result.
"""
eigenstate_result = super().interpret(raw_result)
result = LatticeModelResult()
result.combine(eigenstate_result)
if isinstance(self.hamiltonian, Interpretable):
self.hamiltonian.interpret(result)
for prop in self.properties:
if isinstance(prop, Interpretable):
prop.interpret(result)
result.computed_lattice_energies = eigenstate_result.eigenvalues
return result
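# A minimal usage sketch (illustrative names; assumes ``lattice_model`` is a LatticeModel
# instance, e.g. an IsingModel, and ``solver`` is a configured GroundStateEigensolver):
#
#   problem = LatticeModelProblem(lattice_model)
#   result = solver.solve(problem)
#   print(result.computed_lattice_energies)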
|
qiskit-nature/qiskit_nature/second_q/problems/lattice_model_problem.py/0
|
{
"file_path": "qiskit-nature/qiskit_nature/second_q/problems/lattice_model_problem.py",
"repo_id": "qiskit-nature",
"token_count": 989
}
| 129 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
r"""Generator functions for various spin-related operators.
When dealing with non-orthonormal orbitals, you need to make sure that you include the `overlap`
matrices when using the methods below. This ensures that the operators can resolve any spin
contamination that may be present in your orbitals.
The overlap matrices that you provide have to be computed in the same basis in which the spin
operator is encoded. If you are working in the molecular orbital (MO) basis, the overlap can be
easily constructed starting from the atomic orbital (AO) overlap matrix, which can be obtained from
any standard quantum chemistry program (for example from the `get_ovlp()` method in PySCF). This AO
overlap matrix can be transformed to the MO basis using the AO-to-MO transformation matrix, $C$,
according to the following equation:
.. math::
s^{MO} = C^T s^{AO} C.
For restricted spin orbitals (i.e. :math:`C_\alpha == C_\beta`), the equation above simplifies to
the identity matrix (because the MOs will be orthonormal), in which case you can omit the `overlap`
arguments below. Otherwise, you must include the correct overlap. For example, the overlap-matrix
between the $\alpha$- and $\beta$-spin orbitals is:
.. math::
s^{\alpha,\beta} = C_\alpha^T s^{AO} C_\beta.
"""
from __future__ import annotations
import numpy as np
from qiskit_nature.second_q.operators import FermionicOp
def s_plus_operator(num_spatial_orbitals: int, overlap: np.ndarray | None = None) -> FermionicOp:
r"""Constructs the $S^+$ operator.
The $S^+$ operator is defined as:
.. math::
S^+ = \sum_{i,j} s_{ij}^{\alpha,\beta} \hat{a}_{i}^{\dagger} \hat{a}_{j},
where $s$ denotes the overlap-matrix between the $\alpha$- and $\beta$-spin orbitals.
Note that for orthonormal orbitals this overlap-matrix will become the identity matrix,
simplifying the operator above to become:
.. math::
S^+ = \sum_{i=1}^{n} \hat{a}_{i}^{\dagger} \hat{a}_{i+n},
where, $n$ denotes the index of the *spatial* orbital. Since Qiskit Nature employs the blocked
spin-ordering convention, the creation operator above is applied to the :math:`\alpha`-spin
orbital and the annihilation operator is applied to the corresponding :math:`\beta`-spin
orbital.
Args:
num_spatial_orbitals: the size of the operator which to generate.
overlap: the overlap-matrix between the $\alpha$- and $\beta$-spin orbitals. When this is
`None`, the overlap-matrix is assumed to be identity, resulting in the second definition
above.
Returns:
The $S^+$ operator of the requested size.
"""
if overlap is None:
op = FermionicOp(
{f"+_{orb} -_{orb + num_spatial_orbitals}": 1.0 for orb in range(num_spatial_orbitals)}
)
else:
op = FermionicOp(
{
f"+_{idx[0]} -_{idx[1] + num_spatial_orbitals}": overlap[idx]
for idx in np.ndindex(*overlap.shape)
}
)
return op.simplify()
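# For example, for two spatial orbitals and orthonormal orbitals, ``s_plus_operator(2)``
# contains the terms ``+_0 -_2`` and ``+_1 -_3`` (each with coefficient 1.0), i.e. every
# beta-spin electron is raised into its corresponding alpha-spin orbital.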
def s_minus_operator(num_spatial_orbitals: int, overlap: np.ndarray | None = None) -> FermionicOp:
r"""Constructs the $S^-$ operator.
The $S^-$ operator is defined as:
.. math::
S^- = \sum_{i,j} s_{ij}^{\beta,\alpha} \hat{a}_{i}^{\dagger} \hat{a}_{j},
where $s$ denotes the overlap-matrix between the $\beta$- and $\alpha$-spin orbitals.
.. note::
The `overlap` input to this method is related to the input of the other methods
(:meth:`s_plus_operator`, :meth:`s_x_operator`, and :meth:`s_y_operator`) by its transpose,
because the following relation holds:
.. math::
s_{ij}^{\beta,\alpha} = \left(s_{ij}^{\alpha,\beta}\right)^T.
Note that for orthonormal orbitals this overlap-matrix will become the identity matrix,
simplifying the operator above to become:
.. math::
S^- = \sum_{i=1}^{n} \hat{a}_{i+n}^{\dagger} \hat{a}_{i},
where, $n$ denotes the index of the *spatial* orbital. Since Qiskit Nature employs the blocked
spin-ordering convention, the creation operator above is applied to the :math:`\beta`-spin
orbital and the annihilation operator is applied to the corresponding :math:`\alpha`-spin
orbital.
Args:
num_spatial_orbitals: the size of the operator which to generate.
overlap: the overlap-matrix between the $\beta$- and $\alpha$-spin orbitals. When this is
`None`, the overlap-matrix is assumed to be identity, resulting in the second definition
above.
Returns:
The $S^-$ operator of the requested size.
"""
if overlap is None:
op = FermionicOp(
{f"+_{orb + num_spatial_orbitals} -_{orb}": 1.0 for orb in range(num_spatial_orbitals)}
)
else:
op = FermionicOp(
{
f"+_{idx[0] + num_spatial_orbitals} -_{idx[1]}": overlap[idx]
for idx in np.ndindex(*overlap.shape)
}
)
return op.simplify()
def s_x_operator(num_spatial_orbitals: int, overlap: np.ndarray | None = None) -> FermionicOp:
r"""Constructs the $S^x$ operator.
The $S^x$ operator is defined as:
.. math::
S^x = \frac{1}{2} \left(S^+ + S^-\right)
Args:
num_spatial_orbitals: the size of the operator which to generate.
overlap: the overlap-matrix between the $\alpha$- and $\beta$-spin orbitals. When this is
`None`, the overlap-matrix is assumed to be identity.
Returns:
The $S^x$ operator of the requested size.
"""
if overlap is None:
op = FermionicOp(
{
f"+_{orb} -_{(orb + num_spatial_orbitals) % (2*num_spatial_orbitals)}": 0.5
for orb in range(2 * num_spatial_orbitals)
}
)
else:
op = 0.5 * (
s_plus_operator(num_spatial_orbitals, overlap)
+ s_minus_operator(num_spatial_orbitals, overlap.T)
)
return op
def s_y_operator(num_spatial_orbitals: int, overlap: np.ndarray | None = None) -> FermionicOp:
r"""Constructs the $S^y$ operator.
The $S^y$ operator is defined as:
.. math::
S^y = -\frac{i}{2} \left(S^+ - S^-\right)
Args:
num_spatial_orbitals: the size of the operator which to generate.
overlap: the overlap-matrix between the $\alpha$- and $\beta$-spin orbitals. When this is
`None`, the overlap-matrix is assumed to be identity.
Returns:
The $S^y$ operator of the requested size.
"""
if overlap is None:
op = FermionicOp(
{
f"+_{orb} -_{(orb + num_spatial_orbitals) % (2*num_spatial_orbitals)}": 0.5j
* (-1.0) ** (orb < num_spatial_orbitals)
for orb in range(2 * num_spatial_orbitals)
}
)
else:
op = -0.5j * (
s_plus_operator(num_spatial_orbitals, overlap)
- s_minus_operator(num_spatial_orbitals, overlap.T)
)
return op
def s_z_operator(num_spatial_orbitals: int) -> FermionicOp:
r"""Constructs the $S^z$ operator.
The $S^z$ operator is defined as:
.. math::
S^z = \frac{1}{2} \sum_{i=1}^{n} \left(
\hat{a}_{i}^{\dagger}\hat{a}_{i} - \hat{a}_{i+n}^{\dagger}\hat{a}_{i+n}
\right),
where, $n$ denotes the index of the *spatial* orbital. Since Qiskit Nature employs the blocked
spin-ordering convention, this means that the above corresponds to evaluating the number
operator (:math:`\hat{a}^{\dagger}\hat{a}`) once on the :math:`\alpha`-spin orbital and once on
the :math:`\beta`-spin orbital and taking their difference.
.. note::
Contrary to the other methods in this module, this one does not require the inclusion of an
overlap-matrix for non-orthonormal orbitals, because it does not mix the $\alpha$- and
$\beta$-spin contributions.
Args:
num_spatial_orbitals: the size of the operator which to generate.
Returns:
The $S^z$ operator of the requested size.
"""
op = FermionicOp(
{
f"+_{orb} -_{orb}": 0.5 * (-1.0) ** (orb >= num_spatial_orbitals)
for orb in range(2 * num_spatial_orbitals)
}
)
return op
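# A minimal sketch for assembling the total-spin operator S^2 = S^- S^+ + S^z (S^z + 1)
# from the generators above (assumes orthonormal orbitals, so the overlaps are omitted,
# and the usual FermionicOp arithmetic):
#
#   num_spatial_orbitals = 2
#   s_z = s_z_operator(num_spatial_orbitals)
#   s_2 = (
#       s_minus_operator(num_spatial_orbitals) @ s_plus_operator(num_spatial_orbitals)
#       + s_z @ s_z
#       + s_z
#   ).simplify()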
|
qiskit-nature/qiskit_nature/second_q/properties/s_operators.py/0
|
{
"file_path": "qiskit-nature/qiskit_nature/second_q/properties/s_operators.py",
"repo_id": "qiskit-nature",
"token_count": 3512
}
| 130 |
---
upgrade:
- |
The internal data structure of :class:`~qiskit_nature.operators.second_quantization.FermionicOp`
has been changed. As a result, more data types are now accepted at initialization.
The ascending order constraint and the no-same index constraint have been removed.
In addition, the dense and sparse labels are now automatically detected by the existence of
underscores.
The property `display_format` of :class:`~qiskit_nature.operators.second_quantization.FermionicOp`
is added. There are two modes `dense` and `sparse`.
This display format can be switched by the property `FermionicOp.display_format`.
|
qiskit-nature/releasenotes/notes/0.2/add-fermionicop-label-mode-a1faa97811dac9b4.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.2/add-fermionicop-label-mode-a1faa97811dac9b4.yaml",
"repo_id": "qiskit-nature",
"token_count": 187
}
| 131 |
---
fixes:
- |
The :class:`~qiskit_nature.runtime.VQEProgram` does support the evaluation of auxiliary
operators at the final state, but the
:meth:`qiskit_nature.runtime.VQEProgram.supports_aux_operators` method previously returned
`False` instead of `True`.
|
qiskit-nature/releasenotes/notes/0.2/vqeprogram-aux-operators-9db380d8e20b1f1d.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.2/vqeprogram-aux-operators-9db380d8e20b1f1d.yaml",
"repo_id": "qiskit-nature",
"token_count": 98
}
| 132 |
---
fixes:
- |
Fixes QEOM so that, when using parity mapping with two_qubit_reduction,
or Z2 symmetries with any mapping, the excited states are computed
as expected.
Fixes the electronic structure problem sector locator such that the 'auto'
Z2 symmetry conversion of the qubit converter results in the ground state
for such problems, rather than some other value caused by incorrect sector selection.
|
qiskit-nature/releasenotes/notes/0.3/qeom_fix-4341f74549f22891.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.3/qeom_fix-4341f74549f22891.yaml",
"repo_id": "qiskit-nature",
"token_count": 115
}
| 133 |
---
features:
- |
The new setting `qiskit_nature.settings.optimize_einsum` was added which allows enabling the
`optimize` argument in `numpy.einsum` calls with more than 2 operands. This is known to yield
significant computational efficiency increases at the expense of higher memory consumption.
The setting defaults to `True`.
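For example, the setting can be disabled to trade the speed-up for lower memory
consumption (a short sketch; the ``settings`` object is imported from the
``qiskit_nature.settings`` module):
.. code-block:: python
from qiskit_nature.settings import settings
settings.optimize_einsum = False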
|
qiskit-nature/releasenotes/notes/0.4/einsum-optimization-27ae7457599e578e.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.4/einsum-optimization-27ae7457599e578e.yaml",
"repo_id": "qiskit-nature",
"token_count": 96
}
| 134 |
---
features:
- |
Add the option to initialize a :class:`~qiskit_nature.problems.second_quantization.lattice.Lattice`
from a ``networkx.Graph`` object, which will be internally converted to a ``retworkx.PyGraph``
for performance.
For example, you can now construct a lattice as
.. code-block:: python
import networkx as nx
from qiskit_nature.problems.second_quantization.lattice import Lattice
# 3-regular random graph on 6 nodes
graph = nx.generators.random_graphs.random_regular_graph(3, n=6)
lattice = Lattice(graph)
|
qiskit-nature/releasenotes/notes/0.4/lattice-from-networkx-20c7c8119af77f36.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.4/lattice-from-networkx-20c7c8119af77f36.yaml",
"repo_id": "qiskit-nature",
"token_count": 219
}
| 135 |
---
features:
- |
Adds `.SparseLabelOp.equiv` for checking approximate equality
between two SparseLabelOps.
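For example (a short sketch; ``FermionicOp`` is one such ``SparseLabelOp`` subclass):
.. code-block:: python
from qiskit_nature.second_q.operators import FermionicOp
op_a = FermionicOp({"+_0 -_1": 1.0})
op_b = FermionicOp({"+_0 -_1": 1.0 + 1e-12})
print(op_a.equiv(op_b)) # True, since the coefficients agree within the class tolerance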
|
qiskit-nature/releasenotes/notes/0.5/approx-eq-15521c2b1ece996b.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.5/approx-eq-15521c2b1ece996b.yaml",
"repo_id": "qiskit-nature",
"token_count": 39
}
| 136 |
---
fixes:
- |
Fixes the behavior of ``FermionicOp.simplify`` when called on a zero-operator.
|
qiskit-nature/releasenotes/notes/0.5/fix-simplify-zero-fermionicop-f2f68e6f19613468.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.5/fix-simplify-zero-fermionicop-f2f68e6f19613468.yaml",
"repo_id": "qiskit-nature",
"token_count": 35
}
| 137 |
---
features:
- |
Three new methods for creating instances of
:class:`~qiskit_nature.second_q.properties.ElectronicDensity` have been
added:
1. constructing an empty (or all-zero) density of a given size:
.. code-block:: python
empty = ElectronicDensity.empty(num_spatial_orbitals=4)
2. constructing an identity density, meaning that the 1-body matrices are
initialized with identity matrices
.. code-block:: python
identity = ElectronicDensity.identity(num_spatial_orbitals=4)
3. constructing from a provided number of particles. This is a shorter
variant of the already existing ``from_orbital_occupation`` method for the
most common use-case.
.. code-block:: python
num_spatial_orbitals = 4
num_particles = (2, 2)
two_and_two = ElectronicDensity.from_particle_number(num_spatial_orbitals, num_particles)
# for example now the 1-body matrices will be:
# [[1, 0, 0, 0],
# [0, 1, 0, 0],
# [0, 0, 0, 0],
# [0, 0, 0, 0]]
All of the methods above take the optional keyword-argument ``include_rdm2``
which determines whether or not the 2-body matrices are computed based on the
constructed 1-body matrices. By default, this is set to ``True``.
|
qiskit-nature/releasenotes/notes/0.6/feat-electronic-denisty-initializations-d1bd5131c5776418.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.6/feat-electronic-denisty-initializations-d1bd5131c5776418.yaml",
"repo_id": "qiskit-nature",
"token_count": 504
}
| 138 |
---
fixes:
- |
Fixes a bug in which :class:`~.BogoliubovTransform` would sometimes throw an error due to an inability to
cast complex numbers to floats.
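
    For reference, a hedged sketch of the kind of input that exercises this code path (the
    constructor call and the example matrix are assumptions, not part of this note):

    .. code-block:: python

        import numpy as np

        from qiskit_nature.second_q.circuit.library import BogoliubovTransform

        # a complex-valued orbital rotation; complex entries previously could trigger the cast error
        unitary = np.array([[1, 1j], [1j, 1]]) / np.sqrt(2)
        circuit = BogoliubovTransform(unitary)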
|
qiskit-nature/releasenotes/notes/0.6/givens-fix-a12cb547cf5a1be4.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.6/givens-fix-a12cb547cf5a1be4.yaml",
"repo_id": "qiskit-nature",
"token_count": 50
}
| 139 |
---
features:
- |
    Introduced a new feature that implements the bosonic operator :class:`.BosonicOp`.
    Its functionality is analogous to that of the :class:`.FermionicOp`, but for commuting bosonic particles.
    It should be used to represent a bosonic operator. For example, the boson number
    operator can be constructed as follows:
.. code-block:: python
from qiskit_nature.second_q.operators import BosonicOp
bosonic_op = BosonicOp({'+_0 -_0': 1}, num_modes=1)
Due to the nature of bosonic particles, this class uses the commutator relations instead of the
anti-commutator ones (used by fermionic particles).
- |
    In order to use the bosonic operator for quantum applications, this feature also introduces the
    bosonic linear mapper, which allows mapping the BosonicOp to the qubit space. This mapper is based
    on `this paper <https://link.aps.org/doi/10.1103/PhysRevResearch.3.043212>`_.
    For example, the mapper can be used as follows:
.. code-block:: python
from qiskit_nature.second_q.mappers import BosonicLinearMapper
mapper = BosonicLinearMapper(truncation=1)
        qubit_op = mapper.map(bosonic_op)  # the BosonicOp constructed in the example above
|
qiskit-nature/releasenotes/notes/0.7/bosonic-operator-and-mapper-45bfde873f092681.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.7/bosonic-operator-and-mapper-45bfde873f092681.yaml",
"repo_id": "qiskit-nature",
"token_count": 385
}
| 140 |
---
features:
- |
Added linear algebra utilities for performing the double-factorization of a two-body tensor:
- :func:`qiskit_nature.utils.double_factorized`
- :func:`qiskit_nature.utils.modified_cholesky`
|
qiskit-nature/releasenotes/notes/0.7/low-rank-linalg-86da86096d6122d8.yaml/0
|
{
"file_path": "qiskit-nature/releasenotes/notes/0.7/low-rank-linalg-86da86096d6122d8.yaml",
"repo_id": "qiskit-nature",
"token_count": 81
}
| 141 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2021, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the UCC Ansatz."""
from test import QiskitNatureTestCase
import unittest
from ddt import data, ddt, unpack
from qiskit import transpile
from qiskit_nature import QiskitNatureError
from qiskit_nature.second_q.circuit.library import UCC
from qiskit_nature.second_q.mappers import JordanWignerMapper, ParityMapper
from qiskit_nature.second_q.operators import FermionicOp
def assert_ucc_like_ansatz(test_case, ansatz, num_spatial_orbitals, expected_ops):
"""Assertion utility."""
excitation_ops = ansatz.excitation_ops()
test_case.assertEqual(len(excitation_ops), len(expected_ops))
for op, exp in zip(excitation_ops, expected_ops):
test_case.assertEqual(op, exp)
ansatz._build()
test_case.assertEqual(ansatz.num_qubits, 2 * num_spatial_orbitals)
@ddt
class TestUCC(QiskitNatureTestCase):
"""Tests for the UCC Ansatz."""
# Note: many variations of this class are tested by its sub-classes UCCSD, PUCCD and SUCCD.
# Thus, the tests here mainly cover edge cases which those classes cannot account for.
@unpack
@data(
(
"t",
4,
(2, 2),
[
FermionicOp(
{"+_0 +_1 +_4 -_2 -_3 -_6": 1j, "+_6 +_3 +_2 -_4 -_1 -_0": -1j},
num_spin_orbitals=8,
),
FermionicOp(
{"+_0 +_1 +_4 -_2 -_3 -_7": 1j, "+_7 +_3 +_2 -_4 -_1 -_0": -1j},
num_spin_orbitals=8,
),
FermionicOp(
{"+_0 +_1 +_5 -_2 -_3 -_6": 1j, "+_6 +_3 +_2 -_5 -_1 -_0": -1j},
num_spin_orbitals=8,
),
FermionicOp(
{"+_0 +_1 +_5 -_2 -_3 -_7": 1j, "+_7 +_3 +_2 -_5 -_1 -_0": -1j},
num_spin_orbitals=8,
),
FermionicOp(
{"+_0 +_4 +_5 -_2 -_6 -_7": 1j, "+_7 +_6 +_2 -_5 -_4 -_0": -1j},
num_spin_orbitals=8,
),
FermionicOp(
{"+_0 +_4 +_5 -_3 -_6 -_7": 1j, "+_7 +_6 +_3 -_5 -_4 -_0": -1j},
num_spin_orbitals=8,
),
FermionicOp(
{"+_1 +_4 +_5 -_2 -_6 -_7": 1j, "+_7 +_6 +_2 -_5 -_4 -_1": -1j},
num_spin_orbitals=8,
),
FermionicOp(
{"+_1 +_4 +_5 -_3 -_6 -_7": 1j, "+_7 +_6 +_3 -_5 -_4 -_1": -1j},
num_spin_orbitals=8,
),
],
),
(
"t",
4,
(2, 1),
[
FermionicOp(
{"+_0 +_1 +_4 -_2 -_3 -_5": 1j, "+_5 +_3 +_2 -_4 -_1 -_0": -1j},
num_spin_orbitals=8,
),
FermionicOp(
{"+_0 +_1 +_4 -_2 -_3 -_6": 1j, "+_6 +_3 +_2 -_4 -_1 -_0": -1j},
num_spin_orbitals=8,
),
FermionicOp(
{"+_0 +_1 +_4 -_2 -_3 -_7": 1j, "+_7 +_3 +_2 -_4 -_1 -_0": -1j},
num_spin_orbitals=8,
),
],
),
(
"q",
4,
(2, 2),
[
FermionicOp(
{"+_0 +_1 +_4 +_5 -_2 -_3 -_6 -_7": 1j, "+_7 +_6 +_3 +_2 -_5 -_4 -_1 -_0": -1j},
num_spin_orbitals=8,
)
],
),
(
"s",
2,
(2, 1),
[
FermionicOp(
{"+_2 -_3": 1j, "+_3 -_2": (-0 - 1j)},
num_spin_orbitals=4,
)
],
),
# TODO: add more edge cases?
)
def test_ucc_ansatz(self, excitations, num_spatial_orbitals, num_particles, expect):
"""Tests the UCC Ansatz."""
mapper = JordanWignerMapper()
ansatz = UCC(
qubit_mapper=mapper,
num_particles=num_particles,
num_spatial_orbitals=num_spatial_orbitals,
excitations=excitations,
)
assert_ucc_like_ansatz(self, ansatz, num_spatial_orbitals, expect)
@unpack
@data(
# Excitations not a list of pairs
(
4,
(2, 2),
[((0, 1, 4), (2, 3, 6), (2, 3, 7))],
),
        # Excitation pair with mismatched lengths
(
4,
(2, 2),
[((0, 1, 4), (2, 3, 6, 7))],
),
# Excitation pair with non-unique indices
(
4,
(2, 2),
[((0, 1, 4), (2, 4, 6))],
),
(
4,
(2, 2),
[((0, 1, 1), (2, 3, 6))],
),
)
def test_custom_excitations(self, num_spatial_orbitals, num_particles, excitations):
"""Tests if an error is raised when the excitations have a wrong format"""
mapper = JordanWignerMapper()
# pylint: disable=unused-argument
def custom_excitations(num_spatial_orbitals, num_particles):
return excitations
with self.assertRaises(QiskitNatureError):
ansatz = UCC(
qubit_mapper=mapper,
num_particles=num_particles,
num_spatial_orbitals=num_spatial_orbitals,
excitations=custom_excitations,
)
ansatz.excitation_ops()
def test_transpile_no_parameters(self):
"""Test transpilation without parameters"""
num_spatial_orbitals = 4
num_particles = (2, 2)
mapper = JordanWignerMapper()
ansatz = UCC(
num_spatial_orbitals=num_spatial_orbitals,
num_particles=num_particles,
qubit_mapper=mapper,
excitations="s",
)
ansatz = transpile(ansatz, optimization_level=3)
self.assertEqual(ansatz.num_qubits, 8)
def test_build_ucc(self):
"""Test building UCC"""
ucc = UCC()
with self.subTest("Check defaulted construction"):
self.assertIsNone(ucc.num_particles)
self.assertIsNone(ucc.num_spatial_orbitals)
self.assertIsNone(ucc.excitations)
self.assertIsNone(ucc.qubit_mapper)
self.assertIsNone(ucc.operators)
self.assertIsNone(ucc.excitation_list)
self.assertEqual(ucc.num_qubits, 0)
with self.assertRaises(ValueError):
_ = ucc.data
with self.subTest("Set num particles"):
ucc.num_particles = (1, 1)
self.assertEqual(ucc.num_particles, (1, 1))
self.assertIsNone(ucc.operators)
with self.assertRaises(ValueError):
_ = ucc.data
with self.subTest("Set num spatial orbitals"):
ucc.num_spatial_orbitals = 2
self.assertEqual(ucc.num_spatial_orbitals, 2)
self.assertIsNone(ucc.operators)
with self.assertRaises(ValueError):
_ = ucc.data
with self.subTest("Set excitations"):
ucc.excitations = "sd"
self.assertEqual(ucc.excitations, "sd")
self.assertIsNone(ucc.operators)
with self.assertRaises(ValueError):
_ = ucc.data
with self.subTest("Set qubit mapper to complete build"):
mapper = JordanWignerMapper()
ucc.qubit_mapper = mapper
self.assertEqual(ucc.qubit_mapper, mapper)
self.assertIsNotNone(ucc.operators)
self.assertEqual(len(ucc.operators), 3)
self.assertEqual(ucc.num_qubits, 4)
self.assertIsNotNone(ucc.data)
with self.subTest("Set custom operators"):
self.assertEqual(len(ucc.operators), 3)
ucc.operators = ucc.operators[:2]
self.assertEqual(len(ucc.operators), 2)
self.assertEqual(ucc.num_qubits, 4)
with self.subTest("Reset operators back to as per UCC"):
ucc.operators = None
self.assertEqual(ucc.num_qubits, 4)
self.assertIsNotNone(ucc.operators)
self.assertEqual(len(ucc.operators), 3)
with self.subTest("Set num particles to include 0"):
ucc.num_particles = (1, 0)
self.assertEqual(ucc.num_particles, (1, 0))
self.assertIsNotNone(ucc.operators)
self.assertEqual(len(ucc.operators), 1)
with self.subTest("Change num particles"):
ucc.num_particles = (1, 1)
self.assertIsNotNone(ucc.operators)
self.assertEqual(len(ucc.operators), 3)
with self.subTest("Change num spatial orbitals"):
ucc.num_spatial_orbitals = 3
self.assertIsNotNone(ucc.operators)
self.assertEqual(len(ucc.operators), 8)
with self.subTest("Change excitations"):
ucc.excitations = "s"
self.assertIsNotNone(ucc.operators)
self.assertEqual(len(ucc.operators), 4)
with self.subTest("Change qubit mapper"):
mapper = ParityMapper()
ucc.qubit_mapper = mapper
self.assertIsNotNone(ucc.operators)
self.assertEqual(ucc.qubit_mapper, mapper)
self.assertEqual(ucc.num_qubits, 6)
# TODO: PR #1018 Add test with parity mapper and two qubit reduction
if __name__ == "__main__":
unittest.main()
|
qiskit-nature/test/second_q/circuit/library/ansatzes/test_ucc.py/0
|
{
"file_path": "qiskit-nature/test/second_q/circuit/library/ansatzes/test_ucc.py",
"repo_id": "qiskit-nature",
"token_count": 5534
}
| 142 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2020, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Driver Gaussian internals - does not require Gaussian installed """
import unittest
from test import QiskitNatureTestCase
from qiskit_nature.second_q.drivers import GaussianDriver
class TestDriverGaussianExtra(QiskitNatureTestCase):
"""Gaussian Driver extra tests for driver specifics, errors etc"""
def test_cfg_augment(self):
"""test input configuration augmentation"""
cfg = (
"# rhf/sto-3g scf(conventional)\n\n"
"h2 molecule\n\n0 1\nH 0.0 0.0 0.0\nH 0.0 0.0 0.735\n\n"
)
aug_cfg = GaussianDriver._augment_config("mymatfile.mat", cfg)
expected = (
"# rhf/sto-3g scf(conventional)\n"
"# Window=Full Int=NoRaff Symm=(NoInt,None)"
" output=(matrix,i4labels,mo2el) tran=full\n\n"
"h2 molecule\n\n0 1\nH 0.0 0.0 0.0\nH 0.0 0.0 0.735"
"\n\nmymatfile.mat\n\n"
)
self.assertEqual(aug_cfg, expected)
if __name__ == "__main__":
unittest.main()
|
qiskit-nature/test/second_q/drivers/gaussiand/test_driver_gaussian_extra.py/0
|
{
"file_path": "qiskit-nature/test/second_q/drivers/gaussiand/test_driver_gaussian_extra.py",
"repo_id": "qiskit-nature",
"token_count": 634
}
| 143 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2018, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Driver PySCF """
import unittest
from test import QiskitNatureTestCase
from test.second_q.drivers.test_driver import TestDriver
from qiskit_nature.units import DistanceUnit
from qiskit_nature.second_q.drivers import PySCFDriver
from qiskit_nature import QiskitNatureError
import qiskit_nature.optionals as _optionals
class TestDriverPySCF(QiskitNatureTestCase, TestDriver):
"""PYSCF Driver tests."""
@unittest.skipIf(not _optionals.HAS_PYSCF, "pyscf not available.")
def setUp(self):
super().setUp()
driver = PySCFDriver(
atom="H .0 .0 .0; H .0 .0 0.735",
unit=DistanceUnit.ANGSTROM,
charge=0,
spin=0,
basis="sto3g",
)
self.driver_result = driver.run()
def test_h3(self):
"""Test for H3 chain, see also https://github.com/Qiskit/qiskit-aqua/issues/1148."""
atom = "H 0 0 0; H 0 0 1; H 0 0 2"
driver = PySCFDriver(atom=atom, unit=DistanceUnit.ANGSTROM, charge=0, spin=1, basis="sto3g")
driver_result = driver.run()
self.assertAlmostEqual(driver_result.reference_energy, -1.523996200246108, places=5)
def test_h4(self):
"""Test for H4 chain"""
atom = "H 0 0 0; H 0 0 1; H 0 0 2; H 0 0 3"
driver = PySCFDriver(atom=atom, unit=DistanceUnit.ANGSTROM, charge=0, spin=0, basis="sto3g")
driver_result = driver.run()
self.assertAlmostEqual(driver_result.reference_energy, -2.09854593699776, places=5)
def test_invalid_atom_type(self):
"""Atom is string with ; separator or list of string"""
with self.assertRaises(QiskitNatureError):
PySCFDriver(atom=("H", 0, 0, 0))
def test_list_atom(self):
"""Check input with list of strings"""
atom = ["H 0 0 0", "H 0 0 1"]
driver = PySCFDriver(atom=atom, unit=DistanceUnit.ANGSTROM, charge=0, spin=0, basis="sto3g")
driver_result = driver.run()
self.assertAlmostEqual(driver_result.reference_energy, -1.0661086493179366, places=5)
def test_zmatrix(self):
"""Check z-matrix input"""
atom = "H; H 1 1.0"
driver = PySCFDriver(atom=atom, unit=DistanceUnit.ANGSTROM, charge=0, spin=0, basis="sto3g")
driver_result = driver.run()
self.assertAlmostEqual(driver_result.reference_energy, -1.0661086493179366, places=5)
if __name__ == "__main__":
unittest.main()
|
qiskit-nature/test/second_q/drivers/pyscfd/test_driver_pyscf.py/0
|
{
"file_path": "qiskit-nature/test/second_q/drivers/pyscfd/test_driver_pyscf.py",
"repo_id": "qiskit-nature",
"token_count": 1194
}
| 144 |
{
"schema_name": "qc_schema_output",
"schema_version": 1,
"molecule": {
"schema_name": "qcschema_molecule",
"schema_version": 2,
"geometry": [
0,
0,
0,
0,
0,
6
],
"symbols": [
"He",
"He"
]
},
"driver": "energy",
"model": {
"method": "VV10",
"basis": "cc-pVDZ"
},
"keywords": {},
"provenance": {
"creator": "QM Program",
"version": "1.1",
"routine": "module.json.run_json"
},
"return_result": -5.815121364568496,
"success": true,
"properties": {
"calcinfo_nbasis": 10,
"calcinfo_nmo": 10,
"calcinfo_nalpha": 2,
"calcinfo_nbeta": 2,
"calcinfo_natom": 2,
"return_energy": -5.815121364568496,
"scf_one_electron_energy": -9.10156722786234,
"scf_two_electron_energy": 4.782528510470115,
"nuclear_repulsion_energy": 0.6666666666666666,
"scf_dipole_moment": [
0.0,
0.0,
9.030096599360606e-14
],
"scf_iterations": 3,
"scf_total_energy": -5.815121364568496,
"scf_vv10_energy": 0.018799951240226136,
"scf_xc_energy": -2.181549265083163
}
}
|
qiskit-nature/test/second_q/formats/qcschema/he2_energy_VV10_output.json/0
|
{
"file_path": "qiskit-nature/test/second_q/formats/qcschema/he2_energy_VV10_output.json",
"repo_id": "qiskit-nature",
"token_count": 600
}
| 145 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2021, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Bravyi-Kitaev Mapper """
import unittest
from test import QiskitNatureTestCase
from qiskit.quantum_info import SparsePauliOp
import qiskit_nature.optionals as _optionals
from qiskit_nature.second_q.drivers import PySCFDriver
from qiskit_nature.second_q.mappers import BravyiKitaevMapper
from qiskit_nature.second_q.operators import FermionicOp
class TestBravyiKitaevMapper(QiskitNatureTestCase):
"""Test Bravyi-Kitaev Mapper"""
REF_H2 = SparsePauliOp.from_list(
[
("IIII", -0.81054798160031430),
("IZII", +0.17218393211855787),
("IIZI", +0.12091263243164174),
("ZIZI", +0.12091263243164174),
("ZZZI", -0.22575349071287365),
("IIIZ", +0.17218393211855818),
("IZIZ", +0.16892753854646372),
("ZZIZ", +0.17464343053355980),
("IIZZ", -0.22575349071287362),
("IZZZ", +0.16614543242281926),
("ZZZZ", +0.16614543242281926),
("IXIX", +0.04523279999117751),
("ZXIX", +0.04523279999117751),
("IXZX", -0.04523279999117751),
("ZXZX", -0.04523279999117751),
]
)
@unittest.skipIf(not _optionals.HAS_PYSCF, "pyscf not available.")
def test_mapping(self):
"""Test mapping to qubit operator"""
driver = PySCFDriver()
driver_result = driver.run()
fermionic_op, _ = driver_result.second_q_ops()
mapper = BravyiKitaevMapper()
qubit_op = mapper.map(fermionic_op)
self.assertTrue(qubit_op.equiv(TestBravyiKitaevMapper.REF_H2))
def test_mapping_for_single_op(self):
"""Test for single register operator."""
with self.subTest("test +"):
op = FermionicOp({"+_0": 1}, num_spin_orbitals=1)
expected = SparsePauliOp.from_list([("X", 0.5), ("Y", -0.5j)])
self.assertEqualSparsePauliOp(BravyiKitaevMapper().map(op), expected)
with self.subTest("test -"):
op = FermionicOp({"-_0": 1}, num_spin_orbitals=1)
expected = SparsePauliOp.from_list([("X", 0.5), ("Y", 0.5j)])
self.assertEqualSparsePauliOp(BravyiKitaevMapper().map(op), expected)
with self.subTest("test N"):
op = FermionicOp({"+_0 -_0": 1}, num_spin_orbitals=1)
expected = SparsePauliOp.from_list([("I", 0.5), ("Z", -0.5)])
self.assertEqualSparsePauliOp(BravyiKitaevMapper().map(op), expected)
with self.subTest("test E"):
op = FermionicOp({"-_0 +_0": 1}, num_spin_orbitals=1)
expected = SparsePauliOp.from_list([("I", 0.5), ("Z", 0.5)])
self.assertEqualSparsePauliOp(BravyiKitaevMapper().map(op), expected)
with self.subTest("test I"):
op = FermionicOp({"": 1}, num_spin_orbitals=1)
expected = SparsePauliOp.from_list([("I", 1)])
self.assertEqualSparsePauliOp(BravyiKitaevMapper().map(op), expected)
def test_mapping_overwrite_reg_len(self):
"""Test overwriting the register length."""
op = FermionicOp({"+_0 -_0": 1}, num_spin_orbitals=1)
expected = FermionicOp({"+_0 -_0": 1}, num_spin_orbitals=3)
mapper = BravyiKitaevMapper()
self.assertEqual(mapper.map(op, register_length=3), mapper.map(expected))
if __name__ == "__main__":
unittest.main()
|
qiskit-nature/test/second_q/mappers/test_bravyi_kitaev_mapper.py/0
|
{
"file_path": "qiskit-nature/test/second_q/mappers/test_bravyi_kitaev_mapper.py",
"repo_id": "qiskit-nature",
"token_count": 1792
}
| 146 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test for SparseLabelOp"""
from __future__ import annotations
from typing import Collection, Iterator, Mapping, Sequence
import unittest
from test import QiskitNatureTestCase
import numpy as np
from qiskit.circuit import Parameter
from qiskit_nature.second_q.operators import PolynomialTensor, SparseLabelOp
from qiskit_nature.second_q.operators.sparse_label_op import _TCoeff
a = Parameter("a")
b = Parameter("b")
op1 = {
"+_0 -_1": 0.0,
"+_0 -_2": 1.0,
}
op2 = {
"+_0 -_1": 0.5,
"+_0 -_2": 1.0,
}
op3 = {
"+_0 -_1": 0.5,
"+_0 -_3": 3.0,
}
opComplex = {
"+_0 -_1": 0.5 + 1j,
"+_0 -_2": 1.0,
}
opParameter = {
"+_0 -_1": a,
"+_0 -_2": b,
}
class DummySparseLabelOp(SparseLabelOp):
"""Dummy SparseLabelOp for testing purposes"""
@property
def register_length(self) -> int | None:
return None
# pylint: disable=unused-argument
def _new_instance(
self, data: Mapping[str, complex], *, other: SparseLabelOp | None = None
) -> SparseLabelOp:
return self.__class__(data, copy=False)
def _validate_keys(self, keys: Collection[str]) -> None:
pass
@classmethod
def _validate_polynomial_tensor_key(cls, keys: Collection[str]) -> None:
pass
@classmethod
def from_polynomial_tensor(cls, tensor: PolynomialTensor) -> SparseLabelOp:
pass
def terms(self) -> Iterator[tuple[list[tuple[str, int]], complex]]:
pass
@classmethod
def from_terms(cls, terms: Sequence[tuple[list[tuple[str, int]], _TCoeff]]) -> SparseLabelOp:
pass
def _permute_term(
self, term: list[tuple[str, int]], permutation: Sequence[int]
) -> list[tuple[str, int]]:
pass
def transpose(self) -> SparseLabelOp:
return self
def compose(self, other, qargs=None, front=False) -> SparseLabelOp:
return self
def tensor(self, other) -> SparseLabelOp:
return self
def expand(self, other) -> SparseLabelOp:
return self
# pylint: disable=unused-argument
def simplify(self, atol: float | None = None) -> SparseLabelOp:
return self
class TestSparseLabelOp(QiskitNatureTestCase):
"""SparseLabelOp tests."""
def test_add(self):
"""Test add method"""
with self.subTest("real + real"):
test_op = DummySparseLabelOp(op1) + DummySparseLabelOp(op2)
target_op = DummySparseLabelOp(
{
"+_0 -_1": 0.5,
"+_0 -_2": 2.0,
},
)
self.assertEqual(test_op, target_op)
with self.subTest("complex + real"):
test_op = DummySparseLabelOp(op2) + DummySparseLabelOp(opComplex)
target_op = DummySparseLabelOp(
{
"+_0 -_1": 1.0 + 1j,
"+_0 -_2": 2.0,
},
)
self.assertEqual(test_op, target_op)
with self.subTest("complex + complex"):
test_op = DummySparseLabelOp(opComplex) + DummySparseLabelOp(opComplex)
target_op = DummySparseLabelOp(
{
"+_0 -_1": 1.0 + 2j,
"+_0 -_2": 2.0,
},
)
self.assertEqual(test_op, target_op)
with self.subTest("complex + parameter"):
test_op = DummySparseLabelOp(opComplex) + DummySparseLabelOp(opParameter)
target_op = DummySparseLabelOp({"+_0 -_1": 0.5 + 1j + a, "+_0 -_2": 1.0 + b})
self.assertEqual(test_op, target_op)
with self.subTest("new key"):
test_op = DummySparseLabelOp(op1) + DummySparseLabelOp(op3)
target_op = DummySparseLabelOp(
{
"+_0 -_1": 0.5,
"+_0 -_2": 1.0,
"+_0 -_3": 3.0,
},
)
self.assertEqual(test_op, target_op)
with self.subTest("sum"):
test_op = sum([DummySparseLabelOp(op1), DummySparseLabelOp(op3)])
target_op = DummySparseLabelOp(
{
"+_0 -_1": 0.5,
"+_0 -_2": 1.0,
"+_0 -_3": 3.0,
},
)
self.assertEqual(test_op, target_op)
def test_mul(self):
"""Test scalar multiplication method"""
with self.subTest("real * real"):
test_op = DummySparseLabelOp(op1) * 2
target_op = DummySparseLabelOp(
{
"+_0 -_1": 0.0,
"+_0 -_2": 2.0,
},
)
self.assertEqual(test_op, target_op)
with self.subTest("complex * real"):
test_op = DummySparseLabelOp(opComplex) * 2
target_op = DummySparseLabelOp(
{
"+_0 -_1": 1.0 + 2j,
"+_0 -_2": 2.0,
},
)
self.assertEqual(test_op, target_op)
with self.subTest("real * complex"):
test_op = DummySparseLabelOp(op2) * (0.5 + 1j)
target_op = DummySparseLabelOp(
{
"+_0 -_1": 0.25 + 0.5j,
"+_0 -_2": 0.5 + 1j,
},
)
self.assertEqual(test_op, target_op)
with self.subTest("complex * complex"):
test_op = DummySparseLabelOp(opComplex) * (0.5 + 1j)
target_op = DummySparseLabelOp(
{
"+_0 -_1": -0.75 + 1j,
"+_0 -_2": 0.5 + 1j,
},
)
self.assertEqual(test_op, target_op)
with self.subTest("parameter * complex"):
test_op = DummySparseLabelOp(opParameter) * (0.5 + 1j)
target_op = DummySparseLabelOp(
{
"+_0 -_1": a * (0.5 + 1j),
"+_0 -_2": b * (0.5 + 1j),
},
)
self.assertEqual(test_op, target_op)
with self.subTest("complex * parameter"):
test_op = DummySparseLabelOp(opComplex) * (a + b)
target_op = DummySparseLabelOp(
{
"+_0 -_1": (0.5 + 1j) * (a + b),
"+_0 -_2": (a + b),
},
)
self.assertEqual(test_op, target_op)
with self.subTest("parameter * parameter"):
test_op = DummySparseLabelOp(opParameter) * (a + b)
target_op = DummySparseLabelOp(
{
"+_0 -_1": a * (a + b),
"+_0 -_2": b * (a + b),
},
)
self.assertEqual(test_op, target_op)
with self.subTest("raises TypeError"):
with self.assertRaises(TypeError):
_ = DummySparseLabelOp(op1) * "something"
# regression test against https://github.com/Qiskit/qiskit-nature/issues/953
with self.subTest("numpy types"):
test_op = np.double(2) * DummySparseLabelOp(op1)
target_op = DummySparseLabelOp(
{
"+_0 -_1": 0.0,
"+_0 -_2": 2.0,
},
)
self.assertEqual(test_op, target_op)
def test_adjoint(self):
"""Test adjoint method"""
with self.subTest("complex"):
test_op = DummySparseLabelOp(opComplex).adjoint()
target_op = DummySparseLabelOp({"+_0 -_1": 0.5 - 1j, "+_0 -_2": 1.0})
self.assertEqual(test_op, target_op)
with self.subTest("parameter"):
test_op = DummySparseLabelOp(opParameter).adjoint()
target_op = DummySparseLabelOp({"+_0 -_1": a.conjugate(), "+_0 -_2": b.conjugate()})
self.assertEqual(test_op, target_op)
def test_conjugate(self):
"""Test conjugate method"""
with self.subTest("complex"):
test_op = DummySparseLabelOp(opComplex).conjugate()
target_op = DummySparseLabelOp({"+_0 -_1": 0.5 - 1j, "+_0 -_2": 1.0})
self.assertEqual(test_op, target_op)
with self.subTest("parameter"):
test_op = DummySparseLabelOp(opParameter).conjugate()
target_op = DummySparseLabelOp({"+_0 -_1": a.conjugate(), "+_0 -_2": b.conjugate()})
self.assertEqual(test_op, target_op)
def test_eq(self):
"""test __eq__ method"""
with self.subTest("equal"):
test_op = DummySparseLabelOp(op1) == DummySparseLabelOp(op1)
self.assertTrue(test_op)
with self.subTest("not equal - keys"):
test_op = DummySparseLabelOp(op1) == DummySparseLabelOp(
{
"+_0 -_1": 0.0,
"+_0 -_3": 1.0,
},
)
self.assertFalse(test_op)
with self.subTest("not equal - values"):
test_op = DummySparseLabelOp(op1) == DummySparseLabelOp(op2)
self.assertFalse(test_op)
with self.subTest("not equal - tolerance"):
test_op = DummySparseLabelOp(op1) == DummySparseLabelOp(
{
"+_0 -_1": 0.000000001,
"+_0 -_2": 1.0,
},
)
self.assertFalse(test_op)
def test_equiv(self):
"""test equiv method"""
with self.subTest("not equivalent - tolerances"):
test_op = DummySparseLabelOp(op1).equiv(
DummySparseLabelOp(
{
"+_0 -_1": 0.000001,
"+_0 -_2": 1.0,
},
)
)
self.assertFalse(test_op)
with self.subTest("not equivalent - keys"):
test_op = DummySparseLabelOp(op1).equiv(
DummySparseLabelOp(
{
"+_0 -_1": 0.0,
"+_0 -_3": 1.0,
},
)
)
self.assertFalse(test_op)
with self.subTest("equivalent"):
test_op = DummySparseLabelOp(op1).equiv(
DummySparseLabelOp(
{
"+_0 -_1": 0.000000001,
"+_0 -_2": 1.0,
},
)
)
self.assertTrue(test_op)
with self.subTest("parameters"):
test_op = DummySparseLabelOp(opParameter)
with self.assertRaisesRegex(ValueError, "parameter"):
_ = test_op.equiv(DummySparseLabelOp(opParameter))
test_op = DummySparseLabelOp(opComplex)
with self.assertRaisesRegex(ValueError, "parameter"):
_ = test_op.equiv(DummySparseLabelOp(opParameter))
def test_iter(self):
"""test __iter__ method"""
test_op = iter(DummySparseLabelOp(op1))
self.assertEqual(next(test_op), "+_0 -_1")
self.assertEqual(next(test_op), "+_0 -_2")
def test_get_item(self):
"""test __getitem__ method"""
test_op = DummySparseLabelOp(op1)
self.assertEqual(test_op["+_0 -_1"], 0.0)
def test_len(self):
"""test __len__ method"""
test_op = DummySparseLabelOp(op1)
self.assertEqual(len(test_op), 2)
def test_copy(self):
"""test copy bool"""
data = {
"+_0 -_1": 0.0,
"+_0 -_3": 1.0,
}
test_op = DummySparseLabelOp(data, copy=True)
data["+_0 -_1"] = 0.2
self.assertEqual(test_op._data["+_0 -_1"], 0.0)
def test_zero(self):
"""test zero class initializer"""
test_op = DummySparseLabelOp.zero()
self.assertEqual(test_op._data, {})
def test_one(self):
"""test one class initializer"""
test_op = DummySparseLabelOp.one()
self.assertEqual(test_op._data, {"": 1.0})
def test_induced_norm(self):
"""Test induced norm."""
op = DummySparseLabelOp({"+_0 -_1": 3.0, "+_0 -_2": -4j})
self.assertAlmostEqual(op.induced_norm(), 7.0)
self.assertAlmostEqual(op.induced_norm(2), 5.0)
test_op = DummySparseLabelOp(opParameter)
with self.assertRaisesRegex(ValueError, "parameter"):
_ = test_op.induced_norm()
def test_chop(self):
"""Test chop."""
op = DummySparseLabelOp({"+_0 -_1": 1 + 1e-12j, "+_0 -_2": a})
self.assertEqual(op.chop(), DummySparseLabelOp({"+_0 -_1": 1, "+_0 -_2": a}))
op = DummySparseLabelOp({"+_0 -_1": 1e-12 + 1j, "+_0 -_2": a})
self.assertEqual(op.chop(), DummySparseLabelOp({"+_0 -_1": 1j, "+_0 -_2": a}))
self.assertEqual((op - op).chop(), DummySparseLabelOp.zero())
def test_is_parameterized(self):
"""Test is_parameterized."""
self.assertTrue(DummySparseLabelOp(opParameter).is_parameterized())
self.assertFalse(DummySparseLabelOp(op1).is_parameterized())
def test_assign_parameters(self):
"""Test assign_parameters."""
op = DummySparseLabelOp({"+_0 -_1": a, "+_0 -_2": b})
assigned_op = op.assign_parameters({a: 1.0})
self.assertEqual(assigned_op, DummySparseLabelOp({"+_0 -_1": 1.0, "+_0 -_2": b}))
self.assertEqual(op, DummySparseLabelOp({"+_0 -_1": a, "+_0 -_2": b}))
op = DummySparseLabelOp({"+_0 -_1": a + 1}) + DummySparseLabelOp({"+_1 -_0": a})
assigned_op = op.assign_parameters({a: 1})
self.assertEqual(assigned_op, DummySparseLabelOp({"+_0 -_1": 2, "+_1 -_0": 1}))
op = DummySparseLabelOp({"+_0 -_1": a + 1}) + DummySparseLabelOp({"+_1 -_0": b})
assigned_op = op.assign_parameters({a: 1})
self.assertEqual(assigned_op, DummySparseLabelOp({"+_0 -_1": 2, "+_1 -_0": b}))
op = DummySparseLabelOp({"+_0 -_1": a + 1})
assigned_op = op.assign_parameters({b: 1})
self.assertEqual(assigned_op, op)
def test_round(self):
"""test round function"""
with self.subTest("round just real part"):
data = {
"+_0 -_1": 0.7 + 3j,
"+_0 -_3": 1.1 + 4j,
}
test_op = DummySparseLabelOp(data).round()
self.assertEqual(
test_op._data,
{
"+_0 -_1": 1.0 + 3j,
"+_0 -_3": 1.0 + 4j,
},
)
with self.subTest("round just imag part"):
data = {
"+_0 -_1": 1.0 + 0.9j,
"+_0 -_3": 1.0 + 0.2j,
}
test_op = DummySparseLabelOp(data).round()
self.assertEqual(
test_op._data,
{
"+_0 -_1": 1.0 + 1j,
"+_0 -_3": 1.0 + 0j,
},
)
with self.subTest("round real and imag part"):
data = {
"+_0 -_1": 0.8 + 0.3j,
"+_0 -_3": 1.2 + 0.8j,
}
test_op = DummySparseLabelOp(data).round()
self.assertEqual(
test_op._data,
{
"+_0 -_1": 1 + 0j,
"+_0 -_3": 1.0 + 1j,
},
)
with self.subTest("round real and imag part to 3dp"):
data = {
"+_0 -_1": 0.8762 + 0.3789j,
"+_0 -_3": 1.2458 + 0.8652j,
}
test_op = DummySparseLabelOp(data).round(3)
self.assertEqual(
test_op._data,
{
"+_0 -_1": 0.876 + 0.379j,
"+_0 -_3": 1.246 + 0.865j,
},
)
with self.subTest("round just real part to 3dp"):
data = {
"+_0 -_1": 0.8762 + 0.370j,
"+_0 -_3": 1.2458 + 0.860j,
}
test_op = DummySparseLabelOp(data).round(3)
self.assertEqual(
test_op._data,
{
"+_0 -_1": 0.876 + 0.370j,
"+_0 -_3": 1.246 + 0.860j,
},
)
with self.subTest("round just imag part to 3dp"):
data = {
"+_0 -_1": 0.8760 + 0.3789j,
"+_0 -_3": 1.245 + 0.8652j,
}
test_op = DummySparseLabelOp(data).round(3)
self.assertEqual(
test_op._data,
{
"+_0 -_1": 0.8760 + 0.379j,
"+_0 -_3": 1.245 + 0.865j,
},
)
def test_is_zero(self):
"""test if coefficients are all zero"""
with self.subTest("operator length is zero"):
test_op = DummySparseLabelOp({})
self.assertTrue(test_op.is_zero())
with self.subTest("coefficients are all zero"):
test_op = DummySparseLabelOp(
{
"+_0 -_1": 0.0,
"+_0 -_3": 0.0,
}
)
self.assertTrue(test_op.is_zero())
with self.subTest("coefficients are all zero with tol"):
test_op = DummySparseLabelOp(
{
"+_0 -_1": 0.05,
"+_0 -_3": 0.0,
}
)
self.assertTrue(test_op.is_zero(tol=0.1))
with self.subTest("coefficients are all zero with smaller val"):
test_op = DummySparseLabelOp(
{
"+_0 -_1": 0.0,
"+_0 -_3": 1e-18,
}
)
self.assertTrue(test_op.is_zero())
with self.subTest("coefficients not all zero"):
test_op = DummySparseLabelOp(
{
"+_0 -_1": 0.0,
"+_0 -_3": 0.1,
}
)
self.assertFalse(test_op.is_zero())
with self.subTest("coefficients not all zero with tol"):
test_op = DummySparseLabelOp(
{
"+_0 -_1": 0.05,
"+_0 -_3": 0.0,
}
)
self.assertFalse(test_op.is_zero(tol=0.001))
with self.subTest("parameterized coefficient"):
test_op = DummySparseLabelOp({"+_0 -_1": a})
self.assertFalse(test_op.is_zero())
bound_op = test_op.assign_parameters({a: 0.0})
self.assertTrue(bound_op.is_zero())
def test_parameters(self):
"""Test parameters."""
op = DummySparseLabelOp({"+_0 -_1": a, "+_0 -_2": b})
self.assertEqual(op.parameters(), [a, b])
if __name__ == "__main__":
unittest.main()
|
qiskit-nature/test/second_q/operators/test_sparse_label_op.py/0
|
{
"file_path": "qiskit-nature/test/second_q/operators/test_sparse_label_op.py",
"repo_id": "qiskit-nature",
"token_count": 10917
}
| 147 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for the LatticePropertiesContainer."""
import unittest
from test import QiskitNatureTestCase
from qiskit_nature.second_q.problems import LatticePropertiesContainer
from qiskit_nature.second_q.properties import OccupiedModals
class TestLatticePropertiesContainer(QiskitNatureTestCase):
"""Tests for the LatticePropertiesContainer."""
def test_custom_property(self) -> None:
"""Tests support for custom property objects."""
container = LatticePropertiesContainer()
container.add(OccupiedModals([]))
self.assertIn(OccupiedModals, container)
if __name__ == "__main__":
unittest.main()
|
qiskit-nature/test/second_q/problems/test_lattice_properties_container.py/0
|
{
"file_path": "qiskit-nature/test/second_q/problems/test_lattice_properties_container.py",
"repo_id": "qiskit-nature",
"token_count": 352
}
| 148 |
{
"schema_name": "qcschema",
"schema_version": -1,
"molecule": {
"symbols": [
"H",
"Be"
],
"geometry": [
0.0,
0.0,
0.0,
0.0,
0.0,
1.3
],
"schema_name": "qcschema_molecule",
"schema_version": 2,
"molecular_charge": 0,
"molecular_multiplicity": 2
},
"driver": "energy",
"model": {
"method": "?",
"basis": "?"
},
"keywords": {},
"provenance": {
"creator": "PYSCF",
"version": "1.7.5.1",
"routine": "atom=H .0 .0 .0; Be .0 .0 1.3\nunit=Angstrom\ncharge=0\nspin=1\nbasis=sto3g\nhf_method=rhf\nconv_tol=1e-09\nmax_cycle=50\ninit_guess=minao\nmax_memory=None\n"
},
"return_result": -14.935097712726224,
"success": true,
"properties": {
"calcinfo_nbasis": 6,
"calcinfo_nmo": 3,
"calcinfo_nalpha": 2,
"calcinfo_nbeta": 1,
"calcinfo_natom": 2,
"return_energy": -14.935097712726224,
"nuclear_repulsion_energy": 1.6282375720615385
},
"wavefunction": {
"basis": "?",
"orbitals_a": "scf_orbitals_a",
"fock_a": "scf_fock_a",
"fock_mo_a": "scf_fock_mo_a",
"eigenvalues_a": "scf_eigenvalues_a",
"occupations_a": "scf_occupations_a",
"occupations_b": "scf_occupations_b",
"eri": "scf_eri",
"eri_mo_aa": "scf_eri_mo_aa",
"scf_orbitals_a": [
0.5636860534011622,
-0.16372711191918007,
-0.0,
-0.17587368406511147,
-0.2121672032524305,
-0.0,
0.4405754096486833,
0.7945832096964763,
-0.0,
1.7446231981341115e-17,
-2.1714858607626628e-16,
-0.0,
-0.0,
-0.0,
1.0,
-0.3036312342444414,
0.6267243304127147,
-0.0
],
"scf_fock_a": [
-1.282513631640323,
-0.49919420707435314,
-0.7914652390108632,
0.0,
0.0,
0.782656318103196,
-0.49919420707435314,
-5.988090725353365,
-1.6612372011722827,
0.0,
0.0,
0.019818385797814184,
-0.7914652390108632,
-1.6612372011722827,
-1.4252320999182235,
0.0,
0.0,
0.1533155897060977,
0.0,
0.0,
0.0,
-0.8486840678368444,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.8486840678368444,
0.0,
0.7826563181031959,
0.019818385797814184,
0.1533155897060977,
0.0,
0.0,
-0.9698143908000393
],
"scf_fock_mo_a": [
-1.3022881634030556,
0.03573327629426462,
0.0,
0.035733276294264635,
-0.8665234853775244,
0.0,
0.0,
0.0,
-0.8486840678368444
],
"scf_eigenvalues_a": [
-0.43531728864155517,
-0.03651666021784921,
0.222777129036414
],
"scf_occupations_a": [
1.0,
1.0,
1.0,
0.0,
0.0,
0.0
],
"scf_occupations_b": [
1.0,
1.0,
0.0,
0.0,
0.0,
0.0
],
"scf_eri": [
0.7746059439198978,
0.034382975771341286,
0.27897816975757744,
0.0,
0.0,
-0.37022884727501804,
0.03438297577134129,
0.40257248360379827,
0.10391643802889863,
0.0,
0.0,
-0.01732965062710787,
0.2789781697575774,
0.10391643802889865,
0.3583671402358731,
0.0,
0.0,
-0.12345115059534366,
0.0,
0.0,
0.0,
0.3324291683698645,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.3324291683698645,
0.0,
-0.370228847275018,
-0.017329650627107868,
-0.1234511505953437,
0.0,
0.0,
0.40993761616471264,
0.034382975771341286,
0.007012330241608969,
0.019313116812548985,
0.0,
0.0,
-0.021030868356067652,
0.007012330241608968,
0.11371106018682653,
0.023039875062197315,
0.0,
0.0,
-0.002082242031052746,
0.019313116812548988,
0.023039875062197318,
0.041688910597678916,
0.0,
0.0,
-0.003865786913415285,
0.0,
0.0,
0.0,
0.04138195133179216,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.04138195133179216,
0.0,
-0.02103086835606765,
-0.0020822420310527457,
-0.0038657869134152843,
0.0,
0.0,
0.04232572508063752,
0.27897816975757744,
0.019313116812548985,
0.1186730977248316,
0.0,
0.0,
-0.14742405502478304,
0.019313116812548985,
0.23850396468906215,
0.06042287274928736,
0.0,
0.0,
-0.007957012536656776,
0.11867309772483159,
0.06042287274928734,
0.18778008667387608,
0.0,
0.0,
-0.045679522812126186,
0.0,
0.0,
0.0,
0.1811175774356048,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.1811175774356048,
0.0,
-0.147424055024783,
-0.007957012536656774,
-0.0456795228121262,
0.0,
0.0,
0.200937824883785,
0.0,
0.0,
0.0,
0.012020131429138662,
0.0,
0.0,
0.0,
0.0,
0.0,
0.005999785266064326,
0.0,
0.0,
0.0,
0.0,
0.0,
0.026873918062889527,
0.0,
0.0,
0.012020131429138662,
0.005999785266064324,
0.026873918062889534,
0.0,
0.0,
-0.008251758822015292,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015292,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.012020131429138662,
0.0,
0.0,
0.0,
0.0,
0.0,
0.005999785266064326,
0.0,
0.0,
0.0,
0.0,
0.0,
0.026873918062889527,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.012020131429138662,
0.005999785266064324,
0.026873918062889534,
0.0,
0.0,
-0.008251758822015292,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015292,
0.0,
-0.370228847275018,
-0.02103086835606765,
-0.14742405502478304,
0.0,
0.0,
0.19092563385032615,
-0.02103086835606765,
-0.2432306329194988,
-0.06244888941704274,
0.0,
0.0,
0.013104686259842315,
-0.14742405502478304,
-0.06244888941704273,
-0.20372383244810804,
0.0,
0.0,
0.07171002694726968,
0.0,
0.0,
0.0,
-0.19171050925886976,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.19171050925886976,
0.0,
0.1909256338503262,
0.013104686259842322,
0.07171002694726968,
0.0,
0.0,
-0.22753895053234224,
0.034382975771341286,
0.00701233024160897,
0.019313116812548988,
0.0,
0.0,
-0.02103086835606765,
0.007012330241608969,
0.11371106018682653,
0.023039875062197318,
0.0,
0.0,
-0.002082242031052746,
0.019313116812548988,
0.023039875062197318,
0.04168891059767893,
0.0,
0.0,
-0.0038657869134152848,
0.0,
0.0,
0.0,
0.041381951331792156,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.041381951331792156,
0.0,
-0.02103086835606765,
-0.0020822420310527452,
-0.003865786913415284,
0.0,
0.0,
0.04232572508063751,
0.4025724836037983,
0.11371106018682657,
0.2385039646890621,
0.0,
0.0,
-0.24323063291949878,
0.11371106018682654,
2.29883056063192,
0.3920362561947013,
0.0,
0.0,
0.0,
0.2385039646890621,
0.3920362561947013,
0.5709882140215694,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.5695783072461756,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.5695783072461756,
0.0,
-0.24323063291949887,
0.0,
0.0,
0.0,
0.0,
0.5695783072461756,
0.10391643802889866,
0.02303987506219732,
0.06042287274928735,
0.0,
0.0,
-0.06244888941704273,
0.023039875062197318,
0.3920362561947015,
0.07953869855499242,
0.0,
0.0,
0.0,
0.06042287274928735,
0.07953869855499243,
0.14304119873646873,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.1430829175325974,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.1430829175325974,
0.0,
-0.06244888941704272,
0.0,
0.0,
0.0,
0.0,
0.1430829175325974,
0.0,
0.0,
0.0,
0.005999785266064324,
0.0,
0.0,
0.0,
0.0,
0.0,
0.01460669502708022,
0.0,
0.0,
0.0,
0.0,
0.0,
0.021888097014823124,
0.0,
0.0,
0.005999785266064322,
0.014606695027080226,
0.02188809701482313,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.005999785266064324,
0.0,
0.0,
0.0,
0.0,
0.0,
0.01460669502708022,
0.0,
0.0,
0.0,
0.0,
0.0,
0.021888097014823124,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.005999785266064322,
0.014606695027080226,
0.02188809701482313,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.017329650627107875,
-0.0020822420310527457,
-0.007957012536656776,
0.0,
0.0,
0.013104686259842313,
-0.0020822420310527457,
0.0,
0.0,
0.0,
0.0,
0.01460669502708022,
-0.007957012536656777,
0.0,
0.0,
0.0,
0.0,
0.021888097014823124,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.013104686259842317,
0.014606695027080226,
0.02188809701482313,
0.0,
0.0,
0.0,
0.2789781697575775,
0.019313116812548985,
0.1186730977248316,
0.0,
0.0,
-0.14742405502478304,
0.01931311681254898,
0.23850396468906215,
0.06042287274928735,
0.0,
0.0,
-0.007957012536656776,
0.1186730977248316,
0.06042287274928736,
0.18778008667387608,
0.0,
0.0,
-0.04567952281212618,
0.0,
0.0,
0.0,
0.1811175774356048,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.1811175774356048,
0.0,
-0.14742405502478306,
-0.007957012536656774,
-0.04567952281212619,
0.0,
0.0,
0.200937824883785,
0.10391643802889866,
0.023039875062197318,
0.06042287274928734,
0.0,
0.0,
-0.06244888941704273,
0.023039875062197315,
0.3920362561947014,
0.07953869855499242,
0.0,
0.0,
0.0,
0.06042287274928736,
0.07953869855499242,
0.14304119873646873,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.14308291753259736,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.14308291753259736,
0.0,
-0.06244888941704273,
0.0,
0.0,
0.0,
0.0,
0.14308291753259736,
0.3583671402358732,
0.04168891059767893,
0.18778008667387608,
0.0,
0.0,
-0.20372383244810818,
0.041688910597678916,
0.5709882140215694,
0.1430411987364687,
0.0,
0.0,
0.0,
0.18778008667387605,
0.14304119873646873,
0.41768314813042595,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.4175892671607388,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.4175892671607388,
0.0,
-0.20372383244810813,
0.0,
0.0,
0.0,
0.0,
0.4175892671607388,
0.0,
0.0,
0.0,
0.026873918062889524,
0.0,
0.0,
0.0,
0.0,
0.0,
0.021888097014823134,
0.0,
0.0,
0.0,
0.0,
0.0,
0.09226493927763467,
0.0,
0.0,
0.02687391806288953,
0.02188809701482314,
0.09226493927763467,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.026873918062889524,
0.0,
0.0,
0.0,
0.0,
0.0,
0.021888097014823134,
0.0,
0.0,
0.0,
0.0,
0.0,
0.09226493927763467,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.02687391806288953,
0.02188809701482314,
0.09226493927763467,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.12345115059534371,
-0.0038657869134152843,
-0.04567952281212619,
0.0,
0.0,
0.07171002694726966,
-0.0038657869134152848,
0.0,
0.0,
0.0,
0.0,
0.021888097014823134,
-0.04567952281212621,
0.0,
0.0,
0.0,
0.0,
0.09226493927763467,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.07171002694726963,
0.02188809701482314,
0.09226493927763467,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.012020131429138662,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0059997852660643265,
0.0,
0.0,
0.0,
0.0,
0.0,
0.026873918062889527,
0.0,
0.0,
0.012020131429138662,
0.005999785266064325,
0.026873918062889534,
0.0,
0.0,
-0.008251758822015294,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015294,
0.0,
0.0,
0.0,
0.0,
0.0,
0.005999785266064324,
0.0,
0.0,
0.0,
0.0,
0.0,
0.014606695027080221,
0.0,
0.0,
0.0,
0.0,
0.0,
0.021888097014823124,
0.0,
0.0,
0.005999785266064323,
0.014606695027080226,
0.021888097014823134,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.026873918062889527,
0.0,
0.0,
0.0,
0.0,
0.0,
0.021888097014823134,
0.0,
0.0,
0.0,
0.0,
0.0,
0.09226493927763467,
0.0,
0.0,
0.026873918062889534,
0.021888097014823138,
0.09226493927763464,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.3324291683698645,
0.04138195133179216,
0.18111757743560492,
0.0,
0.0,
-0.19171050925886973,
0.04138195133179216,
0.5695783072461756,
0.14308291753259728,
0.0,
0.0,
0.0,
0.18111757743560492,
0.14308291753259728,
0.4175892671607391,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.4498590410866698,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.4013602826443278,
0.0,
-0.1917105092588697,
0.0,
0.0,
0.0,
0.0,
0.4013602826443278,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015294,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015292,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.012020131429138662,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0059997852660643265,
0.0,
0.0,
0.0,
0.0,
0.0,
0.026873918062889527,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.012020131429138662,
0.005999785266064325,
0.026873918062889534,
0.0,
0.0,
-0.008251758822015294,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015294,
0.0,
0.0,
0.0,
0.0,
0.0,
0.005999785266064324,
0.0,
0.0,
0.0,
0.0,
0.0,
0.014606695027080221,
0.0,
0.0,
0.0,
0.0,
0.0,
0.021888097014823124,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.005999785266064323,
0.014606695027080226,
0.021888097014823134,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.026873918062889527,
0.0,
0.0,
0.0,
0.0,
0.0,
0.021888097014823134,
0.0,
0.0,
0.0,
0.0,
0.0,
0.09226493927763467,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.026873918062889534,
0.021888097014823138,
0.09226493927763464,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.3324291683698645,
0.04138195133179216,
0.18111757743560492,
0.0,
0.0,
-0.19171050925886973,
0.04138195133179216,
0.5695783072461756,
0.14308291753259728,
0.0,
0.0,
0.0,
0.18111757743560492,
0.14308291753259728,
0.4175892671607391,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.4013602826443278,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.4498590410866698,
0.0,
-0.1917105092588697,
0.0,
0.0,
0.0,
0.0,
0.4013602826443278,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015294,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015292,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
-0.370228847275018,
-0.021030868356067645,
-0.14742405502478304,
0.0,
0.0,
0.19092563385032618,
-0.02103086835606765,
-0.24323063291949873,
-0.06244888941704273,
0.0,
0.0,
0.013104686259842317,
-0.14742405502478304,
-0.06244888941704273,
-0.20372383244810807,
0.0,
0.0,
0.07171002694726966,
0.0,
0.0,
0.0,
-0.1917105092588697,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.1917105092588697,
0.0,
0.1909256338503262,
0.013104686259842319,
0.07171002694726966,
0.0,
0.0,
-0.22753895053234224,
-0.017329650627107878,
-0.0020822420310527457,
-0.007957012536656774,
0.0,
0.0,
0.013104686259842317,
-0.0020822420310527457,
0.0,
0.0,
0.0,
0.0,
0.014606695027080221,
-0.007957012536656776,
0.0,
0.0,
0.0,
0.0,
0.021888097014823124,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.013104686259842317,
0.014606695027080226,
0.021888097014823134,
0.0,
0.0,
0.0,
-0.12345115059534371,
-0.003865786913415284,
-0.0456795228121262,
0.0,
0.0,
0.07171002694726965,
-0.0038657869134152843,
0.0,
0.0,
0.0,
0.0,
0.021888097014823134,
-0.045679522812126214,
0.0,
0.0,
0.0,
0.0,
0.09226493927763467,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.07171002694726966,
0.021888097014823138,
0.09226493927763464,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015294,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015292,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015294,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.008251758822015292,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.0,
0.0,
0.0,
0.024249379221170965,
0.0,
0.40993761616471275,
0.04232572508063752,
0.2009378248837851,
0.0,
0.0,
-0.2275389505323422,
0.04232572508063751,
0.5695783072461756,
0.14308291753259728,
0.0,
0.0,
0.0,
0.20093782488378503,
0.14308291753259728,
0.4175892671607391,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.4013602826443278,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.4013602826443278,
0.0,
-0.2275389505323422,
0.0,
0.0,
0.0,
0.0,
0.44985904108666974
],
"scf_eri_mo_aa": [
0.5723742128973133,
-0.0559359745376385,
0.0,
-0.055935974537638544,
0.3042842626233607,
0.0,
0.0,
0.0,
0.3665082095336424,
-0.05593597453763853,
0.019375287645106046,
0.0,
0.019375287645106043,
0.020202367947649232,
0.0,
0.0,
0.0,
0.014056764993639516,
0.0,
0.0,
0.036007006646011606,
0.0,
0.0,
0.02824399694440339,
0.036007006646011606,
0.02824399694440339,
0.0,
-0.05593597453763854,
0.019375287645106026,
0.0,
0.019375287645106033,
0.02020236794764919,
0.0,
0.0,
0.0,
0.01405676499363952,
0.3042842626233608,
0.020202367947649295,
0.0,
0.020202367947649225,
0.48162669180229034,
0.0,
0.0,
0.0,
0.40269913477854097,
0.0,
0.0,
0.028243996944403386,
0.0,
0.0,
0.05649509961576314,
0.028243996944403393,
0.05649509961576314,
0.0,
0.0,
0.0,
0.036007006646011606,
0.0,
0.0,
0.02824399694440339,
0.0360070066460116,
0.028243996944403382,
0.0,
0.0,
0.0,
0.028243996944403393,
0.0,
0.0,
0.05649509961576314,
0.02824399694440338,
0.05649509961576312,
0.0,
0.36650820953364244,
0.0140567649936397,
0.0,
0.014056764993639687,
0.40269913477854113,
0.0,
0.0,
0.0,
0.4498590410866698
]
}
}
|
qiskit-nature/test/second_q/transformers/resources/BeH_sto3g_reduced.json/0
|
{
"file_path": "qiskit-nature/test/second_q/transformers/resources/BeH_sto3g_reduced.json",
"repo_id": "qiskit-nature",
"token_count": 18407
}
| 149 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2021, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Generates spelling dictionaries for Sphinx and Pylint and combine them. """
from typing import Set, List
import sys
import os
import argparse
import shutil
import errno
import tempfile
from pathlib import Path
from sphinx.cmd.build import build_main as sphinx_build
from pylint import lint
class SpellDictGenerator:
"""Generates spelling dictionaries for Sphinx and Pylint"""
_DOCS_DIR = "docs"
_BUILD_DIR = "_build"
_STUBS_DIR = "stubs"
_JUPYTER_EXECUTE_DIR = "jupyter_execute"
_SPHINX_DICT_FILE = "dummy_spelling_wordlist.txt"
_SPELLING_SUFFIX = ".spelling"
_MAKE_FILE = "Makefile"
def __init__(self, root_dir: str, out_file: str) -> None:
self._root_dir = root_dir
self._output_file = out_file
self._docs_dir = os.path.join(self._root_dir, SpellDictGenerator._DOCS_DIR)
if not os.path.isdir(self._docs_dir):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), self._docs_dir)
self._build_dir = os.path.join(self._docs_dir, SpellDictGenerator._BUILD_DIR)
self._stubs_dir = os.path.join(self._docs_dir, SpellDictGenerator._STUBS_DIR)
self._jupyter_execute_dir = os.path.join(
self._docs_dir, SpellDictGenerator._JUPYTER_EXECUTE_DIR
)
self._sphinx_words: Set[str] = set()
self._pylint_words: Set[str] = set()
def generate_sphinx_spell_words(self) -> Set[str]:
"""
Generates Sphinx spelling dictionary
Returns:
spell words
"""
if os.path.isdir(self._build_dir):
shutil.rmtree(self._build_dir)
if os.path.isdir(self._stubs_dir):
shutil.rmtree(self._stubs_dir)
if os.path.isdir(self._jupyter_execute_dir):
shutil.rmtree(self._jupyter_execute_dir)
try:
os.mkdir(self._build_dir)
sphinx_dict_file = os.path.join(self._build_dir, SpellDictGenerator._SPHINX_DICT_FILE)
# create empty dictionary file
with open(sphinx_dict_file, "w", encoding="utf8"):
pass
sphinx_build(
[
"-b",
"spelling",
"-D",
f"spelling_word_list_filename={sphinx_dict_file}",
self._docs_dir,
self._build_dir,
]
)
self._sphinx_words = SpellDictGenerator._get_sphinx_spell_words(self._build_dir)
return self._sphinx_words
finally:
if os.path.isdir(self._build_dir):
shutil.rmtree(self._build_dir)
if os.path.isdir(self._stubs_dir):
shutil.rmtree(self._stubs_dir)
if os.path.isdir(self._jupyter_execute_dir):
shutil.rmtree(self._jupyter_execute_dir)
@staticmethod
def _get_sphinx_spell_words(path: str) -> Set[str]:
words = set()
for item in os.listdir(path):
fullpath = os.path.join(path, item)
file_path = Path(fullpath)
if file_path.is_dir() and not item.startswith("."):
word_list = SpellDictGenerator._get_sphinx_spell_words(fullpath)
words.update(word_list)
elif file_path.is_file() and file_path.suffix == SpellDictGenerator._SPELLING_SUFFIX:
word_list = SpellDictGenerator._extract_sphinx_spell_words(fullpath)
words.update(word_list)
return words
@staticmethod
def _extract_sphinx_spell_words(file_path: str) -> Set[str]:
words = set()
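        # A Sphinx ``.spelling`` file reports each flagged word inside parentheses, so extract
        # the token between the first "(" and the following ")" on every line.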
with open(file_path, "rt", encoding="utf8") as file:
for line in file:
start_idx = line.find("(")
end_idx = -1
if start_idx > 0:
end_idx = line.find(")", start_idx + 1)
if start_idx > 0 and end_idx > 0:
word = line[start_idx + 1 : end_idx]
words.add(word)
return words
def generate_pylint_spell_words(self) -> Set[str]:
"""
Generates Pylint spelling dictionary
Returns:
spell words
Raises:
FileNotFoundError: makefile not found
ValueError: Pylint spell not found
"""
# First read make file to extract pylint options
make_file = os.path.join(self._root_dir, SpellDictGenerator._MAKE_FILE)
if not os.path.isfile(make_file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), make_file)
options = None
with open(make_file, "rt", encoding="utf8") as file:
pylint_spell = False
for line in file:
if line.startswith("spell:"):
pylint_spell = True
elif pylint_spell and line.find("pylint ") > 0:
options = line.split()
options = options[1:]
break
if options is None:
raise ValueError(f"Pylint spell command not found in makefile {make_file}")
idx = options.index("--spelling-private-dict-file=.pylintdict")
if idx < 0:
raise ValueError(f"Pylint spell dict option not found in makefile {make_file}")
with tempfile.TemporaryDirectory() as temp_dir:
temp_dict_path = os.path.join(temp_dir, ".pylintdict")
options[idx] = f"--spelling-private-dict-file={temp_dict_path}"
options.insert(idx, "--spelling-store-unknown-words=y")
lint.Run(options, exit=False)
with open(temp_dict_path, "rt", encoding="utf8") as temp_dict_file:
words = temp_dict_file.read().splitlines()
self._pylint_words.update(words)
return self._pylint_words
def merge_sort_dict_to_output(self) -> List[str]:
"""Merge and sort Sphinx and Pylint dicts"""
word_set = set(w.lower() for w in self._sphinx_words)
word_set.update(w.lower() for w in self._pylint_words)
words = sorted(word_set)
with open(self._output_file, "w", encoding="utf8") as out_file:
out_file.writelines([f"{word}\n" for word in words])
return words
def check_path(path):
"""valid path argument"""
if not path or os.path.isdir(path):
return path
raise argparse.ArgumentTypeError(f"readable_dir:{path} is not a valid path")
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(description="Qiskit Spelling Dictionary Generation Tool")
PARSER.add_argument(
"-path", type=check_path, metavar="path", required=False, help="Root path of project."
)
PARSER.add_argument(
"-output", metavar="output", required=False, default=".pylintdict", help="Output file."
)
ARGS = PARSER.parse_args()
if not ARGS.path:
ARGS.path = os.getcwd()
ARGS.path = os.path.abspath(os.path.realpath(os.path.expanduser(ARGS.path)))
ARGS.output = os.path.join(ARGS.path, ARGS.output)
OBJ = SpellDictGenerator(ARGS.path, ARGS.output)
OBJ.generate_sphinx_spell_words()
OBJ.generate_pylint_spell_words()
OBJ.merge_sort_dict_to_output()
sys.exit(0)
|
qiskit-nature/tools/generate_spell_dict.py/0
|
{
"file_path": "qiskit-nature/tools/generate_spell_dict.py",
"repo_id": "qiskit-nature",
"token_count": 3660
}
| 150 |
[DEFAULT]
test_path=./test
|
qiskit-optimization/.stestr.conf/0
|
{
"file_path": "qiskit-optimization/.stestr.conf",
"repo_id": "qiskit-optimization",
"token_count": 11
}
| 151 |
<jupyter_start><jupyter_text>Quantum Random Access Optimization The Quantum Random Access Optimization (QRAO) module is designed to enable users to leverage a new quantum method for combinatorial optimization problems [1]. This approach incorporates Quantum Random Access Codes (QRACs) as a tool to encode multiple classical binary variables into a single qubit, thereby saving quantum resources and enabling exploration of larger problem instances on a quantum computer. The encodings produce a local quantum Hamiltonian whose ground state can be approximated with standard algorithms such as VQE, and then rounded to yield approximate solutions of the original problem. The module implements QRAO through a series of 3 classes: 1. The encoding class (`QuantumRandomAccessEncoding`): This class encodes the original problem into a relaxed problem that requires fewer resources to solve. 2. The rounding schemes (`SemideterministicRounding` and `MagicRounding`): These schemes are used to round the solution obtained from the relaxed problem back to a solution of the original problem. 3. The optimizer class (`QuantumRandomAccessOptimizer`): This class performs the high-level optimization algorithm, utilizing the capabilities of the encoding class and the rounding scheme. *References* [1] Bryce Fuller et al., *Approximate Solutions of Combinatorial Problems via Quantum Relaxations,* [arXiv:2111.03167](https://arxiv.org/abs/2111.03167)<jupyter_code>from qiskit_optimization.algorithms.qrao import (
QuantumRandomAccessEncoding,
SemideterministicRounding,
QuantumRandomAccessOptimizer,
)<jupyter_output><empty_output><jupyter_text>Set up a combinatorial optimization problemIn this tutorial, we will consider a random max-cut problem instance and use QRAO to try to find a maximum cut; in other words, a partition of the graph's vertices (nodes) into two sets that maximizes the number of edges between the sets.To begin, we utilize the `Maxcut` class from Qiskit Optimization's application module. It allows us to generate a `QuadraticProgram` representation of the given graph.Note that once our problem has been represented as a `QuadraticProgram`, it will need to be converted to the correct type, a [quadratic unconstrained binary optimization (QUBO)](https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization) problem, so that it is compatible with QRAO.A `QuadraticProgram` generated by `Maxcut` is already a QUBO, but if you define your own problem be sure you convert it to a QUBO before proceeding. Here is [a tutorial](https://qiskit-community.github.io/qiskit-optimization/tutorials/02_converters_for_quadratic_programs.html) on converting `QuadraticPrograms`.<jupyter_code>import networkx as nx
from qiskit_optimization.applications import Maxcut
seed = 1
num_nodes = 6
graph = nx.random_regular_graph(d=3, n=num_nodes, seed=seed)
nx.draw(graph, with_labels=True, pos=nx.spring_layout(graph, seed=seed))
maxcut = Maxcut(graph)
problem = maxcut.to_quadratic_program()
print(problem.prettyprint())<jupyter_output>Problem name: Max-cut
Maximize
-2*x_0*x_1 - 2*x_0*x_3 - 2*x_0*x_4 - 2*x_1*x_2 - 2*x_1*x_5 - 2*x_2*x_3
- 2*x_2*x_4 - 2*x_3*x_5 - 2*x_4*x_5 + 3*x_0 + 3*x_1 + 3*x_2 + 3*x_3 + 3*x_4
+ 3*x_5
Subject to
No constraints
Binary variables (6)
x_0 x_1 x_2 x_3 x_4 x_5<jupyter_text>Encode the problem into a quantum HamiltonianOnce we have appropriately configured our problem, we proceed to encode it using the `QuantumRandomAccessEncoding` class from the `qrao` module. This encoding step allows us to generate a quantum Hamiltonian operator that represents our problem. In particular, we employ a Quantum Random Access Code (QRAC) to encode multiple classical binary variables (corresponding to the nodes of our max-cut graph) into each qubit.It's important to note that the resulting "relaxed" Hamiltonian, produced by this encoding, will not be diagonal. This differs from the standard workflow in `qiskit-optimization`, which typically generates a diagonal (Ising) Hamiltonian suitable for optimization using a `MinimumEigenOptimizer`. You can find a tutorial on the `MinimumEigenOptimizer` [here](https://qiskit-community.github.io/qiskit-optimization/tutorials/03_minimum_eigen_optimizer.html).In our encoding process, we employ a $(3,1,p)-$QRAC, where each qubit can accommodate a maximum of 3 classical binary variables. The parameter $p$ represents the bit recovery probability achieved through measurement. Depending on the nature of the problem, some qubits may have fewer than 3 classical variables assigned to them. To evaluate the compression achieved, we can examine the `compression_ratio` attribute of the encoding, which provides the ratio between the number of original binary variables and the number of qubits used (at best, a factor of 3).<jupyter_code>from qiskit_optimization.algorithms.qrao import QuantumRandomAccessEncoding
# Create an encoding object with a maximum of 3 variables per qubit, aka a (3,1,p)-QRAC
encoding = QuantumRandomAccessEncoding(max_vars_per_qubit=3)
# Encode the QUBO problem into an encoded Hamiltonian
encoding.encode(problem)
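# Optional note (illustration only): the encoding's `q2vars` attribute, printed in the
# appendix at the end of this notebook, lists which of the original binary variables
# were packed onto each qubit by the (3,1,p)-QRAC.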
# This is our encoded Hamiltonian
print(f"Our encoded Hamiltonian is:\n( {encoding.qubit_op} ).\n")
print(
"We achieve a compression ratio of "
f"({encoding.num_vars} binary variables : {encoding.num_qubits} qubits) "
f"β {encoding.compression_ratio}.\n"
)<jupyter_output>Our encoded Hamiltonian is:
( SparsePauliOp(['XX', 'XY', 'XZ', 'YX', 'ZX', 'YY', 'YZ', 'ZY', 'ZZ'],
coeffs=[1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j,
1.5+0.j]) ).
We achieve a compression ratio of (6 binary variables : 2 qubits) ≈ 3.0.<jupyter_text>Solve the problem using the `QuantumRandomAccessOptimizer`Having successfully encoded our input problem as a relaxed Hamiltonian, we proceed to solve it using the `QuantumRandomAccessOptimizer`. This optimizer allows us to find an approximate solution to the relaxed problem by leveraging quantum computing techniques.To set up the optimizer, we need to specify two crucial components:1. **Minimum Eigensolver**: We specify a minimum eigensolver to heuristically search for the ground state of the relaxed problem Hamiltonian. As an example, we can use the Variational Quantum Eigensolver (VQE). For simulation purposes, we'll employ a simulator, but you can choose a quantum device as the backend if desired.2. **Rounding Scheme**: To map the ground state results back to a solution for the original problem, we specify a rounding scheme. By default, the `SemideterministicRounding` is used, but an alternative scheme, `MagicRounding`, is also available.<jupyter_code>from qiskit_algorithms import VQE
from qiskit_algorithms.optimizers import COBYLA
from qiskit.circuit.library import RealAmplitudes
from qiskit.primitives import Estimator
from qiskit_optimization.algorithms.qrao import (
QuantumRandomAccessOptimizer,
SemideterministicRounding,
)
# Prepare the VQE algorithm
ansatz = RealAmplitudes(2)
vqe = VQE(
ansatz=ansatz,
optimizer=COBYLA(),
estimator=Estimator(),
)
# Use semi-deterministic rounding, known as "Pauli rounding"
# in https://arxiv.org/pdf/2111.03167v2.pdf
# (This is the default if no rounding scheme is specified.)
semideterministic_rounding = SemideterministicRounding()
# Construct the optimizer
qrao = QuantumRandomAccessOptimizer(min_eigen_solver=vqe, rounding_scheme=semideterministic_rounding)<jupyter_output><empty_output><jupyter_text>Finally, we move forward with solving the problem by invoking the `solve()` method. It's important to note that when calling `solve()`, we pass the `problem` itself as an argument. Although we previously used `encode()` in `QuantumRandomAccessEncoding` to provide a clear understanding of the flow, `solve(problem)` automatically encodes the problem internally using `QuantumRandomAccessEncoding`. This provides a streamlined and simplified workflow that eliminates the need for explicit encoding steps.The result is provided to us as a `QuantumRandomAccessOptimizationResult`.The `x` contains the binary values representing the best solution found, while the `fval` contains the corresponding objective value.The `relaxed_fval` provides the expectation value of the relaxed Hamiltonian, adjusted to be in the units of the original optimization problem. For maximization problems, the best possible relaxed function value will always be greater than or equal to the best possible objective function value of the original problem. In practice, this often holds true for the best found value and best found objective function value as well.<jupyter_code># Solve the optimization problem
results = qrao.solve(problem)
print(
f"The objective function value: {results.fval}\n"
f"x: {results.x}\n"
f"relaxed function value: {-1 * results.relaxed_fval}\n"
)<jupyter_output>The objective function value: 6.0
x: [1 0 1 1 0 1]
relaxed function value: 8.999999989772657<jupyter_text>Interpret the solution In the context of [max-cut](https://en.wikipedia.org/wiki/Maximum_cut), the result's "optimal value" tells us which subset each node belongs to given the partition found by the optimizer.<jupyter_code>maxcut_partition = maxcut.interpret(results)
print(
f"The obtained solution places a partition between nodes {maxcut_partition[0]} "
f"and nodes {maxcut_partition[1]}."
)
maxcut.draw(results, pos=nx.spring_layout(graph, seed=seed))<jupyter_output>The obtained solution places a partition between nodes [1, 4] and nodes [0, 2, 3, 5].<jupyter_text>Inspect the results of subroutines The [MinimumEigensolverResult](https://qiskit-community.github.io/qiskit-algorithms/stubs/qiskit_algorithms.MinimumEigensolverResult.html) that results from performing VQE on the relaxed Hamiltonian is available:<jupyter_code>results.relaxed_result<jupyter_output><empty_output><jupyter_text>The result of the rounding scheme is also worth considering. In this example, we used the `SemideterministicRounding`. It's important to note that with semi-deterministic rounding, a single sample is generated as the result, making it the optimal solution candidate.However, if we use the `MagicRounding` instead, multiple samples would be generated, each with a probability associated with it. These probabilities sum up to one, providing a distribution of potential optimal solutions.<jupyter_code>results.samples<jupyter_output><empty_output><jupyter_text>Exact Problem Solution with the `NumPyMinimumEigensolver`To assess the performance of QRAO in approximating the optimal solution, we can utilize the `NumPyMinimumEigensolver`, an exact classical optimizer. We can obtain the exact optimal solution to the problem as follows:<jupyter_code>from qiskit_algorithms import NumPyMinimumEigensolver
from qiskit_optimization.algorithms import MinimumEigenOptimizer
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
exact_result = exact.solve(problem)
print(exact_result.prettyprint())<jupyter_output>objective function value: 9.0
variable values: x_0=0.0, x_1=1.0, x_2=0.0, x_3=1.0, x_4=1.0, x_5=0.0
status: SUCCESS<jupyter_text>The approximation ratio (QRAO's objective function value divided by the optimal objective function value) tells us how closely QRAO approximated the optimal solution to the problem.<jupyter_code>print("QRAO Approximate Optimal Function Value:", results.fval)
print("Exact Optimal Function Value:", exact_result.fval)
print(f"Approximation Ratio: {results.fval / exact_result.fval :.2f}")<jupyter_output>QRAO Approximate Optimal Function Value: 6.0
Exact Optimal Function Value: 9.0
Approximation Ratio: 0.67<jupyter_text>Solve the problem using the `QuantumRandomAccessOptimizer` with `MagicRounding`Magic rounding is a quantum technique employed to map the ground state results of our encoded Hamiltonian back to a solution of the original problem. Unlike semi-deterministic rounding, magic rounding requires a quantum backend, which can be either hardware or a simulator. The backend is passed to the `MagicRounding` class through a `Sampler`, which also determines the total number of shots (samples) that magic rounding will utilize. Note that to specify the backend, you need to choose a `Sampler` from providers such as Aer or IBM Runtime. Consequently, we need to specify `Estimator` and `Sampler` for the optimizer and the rounding scheme, respectively.In practice, users may choose to set a significantly higher number of magic rounding shots compared to the shots used by the minimum eigensolver for the relaxed problem. This difference arises because the minimum eigensolver estimates expectation values, while the magic rounding scheme returns the sample corresponding to the maximum function value found. The number of magic rounding shots directly impacts the diversity of the computational basis we can generate.When estimating an expectation value, increasing the number of shots enhances the convergence to the true value. However, when aiming to identify the largest possible function value, we often sample from the tail of a distribution of outcomes. As a result, until we observe the highest value outcome in our distribution, each additional shot increases the expected return value.In this tutorial, we use the `Estimator` for solving the relaxed Hamiltonian and the `Sampler` for performing magic rounding. Here, 10 times as many shots are used in the `Sampler`. As the number of qubits increases, you may need more shots or `weighted` basis sampling, as explained above."<jupyter_code>from qiskit.primitives import Sampler
from qiskit_optimization.algorithms.qrao import MagicRounding
estimator = Estimator(options={"shots": 1000, "seed": seed})
sampler = Sampler(options={"shots": 10000, "seed": seed})
# Prepare the VQE algorithm
ansatz = RealAmplitudes(2)
vqe = VQE(
ansatz=ansatz,
optimizer=COBYLA(),
estimator=estimator,
)
# Use magic rounding
magic_rounding = MagicRounding(sampler=sampler)
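# Sketch of an alternative configuration (hedged): `MagicRounding` also exposes a
# `basis_sampling` option; the "weighted" sampling mentioned in the text above would
# then be requested roughly as
#   magic_rounding = MagicRounding(sampler=sampler, basis_sampling="weighted")
# assuming that keyword is available in the installed qiskit-optimization version.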
# Construct the optimizer
qrao = QuantumRandomAccessOptimizer(min_eigen_solver=vqe, rounding_scheme=magic_rounding)
results = qrao.solve(problem)
print(
f"The objective function value: {results.fval}\n"
f"x: {results.x}\n"
f"relaxed function value: {-1 * results.relaxed_fval}\n"
)<jupyter_output>The objective function value: 9.0
x: [1 0 1 0 0 1]
relaxed function value: 8.999996519407159<jupyter_text>Since magic rounding relies on nondeterministic measurements, the method collects a number of samples based on the shots count provided to the `Sampler` mentioned earlier. These samples are then consolidated, taking into account duplicates and calculating the empirical probability for each `SolutionSample`. Each sample in the consolidation process includes a corresponding function value (`fval`).From the consolidated samples, we select the sample with the "optimal" function value. In the case of a max-cut problem, this means choosing the sample with the largest function value as our solution.<jupyter_code>print(f"The number of distinct samples is {len(results.samples)}.")
print("Top 10 samples with the largest fval:")
for sample in results.samples[:10]:
print(sample)<jupyter_output>The number of distinct samples is 56.
Top 10 samples with the largest fval:
SolutionSample(x=array([1, 0, 1, 0, 0, 1]), fval=9.0, probability=0.0094, status=<OptimizationResultStatus.SUCCESS: 0>)
SolutionSample(x=array([0, 1, 0, 1, 1, 0]), fval=9.0, probability=0.0112, status=<OptimizationResultStatus.SUCCESS: 0>)
SolutionSample(x=array([0, 0, 0, 1, 1, 0]), fval=6.0, probability=0.0195, status=<OptimizationResultStatus.SUCCESS: 0>)
SolutionSample(x=array([1, 1, 1, 0, 0, 1]), fval=6.0, probability=0.0205, status=<OptimizationResultStatus.SUCCESS: 0>)
SolutionSample(x=array([0, 1, 1, 1, 1, 0]), fval=6.0, probability=0.0214, status=<OptimizationResultStatus.SUCCESS: 0>)
SolutionSample(x=array([1, 0, 0, 0, 0, 1]), fval=6.0, probability=0.0194, status=<OptimizationResultStatus.SUCCESS: 0>)
SolutionSample(x=array([1, 0, 1, 0, 0, 0]), fval=6.0, probability=0.0204, status=<OptimizationResultStatus.SUCCESS: 0>)
SolutionSample(x=array([0, 1, 0, 1, 1, 1]), fval=6.0, probability=0.0215999999999999[...]<jupyter_text>Alternative: Solve the Problem in Two Explicit StepsIn the previous part of this tutorial, we utilized the `qrao.solve()` method, which solved the encoded problem (the ground state of the relaxed Hamiltonian) and performed rounding to map the ground state results back to a solution of the original problem. However, it is also possible to explicitly break down the calculation into these two distinct steps. This can be beneficial, especially when comparing solutions obtained across multiple rounding schemes applied to a candidate ground state.In this section, we will explore how to perform each of these steps explicitly. Manually solve the relaxed problem.Let's start by invoking the `qrao.solve_relaxed()` method to directly solve the relaxed problem encoded by `QuantumRandomAccessEncoding`.This method allows us to focus solely on solving the relaxed problem without performing rounding.By invoking `qrao.solve_relaxed()`, we obtain two essential outputs:- `MinimumEigensolverResult`: This object contains the results of running the minimum eigen optimizer such as the VQE on the relaxed problem. It provides information about the eigenvalue, and other relevant details. You can refer to the Qiskit Algorithms [documentation](https://docs.quantum.ibm.com/api/qiskit/qiskit.algorithms.MinimumEigensolverResult) for a comprehensive explanation of the entries within this object.- `RoundingContext`: This object encapsulates essential information about the encoding and the solution of the relaxed problem in a form that is ready for consumption by the rounding schemes.<jupyter_code># Encode the QUBO problem into a relaxed Hamiltonian
encoding = QuantumRandomAccessEncoding(max_vars_per_qubit=3)
encoding.encode(problem)
# Solve the relaxed problem
relaxed_results, rounding_context = qrao.solve_relaxed(encoding)
for k in dir(relaxed_results):
if not k.startswith("_"):
print(f"{k}: {getattr(relaxed_results, k)}")<jupyter_output>aux_operators_evaluated: [(0.010835872623325702, {'variance': 0.9999999914513272, 'shots': 1000}), (0.026074300411246972, {'variance': 0.999999991452347, 'shots': 1000}), (0.01044933784106082, {'variance': 1.0, 'shots': 1000}), (-0.04120945001189341, {'variance': 1.0, 'shots': 1000}), (0.02868127134978543, {'variance': 0.9999999973575187, 'shots': 1000}), (0.014064208211884945, {'variance': 0.9999999973585384, 'shots': 1000})]
combine: <bound method AlgorithmResult.combine of <qiskit_algorithms.minimum_eigensolvers.vqe.VQEResult object at 0x14789b880>>
cost_function_evals: 114
eigenvalue: -4.499994593889271
optimal_circuit:      ┌──────────────────────────────────────────────────────────┐
q_0: ┤0                                                         ├
     │  RealAmplitudes(θ[0],θ[1],θ[2],θ[3],θ[4],θ[5],θ[6],θ[7]) │
q_1: ┤1                                                         ├
     └──────────────────────────────────────────────────────────┘
optimal_parameters: {ParameterVectorEl[...]<jupyter_text>Manually perform rounding on the relaxed problem resultsNext, we proceed with rounding the results obtained from solving the relaxed problem. To achieve this, we call the `round()` method on an instance of the desired rounding scheme and pass it the `RoundingContext` object. Below, we provide an example for both rounding schemes, utilizing the relaxed solution obtained in the previous step.By manually performing the rounding step, we have more flexibility and control over the rounding scheme applied to the relaxed problem results. This allows for greater exploration and comparison of different rounding strategies.<jupyter_code># Round the relaxed solution using semi-deterministic rounding
semideterministic_rounding = SemideterministicRounding()
sdr_results = semideterministic_rounding.round(rounding_context)
qrao_results_sdr = qrao.process_result(
problem=problem, encoding=encoding, relaxed_result=relaxed_results, rounding_result=sdr_results
)
print(
f"The objective function value: {qrao_results_sdr.fval}\n"
f"x: {qrao_results_sdr.x}\n"
f"relaxed function value: {-1 * qrao_results_sdr.relaxed_fval}\n"
f"The number of distinct samples is {len(qrao_results_sdr.samples)}."
)
magic_rounding = MagicRounding(sampler=sampler)
mr_results = magic_rounding.round(rounding_context)
qrao_results_mr = qrao.process_result(
problem=problem, encoding=encoding, relaxed_result=relaxed_results, rounding_result=mr_results
)
print(
f"The objective function value: {qrao_results_mr.fval}\n"
f"x: {qrao_results_mr.x}\n"
f"relaxed function value: {-1 * qrao_results_mr.relaxed_fval}\n"
f"The number of distinct samples is {len(qrao_results_mr.samples)}."
)<jupyter_output>The objective function value: 9.0
x: [1 0 1 0 0 1]
relaxed function value: -8.999994593889271
The number of distinct samples is 56.<jupyter_text>Appendix How to verify correctness of your encodingWe assume for the sake of the QRAO method that **the relaxation commutes with the objective function.** This notebook demonstrates how one can verify this for any problem (a `QuadraticProgram` in the language of Qiskit Optimization). One might want to verify this for pedagogical purposes, or as a sanity check when investigating unexpected behavior with the QRAO. Any problem that does not commute should be considered a bug, and if such a problem is discovered, we encourage you to submit it as [an issue on GitHub](https://github.com/qiskit-community/qiskit-optimization/issues).The `EncodingCommutationVerifier` class allows one to conveniently iterate over all decision variable states and compare each objective value with the corresponding encoded objective value, in order to identify any discrepancy.<jupyter_code>from qiskit_optimization.algorithms.qrao import EncodingCommutationVerifier
seed = 1
num_nodes = 6
graph = nx.random_regular_graph(d=3, n=num_nodes, seed=seed)
nx.draw(graph, with_labels=True, pos=nx.spring_layout(graph, seed=seed))
maxcut = Maxcut(graph)
problem = maxcut.to_quadratic_program()
print(problem.prettyprint())<jupyter_output>Problem name: Max-cut
Maximize
-2*x_0*x_1 - 2*x_0*x_3 - 2*x_0*x_4 - 2*x_1*x_2 - 2*x_1*x_5 - 2*x_2*x_3
- 2*x_2*x_4 - 2*x_3*x_5 - 2*x_4*x_5 + 3*x_0 + 3*x_1 + 3*x_2 + 3*x_3 + 3*x_4
+ 3*x_5
Subject to
No constraints
Binary variables (6)
x_0 x_1 x_2 x_3 x_4 x_5<jupyter_text>As before, we `encode()` the problem using the QuantumRandomAccessEncoding class:<jupyter_code>encoding = QuantumRandomAccessEncoding(max_vars_per_qubit=3)
encoding.encode(problem)
print("Encoded Problem:\n=================")
print(encoding.qubit_op) # The Hamiltonian without the offset
print("Offset = ", encoding.offset)
print("Variables encoded on each qubit: ", encoding.q2vars)<jupyter_output>Encoded Problem:
=================
SparsePauliOp(['XX', 'XY', 'XZ', 'YX', 'ZX', 'YY', 'YZ', 'ZY', 'ZZ'],
coeffs=[1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j, 1.5+0.j,
1.5+0.j])
Offset = -4.5
Variables encoded on each qubit: [[0, 2, 5], [1, 3, 4]]<jupyter_text>Finally, we iterate over every decision variable state using `EncodingCommutationVerifier` and verify that, in each case, the problem objective value matches the encoded objective value:<jupyter_code>import numpy as np
verifier = EncodingCommutationVerifier(encoding, estimator=Estimator())
if len(verifier) != 2**encoding.num_vars:
    print("The number of results of the encoded problem is not equal to 2 ** num_vars.")
for str_dvars, obj_val, encoded_obj_val in verifier:
if not np.isclose(obj_val, encoded_obj_val):
print(
f"Violation identified: {str_dvars} evaluates to {obj_val} "
f"but the encoded problem evaluates to {encoded_obj_val}."
)<jupyter_output><empty_output><jupyter_text>If you are able to construct a problem that causes a violation, it is quite possible that you have discovered a bug in the `QuantumRandomAccessEncoding` logic. We would greatly appreciate it if you could share the problem with us by [submitting it as an issue](https://github.com/Qiskit/qiskit-optimization/issues) on GitHub.<jupyter_code>import tutorial_magics
%qiskit_version_table
%qiskit_copyright<jupyter_output><empty_output>
|
qiskit-optimization/docs/tutorials/12_quantum_random_access_optimizer.ipynb/0
|
{
"file_path": "qiskit-optimization/docs/tutorials/12_quantum_random_access_optimizer.ipynb",
"repo_id": "qiskit-optimization",
"token_count": 7485
}
| 152 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2021, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Implementation of the Goemans-Williamson algorithm as an optimizer.
Requires CVXPY to run.
"""
import logging
from typing import Optional, List, Tuple, Union, cast
import numpy as np
import qiskit_optimization.optionals as _optionals
from .optimization_algorithm import (
OptimizationResult,
OptimizationResultStatus,
OptimizationAlgorithm,
SolutionSample,
)
from ..converters.flip_problem_sense import MinimizeToMaximize
from ..problems.quadratic_program import QuadraticProgram
from ..problems.variable import Variable
logger = logging.getLogger(__name__)
class GoemansWilliamsonOptimizationResult(OptimizationResult):
"""
Contains results of the Goemans-Williamson algorithm. The properties ``x`` and ``fval`` contain
values of just one solution. Explore ``samples`` for all possible solutions.
"""
def __init__(
self,
x: Optional[Union[List[float], np.ndarray]],
fval: float,
variables: List[Variable],
status: OptimizationResultStatus,
samples: Optional[List[SolutionSample]],
sdp_solution: Optional[np.ndarray] = None,
) -> None:
"""
Args:
x: the optimal value found in the optimization.
fval: the optimal function value.
variables: the list of variables of the optimization problem.
status: the termination status of the optimization algorithm.
samples: the solution samples.
sdp_solution: an SDP solution of the problem.
"""
super().__init__(x, fval, variables, status, samples=samples)
self._sdp_solution = sdp_solution
@property
def sdp_solution(self) -> Optional[np.ndarray]:
"""
Returns:
Returns an SDP solution of the problem.
"""
return self._sdp_solution
@_optionals.HAS_CVXPY.require_in_instance
class GoemansWilliamsonOptimizer(OptimizationAlgorithm):
"""
Goemans-Williamson algorithm to approximate the max-cut of a problem.
The quadratic program for max-cut is given by:
max sum_{i,j<i} w[i,j]*x[i]*(1-x[j])
Therefore the quadratic term encodes the negative of the adjacency matrix of
the graph.
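    A minimal usage sketch (an illustrative addition; it assumes ``networkx`` and
    the optional CVXPY dependency are installed)::
        import networkx as nx
        from qiskit_optimization.applications import Maxcut
        from qiskit_optimization.algorithms import GoemansWilliamsonOptimizer
        graph = nx.random_regular_graph(d=3, n=6, seed=1)
        problem = Maxcut(graph).to_quadratic_program()
        result = GoemansWilliamsonOptimizer(num_cuts=10).solve(problem)
        print(result.fval, result.x)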
"""
def __init__(
self,
num_cuts: int,
sort_cuts: bool = True,
unique_cuts: bool = True,
seed: int = 0,
):
"""
Args:
num_cuts: Number of cuts to generate.
            sort_cuts: True if the cuts should be sorted by their values.
            unique_cuts: If True, the solve method returns only unique cuts, so there may be
                fewer cuts than ``num_cuts``.
seed: A seed value for the random number generator.
"""
super().__init__()
self._num_cuts = num_cuts
self._sort_cuts = sort_cuts
self._unique_cuts = unique_cuts
np.random.seed(seed)
def get_compatibility_msg(self, problem: QuadraticProgram) -> str:
"""Checks whether a given problem can be solved with the optimizer implementing this method.
Args:
problem: The optimization problem to check compatibility.
Returns:
Returns the incompatibility message. If the message is empty no issues were found.
"""
message = ""
if problem.get_num_binary_vars() != problem.get_num_vars():
message = (
f"Only binary variables are supported, while the total number of variables "
f"{problem.get_num_vars()} and there are {problem.get_num_binary_vars()} "
f"binary variables across them"
)
return message
def solve(self, problem: QuadraticProgram) -> OptimizationResult:
"""
Returns a list of cuts generated according to the Goemans-Williamson algorithm.
Args:
problem: The quadratic problem that encodes the max-cut problem.
Returns:
cuts: A list of generated cuts.
"""
# pylint: disable=import-error
from cvxpy import DCPError, DGPError, SolverError
self._verify_compatibility(problem)
min2max = MinimizeToMaximize()
problem = min2max.convert(problem)
adj_matrix = self._extract_adjacency_matrix(problem)
try:
chi = self._solve_max_cut_sdp(adj_matrix)
except (DCPError, DGPError, SolverError):
logger.error("Can't solve SDP problem")
return GoemansWilliamsonOptimizationResult(
x=[],
fval=0,
variables=problem.variables,
status=OptimizationResultStatus.FAILURE,
samples=[],
)
cuts = self._generate_random_cuts(chi, len(adj_matrix))
numeric_solutions = [
(cuts[i, :], self.max_cut_value(cuts[i, :], adj_matrix)) for i in range(self._num_cuts)
]
if self._sort_cuts:
numeric_solutions.sort(key=lambda x: -x[1])
if self._unique_cuts:
numeric_solutions = self._get_unique_cuts(numeric_solutions)
numeric_solutions = numeric_solutions[: self._num_cuts]
samples = [
SolutionSample(
x=solution[0],
fval=solution[1],
probability=1.0 / len(numeric_solutions),
status=OptimizationResultStatus.SUCCESS,
)
for solution in numeric_solutions
]
return cast(
GoemansWilliamsonOptimizationResult,
self._interpret(
x=samples[0].x,
problem=problem,
converters=[min2max],
result_class=GoemansWilliamsonOptimizationResult,
samples=samples,
),
)
def _get_unique_cuts(
self, solutions: List[Tuple[np.ndarray, float]]
) -> List[Tuple[np.ndarray, float]]:
"""
Returns:
Unique Goemans-Williamson cuts.
"""
        # Remove symmetry in the cuts to choose the unique ones.
        # Cuts 010 and 101 are symmetric (the same cut), so we convert all cuts
        # starting with 1 to start with 0. In the next loop, repeated cuts are removed.
for idx, cut in enumerate(solutions):
if cut[0][0] == 1:
solutions[idx] = (
np.array([0 if _ == 1 else 1 for _ in cut[0]]),
cut[1],
)
seen_cuts = set()
unique_cuts = []
for cut in solutions:
cut_str = "".join([str(_) for _ in cut[0]])
if cut_str in seen_cuts:
continue
seen_cuts.add(cut_str)
unique_cuts.append(cut)
return unique_cuts
@staticmethod
def _extract_adjacency_matrix(problem: QuadraticProgram) -> np.ndarray:
"""
Extracts the adjacency matrix from the given quadratic program.
Args:
problem: A QuadraticProgram describing the max-cut optimization problem.
Returns:
adjacency matrix of the graph.
"""
adj_matrix = -problem.objective.quadratic.coefficients.toarray()
adj_matrix = (adj_matrix + adj_matrix.T) / 2
return adj_matrix
def _solve_max_cut_sdp(self, adj_matrix: np.ndarray) -> np.ndarray:
"""
Calculates the maximum weight cut by generating |V| vectors with a vector program,
then generating a random plane that cuts the vertices. This is the Goemans-Williamson
algorithm that gives a .878-approximation.
Returns:
            chi: the positive semidefinite (num_vertices x num_vertices) matrix found by the
                SDP solver, i.e. the Gram matrix of the vertex vectors used for rounding.
"""
# pylint: disable=import-error
import cvxpy as cvx
num_vertices = len(adj_matrix)
constraints, expr = [], 0
# variables
x = cvx.Variable((num_vertices, num_vertices), PSD=True)
# constraints
for i in range(num_vertices):
constraints.append(x[i, i] == 1)
# objective function
expr = cvx.sum(cvx.multiply(adj_matrix, (np.ones((num_vertices, num_vertices)) - x)))
# solve
problem = cvx.Problem(cvx.Maximize(expr), constraints)
problem.solve()
return x.value
def _generate_random_cuts(self, chi: np.ndarray, num_vertices: int) -> np.ndarray:
"""
Random hyperplane partitions vertices.
Args:
            chi: the positive semidefinite matrix returned by the SDP relaxation,
                i.e. the Gram matrix of the vertex vectors.
num_vertices: the number of vertices in the graph
Returns:
An array of random cuts.
"""
eigenvalues = np.linalg.eigh(chi)[0]
if min(eigenvalues) < 0:
chi = chi + (1.001 * abs(min(eigenvalues)) * np.identity(num_vertices))
elif min(eigenvalues) == 0:
chi = chi + 0.00001 * np.identity(num_vertices)
x = np.linalg.cholesky(chi).T
r = np.random.normal(size=(self._num_cuts, num_vertices))
return (np.dot(r, x) > 0) + 0
@staticmethod
def max_cut_value(x: np.ndarray, adj_matrix: np.ndarray):
"""Compute the value of a cut from an adjacency matrix and a list of binary values.
Args:
            x: a list of binary values as a numpy array.
adj_matrix: adjacency matrix.
Returns:
float: value of the cut.
"""
cut_matrix = np.outer(x, (1 - x))
return np.sum(adj_matrix * cut_matrix)
|
qiskit-optimization/qiskit_optimization/algorithms/goemans_williamson_optimizer.py/0
|
{
"file_path": "qiskit-optimization/qiskit_optimization/algorithms/goemans_williamson_optimizer.py",
"repo_id": "qiskit-optimization",
"token_count": 4446
}
| 153 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2021, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Implementation of the warm start QAOA optimizer."""
import copy
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Tuple, Union, cast
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter
from qiskit_algorithms import QAOA
from ..converters.quadratic_program_converter import QuadraticProgramConverter
from ..exceptions import QiskitOptimizationError
from ..problems.quadratic_program import QuadraticProgram
from ..problems.variable import VarType
from .minimum_eigen_optimizer import MinimumEigenOptimizationResult, MinimumEigenOptimizer
from .optimization_algorithm import OptimizationAlgorithm, OptimizationResultStatus, SolutionSample
class BaseAggregator(ABC):
"""A base abstract class for aggregates results"""
@abstractmethod
def aggregate(self, results: List[MinimumEigenOptimizationResult]) -> List[SolutionSample]:
"""
Aggregates the results.
Args:
results: List of result objects that need to be combined.
Returns:
Aggregated samples.
"""
raise NotImplementedError
class MeanAggregator(BaseAggregator):
"""Aggregates the results by averaging the probability of each sample."""
def aggregate(self, results: List[MinimumEigenOptimizationResult]) -> List[SolutionSample]:
"""
Args:
results: List of result objects that need to be combined.
Returns:
Aggregated samples by averaging them.
"""
# Use a dict for fast solution look-up
# Key: sample code, value: tuple of fval, probability
dict_samples: Dict[str, Tuple[float, float]] = {}
def _to_string(x: np.ndarray) -> str:
return "".join(str(int(v)) for v in x)
def _from_string(string) -> np.ndarray:
return np.array([float(c) for c in string])
# Sum up all the probabilities in the results
for result in results:
for sample in result.samples:
# state, fval, prob = sample[0], sample[1], sample[2]
state, fval, prob = (
_to_string(sample.x),
sample.fval,
sample.probability,
)
if state in dict_samples:
dict_samples[state] = (fval, dict_samples[state][1] + prob)
else:
dict_samples[state] = (fval, prob)
# Divide by the number of results to normalize
aggregated_samples = []
num_results = len(results)
for state, val_prob in dict_samples.items():
# sample = (state, dict_samples[state][0], dict_samples[state][1] / num_results)
sample = SolutionSample(
x=_from_string(state),
fval=val_prob[0],
probability=val_prob[1] / num_results,
status=OptimizationResultStatus.SUCCESS,
)
aggregated_samples.append(sample)
return aggregated_samples
class WarmStartQAOAFactory:
"""
A factory that produces quantum circuits for the QAOA implementation. The methods of this
factory can be overridden to modify behavior of QAOA. This implementation generates quantum
circuits for initial state and mixer to warm start QAOA.
"""
def __init__(self, epsilon: float) -> None:
"""
Args:
epsilon: the regularization parameter that changes the initial variables according to
xi = epsilon if xi < epsilon
                xi = 1-epsilon if xi > 1-epsilon.
The regularization parameter epsilon should be between 0 and 0.5. When it
is 0.5 then warm start corresponds to standard QAOA.
Raises:
QiskitOptimizationError: if ``epsilon`` is not in the range [0, 0.5].
"""
if epsilon < 0.0 or epsilon > 0.5:
raise QiskitOptimizationError(
f"Epsilon for warm-start QAOA needs to be between 0 and 0.5, "
f"actual value: {epsilon}"
)
self._epsilon = epsilon
def create_initial_variables(self, solution: np.ndarray) -> List[float]:
"""
Creates initial variable values to warm start QAOA.
Args:
solution: a solution obtained for the relaxed problem.
Returns:
A list of initial variables constructed from a relaxed solution.
"""
initial_variables = []
for variable in solution:
if variable < self._epsilon:
initial_variables.append(self._epsilon)
elif variable > 1.0 - self._epsilon:
initial_variables.append(1.0 - self._epsilon)
else:
initial_variables.append(variable)
return initial_variables
def create_initial_state(self, initial_variables: List[float]) -> QuantumCircuit:
"""
Creates an initial state quantum circuit to warm start QAOA.
Args:
initial_variables: Already created initial variables.
Returns:
A quantum circuit that represents initial state.
"""
circuit = QuantumCircuit(len(initial_variables))
for index, relaxed_value in enumerate(initial_variables):
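            # Ry(theta)|0> yields P(|1>) = sin^2(theta/2), so theta = 2*arcsin(sqrt(c))
            # prepares this qubit to measure |1> with probability equal to the relaxed
            # value c of the corresponding variable.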
theta = 2 * np.arcsin(np.sqrt(relaxed_value))
circuit.ry(theta, index)
return circuit
def create_mixer(self, initial_variables: List[float]) -> QuantumCircuit:
"""
Creates an evolved mixer circuit as Ry(theta)Rz(-2beta)Ry(-theta).
Args:
initial_variables: Already created initial variables.
Returns:
A quantum circuit to be used as a mixer in QAOA.
"""
circuit = QuantumCircuit(len(initial_variables))
beta = Parameter("beta")
for index, relaxed_value in enumerate(initial_variables):
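            # Gates are appended in circuit order Ry(-theta), Rz(-2*beta), Ry(theta),
            # which realizes the operator Ry(theta) Rz(-2*beta) Ry(-theta) from the
            # docstring, i.e. a single-qubit mixer whose eigenstate is this qubit's
            # warm-start initial state (cf. Egger et al., arXiv:2009.10095).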
theta = 2 * np.arcsin(np.sqrt(relaxed_value))
circuit.ry(-theta, index)
circuit.rz(-2.0 * beta, index)
circuit.ry(theta, index)
return circuit
class WarmStartQAOAOptimizer(MinimumEigenOptimizer):
"""A meta-algorithm that uses a pre-solver to solve a relaxed version of the problem.
    Users must implement their own pre-solvers by inheriting from the base class.
References:
[1]: Daniel J. Egger et al., Warm-starting quantum optimization.
`arXiv:2009.10095 <https://arxiv.org/abs/2009.10095>`_
"""
def __init__(
self,
pre_solver: OptimizationAlgorithm,
relax_for_pre_solver: bool,
qaoa: QAOA,
epsilon: float = 0.25,
num_initial_solutions: int = 1,
warm_start_factory: Optional[WarmStartQAOAFactory] = None,
aggregator: Optional[BaseAggregator] = None,
penalty: Optional[float] = None,
converters: Optional[
Union[QuadraticProgramConverter, List[QuadraticProgramConverter]]
] = None,
) -> None:
"""Initializes the optimizer. For correct initialization either
``epsilon`` or ``circuit_factory`` must be passed. If only ``epsilon`` is specified
(either an explicit value or the default one), then an instance of
:class:`~qiskit.optimization.algorithms.WarmStartQAOACircuitFactory` is created.
If ``circuit_factory`` is specified then this instance is used in the implementation
and ``epsilon`` value is ignored.
Args:
pre_solver: An instance of an optimizer to solve the relaxed version of the problem.
relax_for_pre_solver: True if the problem must be relaxed to the continuous case
before passing it to the pre-solver.
qaoa: A QAOA instance to be used in the computations.
epsilon: the regularization parameter that changes the initial variables according to
xi = epsilon if xi < epsilon
                xi = 1-epsilon if xi > 1-epsilon.
The regularization parameter epsilon should be between 0 and 0.5. When it
                is 0.5 then warm start corresponds to standard QAOA. If ``warm_start_factory`` is
                specified then this parameter is ignored. Default value is 0.25.
num_initial_solutions: An optional number of relaxed (continuous) solutions to use.
Default value is 1.
warm_start_factory: An optional instance of the factory to warm start QAOA.
This factory is used to create circuits for initial state and mixer.
If ``None`` is specified then a default one,
                an instance of :class:`~qiskit_optimization.algorithms.WarmStartQAOAFactory`
is created using the value of the ``epsilon`` parameter.
aggregator: Class that aggregates different results. This is used if the pre-solver
returns several initial states.
penalty: The penalty factor to be used, or ``None`` for applying a default logic.
converters: The converters to use for converting a problem into a different form.
By default, when None is specified, an internally created instance of
                :class:`~qiskit_optimization.converters.QuadraticProgramToQubo` will be used.
Raises:
QiskitOptimizationError: if ``epsilon`` is not in the range [0, 0.5].
"""
if epsilon < 0.0 or epsilon > 0.5:
raise QiskitOptimizationError(
f"Epsilon for warm-start QAOA needs to be between 0 and 0.5, "
f"actual value: {epsilon}"
)
self._pre_solver = pre_solver
self._relax_for_pre_solver = relax_for_pre_solver
self._qaoa = qaoa
self._epsilon = epsilon
self._num_initial_solutions = num_initial_solutions
if warm_start_factory is None:
warm_start_factory = WarmStartQAOAFactory(epsilon)
self._warm_start_factory = warm_start_factory
if num_initial_solutions > 1 and aggregator is None:
aggregator = MeanAggregator()
self._aggregator = aggregator
super().__init__(qaoa, penalty, converters)
def solve(self, problem: QuadraticProgram) -> MinimumEigenOptimizationResult:
"""Tries to solves the given problem using the optimizer.
The pre-solver is run to warm-start the solver. Next, the optimizer is run
to try to solve the optimization problem.
Args:
problem: The problem to be solved.
Returns:
The result of the optimizer applied to the problem.
Raises:
QiskitOptimizationError: If problem not compatible or the presolver can't solve
a problem.
"""
message = self.get_compatibility_msg(problem)
if len(message) > 0:
raise QiskitOptimizationError(f"Incompatible problem: {message}")
# convert problem to minimization QUBO or another form if converters are specified
converted_problem = self._convert(problem, self._converters)
# if the pre-solver can't solve the problem then it should be relaxed.
if self._relax_for_pre_solver:
pre_solver_problem = self._relax_problem(converted_problem)
else:
pre_solver_problem = converted_problem
opt_result = self._pre_solver.solve(pre_solver_problem)
if opt_result.status != OptimizationResultStatus.SUCCESS:
raise QiskitOptimizationError(
f"Presolver returned status {opt_result.status}, " f"the problem can't be solved"
)
# we pick only a certain number of the pre-solved solutions.
num_pre_solutions = min(self._num_initial_solutions, len(opt_result.samples))
pre_solutions = opt_result.samples[:num_pre_solutions]
# construct operator and offset
operator, offset = converted_problem.to_ising()
results: List[MinimumEigenOptimizationResult] = []
for pre_solution in pre_solutions:
# Set the solver using the result of the pre-solver.
initial_variables = self._warm_start_factory.create_initial_variables(pre_solution.x)
self._qaoa.initial_state = self._warm_start_factory.create_initial_state(
initial_variables
)
self._qaoa.mixer = self._warm_start_factory.create_mixer(initial_variables)
# approximate ground state of operator using min eigen solver.
results.append(self._solve_internal(operator, offset, converted_problem, problem))
if len(results) == 1:
# there's no need to call _interpret, it is already done by MinimumEigenOptimizer
return results[0]
else:
samples = self._aggregator.aggregate(results)
samples.sort(key=lambda sample: problem.objective.sense.value * sample.fval)
# translate result back to the original variables
return cast(
MinimumEigenOptimizationResult,
self._interpret(
x=samples[0].x,
problem=problem,
converters=self._converters,
result_class=MinimumEigenOptimizationResult,
samples=samples,
),
)
@staticmethod
def _relax_problem(problem: QuadraticProgram) -> QuadraticProgram:
"""
Change all variables to continuous.
Args:
problem: Problem to relax.
Returns:
A copy of the original problem where all variables are continuous.
"""
relaxed_problem = copy.deepcopy(problem)
for variable in relaxed_problem.variables:
variable.vartype = VarType.CONTINUOUS
return relaxed_problem
|
qiskit-optimization/qiskit_optimization/algorithms/warm_start_qaoa_optimizer.py/0
|
{
"file_path": "qiskit-optimization/qiskit_optimization/algorithms/warm_start_qaoa_optimizer.py",
"repo_id": "qiskit-optimization",
"token_count": 6042
}
| 154 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2018, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""An application class for the vertex cover."""
from typing import Dict, List, Optional, Union
import networkx as nx
import numpy as np
from docplex.mp.model import Model
from qiskit_optimization.algorithms import OptimizationResult
from qiskit_optimization.problems.quadratic_program import QuadraticProgram
from qiskit_optimization.translators import from_docplex_mp
from .graph_optimization_application import GraphOptimizationApplication
class VertexCover(GraphOptimizationApplication):
"""Optimization application for the "vertex cover" [1] problem based on a NetworkX graph.
References:
[1]: "Vertex cover", https://en.wikipedia.org/wiki/Vertex_cover
"""
def to_quadratic_program(self) -> QuadraticProgram:
"""Convert a vertex cover instance into a
:class:`~qiskit_optimization.problems.QuadraticProgram`
Returns:
The :class:`~qiskit_optimization.problems.QuadraticProgram` created
from the vertex cover instance.
"""
mdl = Model(name="Vertex cover")
n = self._graph.number_of_nodes()
x = {i: mdl.binary_var(name=f"x_{i}") for i in range(n)}
objective = mdl.sum(x[i] for i in x)
for w, v in self._graph.edges:
mdl.add_constraint(x[w] + x[v] >= 1)
mdl.minimize(objective)
op = from_docplex_mp(mdl)
return op
def interpret(self, result: Union[OptimizationResult, np.ndarray]) -> List[int]:
"""Interpret a result as a list of node indices
Args:
result : The calculated result of the problem
Returns:
A list of node indices whose corresponding variable is 1
"""
x = self._result_to_x(result)
vertex_cover = []
for i, value in enumerate(x):
if value:
vertex_cover.append(i)
return vertex_cover
def _draw_result(
self,
result: Union[OptimizationResult, np.ndarray],
pos: Optional[Dict[int, np.ndarray]] = None,
) -> None:
"""Draw the result with colors
Args:
result : The calculated result for the problem
pos: The positions of nodes
"""
x = self._result_to_x(result)
nx.draw(self._graph, node_color=self._node_colors(x), pos=pos, with_labels=True)
def _node_colors(self, x: np.ndarray) -> List[str]:
# Return a list of strings for draw.
# Color a node with red when the corresponding variable is 1.
# Otherwise color it with dark gray.
return ["r" if x[node] else "darkgrey" for node in self._graph.nodes]
|
qiskit-optimization/qiskit_optimization/applications/vertex_cover.py/0
|
{
"file_path": "qiskit-optimization/qiskit_optimization/applications/vertex_cover.py",
"repo_id": "qiskit-optimization",
"token_count": 1213
}
| 155 |
---
upgrade:
- |
Simplifies :class:`qiskit_optimization.algorithms.CplexOptimizer` by
calling CPLEX from ``docplex.mp.model.Model.solve`` directly.
Also adds a fallback code if no solution is found by CPLEX.
- |
Adds ``cplex_parameters`` as a dictionary to
:class:`qiskit_optimization.algorithms.CplexOptimizer`
so that users can set
`CPLEX parameters <https://www.ibm.com/docs/en/icos/20.1.0?topic=cplex-parameters>`_
such as time limit and number of threads.
|
qiskit-optimization/releasenotes/notes/0.2/add-cplex-params-2b7bd7bde01400c7.yaml/0
|
{
"file_path": "qiskit-optimization/releasenotes/notes/0.2/add-cplex-params-2b7bd7bde01400c7.yaml",
"repo_id": "qiskit-optimization",
"token_count": 191
}
| 156 |
---
features:
- |
:func:`qiskit_optimization.translators.from_docplex_mp` supports logical
expressions of Docplex, i.e.,
`logical_and <https://ibmdecisionoptimization.github.io/docplex-doc/mp/docplex.mp.model.html#docplex.mp.model.Model.logical_and>`_,
`logical_or <https://ibmdecisionoptimization.github.io/docplex-doc/mp/docplex.mp.model.html#docplex.mp.model.Model.logical_or>`_,
and `logical_not <https://ibmdecisionoptimization.github.io/docplex-doc/mp/docplex.mp.model.html#docplex.mp.model.Model.logical_not>`_.
For example:
.. code-block:: python
from docplex.mp.model import Model
from qiskit_optimization.translators import from_docplex_mp
mod = Model()
x = mod.binary_var('x')
y = mod.binary_var('y')
mod.add_constraint(mod.logical_and(x, y) <= 1)
qp = from_docplex_mp(mod)
fixes:
- |
If an indicator constraint of a Docplex model does not have a name,
:func:`qiskit_optimization.translators.from_docplex_mp` adds a name ``ind{number}``
for sense ``<=`` and ``>=`` or names ``ind{number}_LE`` and ``ind{number}_GE``
for sense ``==``.
- |
If an indicator constraint of a Docplex model includes ``binary_var``
as part of ``linear_ct``, :func:`qiskit_optimization.translators.from_docplex_mp`
handles the coefficient properly.
- |
If a trivial constraint is included in a Docplex model,
    :func:`qiskit_optimization.translators.from_docplex_mp` raises a ``UserWarning``
and converts it into a constraint of :class:`qiskit_optimization.problems.QuadraticProgram` as is.
- |
If a trivial constraint is included in :class:`qiskit_optimization.problems.QuadraticProgram`,
:func:`qiskit_optimization.translators.to_docplex_mp` converts it
into a constraint of Docplex without any error.
|
qiskit-optimization/releasenotes/notes/0.3/refactor-from_docplex_mp-d2f637d000429541.yaml/0
|
{
"file_path": "qiskit-optimization/releasenotes/notes/0.3/refactor-from_docplex_mp-d2f637d000429541.yaml",
"repo_id": "qiskit-optimization",
"token_count": 710
}
| 157 |
---
upgrade:
- |
The previously deprecated ``VQEProgram`` and ``QAOAProgram`` classes have been removed.
They were originally deprecated in the Qiskit Optimization 0.3.0 release.
|
qiskit-optimization/releasenotes/notes/0.5/remove-vqe-qaoa-programs-152a997734296fe2.yaml/0
|
{
"file_path": "qiskit-optimization/releasenotes/notes/0.5/remove-vqe-qaoa-programs-152a997734296fe2.yaml",
"repo_id": "qiskit-optimization",
"token_count": 57
}
| 158 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2018, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Gurobi Optimizer """
import unittest
from test.optimization_test_case import QiskitOptimizationTestCase
from ddt import data, ddt
import numpy as np
import qiskit_optimization.optionals as _optionals
from qiskit_optimization.algorithms import GurobiOptimizer
from qiskit_optimization.problems import QuadraticProgram
@ddt
class TestGurobiOptimizer(QiskitOptimizationTestCase):
"""Gurobi Optimizer Tests."""
@data(
("op_ip1.lp", [0, 2], 6),
("op_mip1.lp", [0, 1, 1], 5.5),
("op_lp1.lp", [0.25, 1.75], 5.8750),
)
@unittest.skipIf(not _optionals.HAS_GUROBIPY, "Gurobi not available.")
@unittest.skipIf(not _optionals.HAS_CPLEX, "CPLEX not available.")
def test_gurobi_optimizer(self, config):
"""Gurobi Optimizer Test"""
# unpack configuration
gurobi_optimizer = GurobiOptimizer(disp=False)
filename, x, fval = config
# load optimization problem
problem = QuadraticProgram()
lp_file = self.get_resource_path(filename, "algorithms/resources")
problem.read_from_lp_file(lp_file)
# solve problem with gurobi
result = gurobi_optimizer.solve(problem)
# analyze results
self.assertAlmostEqual(result.fval, fval)
np.testing.assert_array_almost_equal(result.x, x)
if __name__ == "__main__":
unittest.main()
|
qiskit-optimization/test/algorithms/test_gurobi_optimizer.py/0
|
{
"file_path": "qiskit-optimization/test/algorithms/test_gurobi_optimizer.py",
"repo_id": "qiskit-optimization",
"token_count": 716
}
| 159 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test OptimizationApplication class"""
import unittest
from test.optimization_test_case import QiskitOptimizationTestCase
import numpy as np
from ddt import data, ddt
from qiskit.result import QuasiDistribution
from qiskit_optimization.applications import OptimizationApplication
@ddt
class TestOptimizationApplication(QiskitOptimizationTestCase):
"""Test OptimizationApplication class"""
@data(
np.array([0, 0, 1, 0]),
{"10": 0.8, "01": 0.2},
QuasiDistribution({"10": 0.8, "01": 0.2}),
)
def test_sample_most_likely(self, state_vector):
"""Test sample_most_likely"""
result = OptimizationApplication.sample_most_likely(state_vector)
np.testing.assert_allclose(result, [0, 1])
if __name__ == "__main__":
unittest.main()
|
qiskit-optimization/test/applications/test_optimization_application.py/0
|
{
"file_path": "qiskit-optimization/test/applications/test_optimization_application.py",
"repo_id": "qiskit-optimization",
"token_count": 432
}
| 160 |
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2020, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test LinearExpression """
import unittest
from test.optimization_test_case import QiskitOptimizationTestCase
import numpy as np
from scipy.sparse import dok_matrix
from qiskit_optimization import INFINITY, QiskitOptimizationError, QuadraticProgram
from qiskit_optimization.problems import LinearExpression
class TestLinearExpression(QiskitOptimizationTestCase):
"""Test LinearExpression."""
def test_init(self):
"""test init."""
quadratic_program = QuadraticProgram()
for _ in range(5):
quadratic_program.continuous_var()
coefficients_list = list(range(5))
coefficients_array = np.array(coefficients_list)
coefficients_dok = dok_matrix([coefficients_list])
coefficients_dict_int = {i: i for i in range(1, 5)}
coefficients_dict_str = {f"x{i}": i for i in range(1, 5)}
for coeffs in [
coefficients_list,
coefficients_array,
coefficients_dok,
coefficients_dict_int,
coefficients_dict_str,
]:
linear = LinearExpression(quadratic_program, coeffs)
self.assertEqual((linear.coefficients != coefficients_dok).nnz, 0)
self.assertTrue((linear.to_array() == coefficients_list).all())
self.assertDictEqual(linear.to_dict(use_name=False), coefficients_dict_int)
self.assertDictEqual(linear.to_dict(use_name=True), coefficients_dict_str)
def test_get_item(self):
"""test get_item."""
quadratic_program = QuadraticProgram()
for _ in range(5):
quadratic_program.continuous_var()
coefficients = list(range(5))
linear = LinearExpression(quadratic_program, coefficients)
for i, v in enumerate(coefficients):
self.assertEqual(linear[i], v)
def test_setters(self):
"""test setters."""
quadratic_program = QuadraticProgram()
for _ in range(5):
quadratic_program.continuous_var()
zeros = np.zeros(quadratic_program.get_num_vars())
linear = LinearExpression(quadratic_program, zeros)
coefficients_list = list(range(5))
coefficients_array = np.array(coefficients_list)
coefficients_dok = dok_matrix([coefficients_list])
coefficients_dict_int = {i: i for i in range(1, 5)}
coefficients_dict_str = {f"x{i}": i for i in range(1, 5)}
for coeffs in [
coefficients_list,
coefficients_array,
coefficients_dok,
coefficients_dict_int,
coefficients_dict_str,
]:
linear.coefficients = coeffs
self.assertEqual((linear.coefficients != coefficients_dok).nnz, 0)
self.assertTrue((linear.to_array() == coefficients_list).all())
self.assertDictEqual(linear.to_dict(use_name=False), coefficients_dict_int)
self.assertDictEqual(linear.to_dict(use_name=True), coefficients_dict_str)
def test_evaluate(self):
"""test evaluate."""
quadratic_program = QuadraticProgram()
x = [quadratic_program.continuous_var() for _ in range(5)]
coefficients_list = list(range(5))
linear = LinearExpression(quadratic_program, coefficients_list)
values_list = list(range(len(x)))
values_array = np.array(values_list)
values_dict_int = {i: i for i in range(len(x))}
values_dict_str = {f"x{i}": i for i in range(len(x))}
for values in [values_list, values_array, values_dict_int, values_dict_str]:
self.assertEqual(linear.evaluate(values), 30)
def test_evaluate_gradient(self):
"""test evaluate gradient."""
quadratic_program = QuadraticProgram()
x = [quadratic_program.continuous_var() for _ in range(5)]
coefficients_list = list(range(5))
linear = LinearExpression(quadratic_program, coefficients_list)
values_list = list(range(len(x)))
values_array = np.array(values_list)
values_dict_int = {i: i for i in range(len(x))}
values_dict_str = {f"x{i}": i for i in range(len(x))}
for values in [values_list, values_array, values_dict_int, values_dict_str]:
np.testing.assert_almost_equal(linear.evaluate_gradient(values), coefficients_list)
def test_bounds(self):
"""test lowerbound and upperbound"""
with self.subTest("bounded"):
quadratic_program = QuadraticProgram()
quadratic_program.continuous_var_list(3, lowerbound=-1, upperbound=2)
coefficients_list = list(range(3))
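            # With coefficients [0, 1, 2] and every variable bounded in [-1, 2], the
            # expression ranges from 1*(-1) + 2*(-1) = -3 up to 1*2 + 2*2 = 6.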
bounds = LinearExpression(quadratic_program, coefficients_list).bounds
self.assertAlmostEqual(bounds.lowerbound, -3)
self.assertAlmostEqual(bounds.upperbound, 6)
with self.subTest("bounded2"):
quadratic_program = QuadraticProgram()
quadratic_program.integer_var(lowerbound=-2, upperbound=-1, name="x")
quadratic_program.integer_var(lowerbound=2, upperbound=4, name="y")
bounds = LinearExpression(quadratic_program, {"x": 1, "y": 10}).bounds
self.assertAlmostEqual(bounds.lowerbound, 18)
self.assertAlmostEqual(bounds.upperbound, 39)
bounds = LinearExpression(quadratic_program, {"x": -1, "y": 10}).bounds
self.assertAlmostEqual(bounds.lowerbound, 21)
self.assertAlmostEqual(bounds.upperbound, 42)
bounds = LinearExpression(quadratic_program, {"x": 1, "y": -10}).bounds
self.assertAlmostEqual(bounds.lowerbound, -42)
self.assertAlmostEqual(bounds.upperbound, -21)
bounds = LinearExpression(quadratic_program, {"x": -1, "y": -10}).bounds
self.assertAlmostEqual(bounds.lowerbound, -39)
self.assertAlmostEqual(bounds.upperbound, -18)
bounds = LinearExpression(quadratic_program, {"x": 0, "y": 0}).bounds
self.assertAlmostEqual(bounds.lowerbound, 0)
self.assertAlmostEqual(bounds.upperbound, 0)
with self.assertRaises(QiskitOptimizationError):
quadratic_program = QuadraticProgram()
quadratic_program.continuous_var_list(3, lowerbound=0, upperbound=INFINITY)
coefficients_list = list(range(3))
_ = LinearExpression(quadratic_program, coefficients_list).bounds
with self.assertRaises(QiskitOptimizationError):
quadratic_program = QuadraticProgram()
quadratic_program.continuous_var_list(3, lowerbound=-INFINITY, upperbound=0)
coefficients_list = list(range(3))
_ = LinearExpression(quadratic_program, coefficients_list).bounds
def test_str_repr(self):
"""Test str and repr"""
with self.subTest("5 variables"):
n = 5
quadratic_program = QuadraticProgram()
quadratic_program.binary_var_list(n) # x0,...,x4
expr = LinearExpression(quadratic_program, [float(e) for e in range(n)])
self.assertEqual(str(expr), "x1 + 2*x2 + 3*x3 + 4*x4")
self.assertEqual(repr(expr), "<LinearExpression: x1 + 2*x2 + 3*x3 + 4*x4>")
with self.subTest("50 variables"):
# pylint: disable=cyclic-import
from qiskit_optimization.translators.prettyprint import DEFAULT_TRUNCATE
n = 50
quadratic_program = QuadraticProgram()
quadratic_program.binary_var_list(n) # x0,...,x49
expr = LinearExpression(quadratic_program, [float(e) for e in range(n)])
expected = " ".join(
["x1"] + sorted([f"+ {i}*x{i}" for i in range(2, n)], key=lambda e: e.split(" ")[1])
)
self.assertEqual(str(expr), expected)
self.assertEqual(repr(expr), f"<LinearExpression: {expected[:DEFAULT_TRUNCATE]}...>")
if __name__ == "__main__":
unittest.main()
|
qiskit-optimization/test/problems/test_linear_expression.py/0
|
{
"file_path": "qiskit-optimization/test/problems/test_linear_expression.py",
"repo_id": "qiskit-optimization",
"token_count": 3656
}
| 161 |
[workspace]
members = ["crates/*"]
resolver = "2"
[workspace.package]
version = "1.3.0"
edition = "2021"
rust-version = "1.70" # Keep in sync with README.md and rust-toolchain.toml.
license = "Apache-2.0"
# Shared dependencies that can be inherited. This just helps a little with
# making sure our crates don't directly depend on different versions of things,
# although we can't help it if our transitive dependencies pull in more.
#
# Each crate can add on specific features freely as it inherits.
[workspace.dependencies]
bytemuck = "1.17"
indexmap.version = "2.4.0"
hashbrown.version = "0.14.5"
num-bigint = "0.4"
num-complex = "0.4"
ndarray = "^0.15.6"
numpy = "0.21.0"
smallvec = "1.13"
thiserror = "1.0"
rustworkx-core = "0.15"
approx = "0.5"
itertools = "0.13.0"
ahash = "0.8.11"
rayon = "1.10"
# Most of the crates don't need the feature `extension-module`, since only `qiskit-pyext` builds an
# actual C extension (the feature disables linking in `libpython`, which is forbidden in Python
# distributions). We only activate that feature when building the C extension module; we still need
# it disabled for Rust-only tests to avoid linker errors with it not being loaded. See
# https://pyo3.rs/main/features#extension-module for more.
pyo3 = { version = "0.21.2", features = ["abi3-py38"] }
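# As a hypothetical illustration (not part of this manifest), the one crate that does build the C
# extension would opt in to the feature on top of the inherited dependency, e.g.:
#   pyo3 = { workspace = true, features = ["extension-module"] }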
# These are our own crates.
qiskit-accelerate = { path = "crates/accelerate" }
qiskit-circuit = { path = "crates/circuit" }
qiskit-qasm2 = { path = "crates/qasm2" }
qiskit-qasm3 = { path = "crates/qasm3" }
[profile.release]
lto = 'fat'
codegen-units = 1
|
qiskit/Cargo.toml/0
|
{
"file_path": "qiskit/Cargo.toml",
"repo_id": "qiskit",
"token_count": 562
}
| 162 |
// This code is part of Qiskit.
//
// (C) Copyright IBM 2022
//
// This code is licensed under the Apache License, Version 2.0. You may
// obtain a copy of this license in the LICENSE.txt file in the root directory
// of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
//
// Any modifications or derivative works of this code must retain this
// copyright notice, and modified files need to carry a notice indicating
// that they have been altered from the originals.
use pyo3::intern;
use pyo3::prelude::*;
use pyo3::types::PyDict;
use pyo3::wrap_pyfunction;
use pyo3::Python;
use num_complex::Complex64;
use numpy::ndarray::linalg::kron;
use numpy::ndarray::{aview2, Array2, ArrayView2};
use numpy::{IntoPyArray, PyArray2, PyReadonlyArray2};
use smallvec::SmallVec;
use qiskit_circuit::bit_data::BitData;
use qiskit_circuit::circuit_instruction::CircuitInstruction;
use qiskit_circuit::dag_node::DAGOpNode;
use qiskit_circuit::gate_matrix::ONE_QUBIT_IDENTITY;
use qiskit_circuit::imports::QI_OPERATOR;
use qiskit_circuit::operations::Operation;
use crate::QiskitError;
fn get_matrix_from_inst<'py>(
py: Python<'py>,
inst: &'py CircuitInstruction,
) -> PyResult<Array2<Complex64>> {
if let Some(mat) = inst.operation.matrix(&inst.params) {
Ok(mat)
} else if inst.operation.try_standard_gate().is_some() {
Err(QiskitError::new_err(
"Parameterized gates can't be consolidated",
))
} else {
Ok(QI_OPERATOR
.get_bound(py)
.call1((inst.get_operation(py)?,))?
.getattr(intern!(py, "data"))?
.extract::<PyReadonlyArray2<Complex64>>()?
.as_array()
.to_owned())
}
}
/// Return the matrix Operator resulting from a block of Instructions.
#[pyfunction]
#[pyo3(text_signature = "(op_list, block_index_map_dict, /)")]
pub fn blocks_to_matrix(
py: Python,
op_list: Vec<PyRef<DAGOpNode>>,
block_index_map_dict: &Bound<PyDict>,
) -> PyResult<Py<PyArray2<Complex64>>> {
// Build a BitData in block_index_map_dict order. block_index_map_dict is a dict of bits to
// indices mapping the order of the qargs in the block. There should only be 2 entries since
// there are only 2 qargs here (e.g. `{Qubit(): 0, Qubit(): 1}`) so we need to ensure that
// we added the qubits to bit data in the correct index order.
let mut index_map: Vec<PyObject> = (0..block_index_map_dict.len()).map(|_| py.None()).collect();
for bit_tuple in block_index_map_dict.items() {
let (bit, index): (PyObject, usize) = bit_tuple.extract()?;
index_map[index] = bit;
}
let mut bit_map: BitData<u32> = BitData::new(py, "qargs".to_string());
for bit in index_map {
bit_map.add(py, bit.bind(py), true)?;
}
let identity = aview2(&ONE_QUBIT_IDENTITY);
let first_node = &op_list[0];
let input_matrix = get_matrix_from_inst(py, &first_node.instruction)?;
let mut matrix: Array2<Complex64> = match bit_map
.map_bits(first_node.instruction.qubits.bind(py).iter())?
.collect::<Vec<_>>()
.as_slice()
{
[0] => kron(&identity, &input_matrix),
[1] => kron(&input_matrix, &identity),
[0, 1] => input_matrix,
[1, 0] => change_basis(input_matrix.view()),
[] => Array2::eye(4),
_ => unreachable!(),
};
for node in op_list.into_iter().skip(1) {
let op_matrix = get_matrix_from_inst(py, &node.instruction)?;
let q_list = bit_map
.map_bits(node.instruction.qubits.bind(py).iter())?
.map(|x| x as u8)
.collect::<SmallVec<[u8; 2]>>();
let result = match q_list.as_slice() {
[0] => Some(kron(&identity, &op_matrix)),
[1] => Some(kron(&op_matrix, &identity)),
[1, 0] => Some(change_basis(op_matrix.view())),
[] => Some(Array2::eye(4)),
_ => None,
};
matrix = match result {
Some(result) => result.dot(&matrix),
None => op_matrix.dot(&matrix),
};
}
Ok(matrix.into_pyarray_bound(py).unbind())
}
/// Switches the order of qubits in a two qubit operation.
#[inline]
pub fn change_basis(matrix: ArrayView2<Complex64>) -> Array2<Complex64> {
let mut trans_matrix: Array2<Complex64> = matrix.reversed_axes().to_owned();
for index in 0..trans_matrix.ncols() {
trans_matrix.swap([1, index], [2, index]);
}
trans_matrix = trans_matrix.reversed_axes();
for index in 0..trans_matrix.ncols() {
trans_matrix.swap([1, index], [2, index]);
}
trans_matrix
}
pub fn convert_2q_block_matrix(m: &Bound<PyModule>) -> PyResult<()> {
m.add_wrapped(wrap_pyfunction!(blocks_to_matrix))?;
Ok(())
}
|
qiskit/crates/accelerate/src/convert_2q_block_matrix.rs/0
|
{
"file_path": "qiskit/crates/accelerate/src/convert_2q_block_matrix.rs",
"repo_id": "qiskit",
"token_count": 2083
}
| 163 |
// This code is part of Qiskit.
//
// (C) Copyright IBM 2022
//
// This code is licensed under the Apache License, Version 2.0. You may
// obtain a copy of this license in the LICENSE.txt file in the root directory
// of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
//
// Any modifications or derivative works of this code must retain this
// copyright notice, and modified files need to carry a notice indicating
// that they have been altered from the originals.
#![allow(clippy::too_many_arguments)]
use pyo3::prelude::*;
use pyo3::Python;
use hashbrown::HashSet;
use ndarray::prelude::*;
use numpy::{IntoPyArray, PyArray, PyReadonlyArray2};
use rand::prelude::*;
use rand_pcg::Pcg64Mcg;
use rayon::prelude::*;
use crate::getenv_use_multiple_threads;
use crate::nlayout::{NLayout, PhysicalQubit};
use super::heuristic::Heuristic;
use super::neighbor_table::NeighborTable;
use super::route::{swap_map, swap_map_trial, RoutingTargetView};
use super::sabre_dag::SabreDAG;
use super::swap_map::SwapMap;
use super::{NodeBlockResults, SabreResult};
use crate::dense_layout::best_subset_inner;
#[pyfunction]
#[pyo3(signature = (dag, neighbor_table, distance_matrix, heuristic, max_iterations, num_swap_trials, num_random_trials, seed=None, partial_layouts=vec![]))]
pub fn sabre_layout_and_routing(
py: Python,
dag: &SabreDAG,
neighbor_table: &NeighborTable,
distance_matrix: PyReadonlyArray2<f64>,
heuristic: &Heuristic,
max_iterations: usize,
num_swap_trials: usize,
num_random_trials: usize,
seed: Option<u64>,
mut partial_layouts: Vec<Vec<Option<u32>>>,
) -> (NLayout, PyObject, (SwapMap, PyObject, NodeBlockResults)) {
let run_in_parallel = getenv_use_multiple_threads();
let target = RoutingTargetView {
neighbors: neighbor_table,
coupling: &neighbor_table.coupling_graph(),
distance: distance_matrix.as_array(),
};
let mut starting_layouts: Vec<Vec<Option<u32>>> =
(0..num_random_trials).map(|_| vec![]).collect();
starting_layouts.append(&mut partial_layouts);
// Run a dense layout trial
starting_layouts.push(compute_dense_starting_layout(
dag.num_qubits,
&target,
run_in_parallel,
));
let outer_rng = match seed {
Some(seed) => Pcg64Mcg::seed_from_u64(seed),
None => Pcg64Mcg::from_entropy(),
};
let seed_vec: Vec<u64> = outer_rng
.sample_iter(&rand::distributions::Standard)
.take(starting_layouts.len())
.collect();
let res = if run_in_parallel && starting_layouts.len() > 1 {
seed_vec
.into_par_iter()
.enumerate()
.map(|(index, seed_trial)| {
(
index,
layout_trial(
&target,
dag,
heuristic,
seed_trial,
max_iterations,
num_swap_trials,
run_in_parallel,
&starting_layouts[index],
),
)
})
.min_by_key(|(index, (_, _, result))| {
(
result.map.map.values().map(|x| x.len()).sum::<usize>(),
*index,
)
})
.unwrap()
.1
} else {
seed_vec
.into_iter()
.enumerate()
.map(|(index, seed_trial)| {
layout_trial(
&target,
dag,
heuristic,
seed_trial,
max_iterations,
num_swap_trials,
run_in_parallel,
&starting_layouts[index],
)
})
.min_by_key(|(_, _, result)| result.map.map.values().map(|x| x.len()).sum::<usize>())
.unwrap()
};
(
res.0,
PyArray::from_vec_bound(py, res.1).into(),
(
res.2.map,
res.2.node_order.into_pyarray_bound(py).into(),
res.2.node_block_results,
),
)
}
fn layout_trial(
target: &RoutingTargetView,
dag: &SabreDAG,
heuristic: &Heuristic,
seed: u64,
max_iterations: usize,
num_swap_trials: usize,
run_swap_in_parallel: bool,
starting_layout: &[Option<u32>],
) -> (NLayout, Vec<PhysicalQubit>, SabreResult) {
let num_physical_qubits: u32 = target.neighbors.num_qubits().try_into().unwrap();
let mut rng = Pcg64Mcg::seed_from_u64(seed);
// This is purely for RNG compatibility during a refactor.
let routing_seed = Pcg64Mcg::seed_from_u64(seed).next_u64();
// Pick a random initial layout including a full ancilla allocation.
let mut initial_layout = {
let physical_qubits: Vec<PhysicalQubit> = if !starting_layout.is_empty() {
let used_bits: HashSet<u32> = starting_layout
.iter()
.filter_map(|x| x.as_ref())
.copied()
.collect();
let mut free_bits: Vec<u32> = (0..num_physical_qubits)
.filter(|x| !used_bits.contains(x))
.collect();
free_bits.shuffle(&mut rng);
(0..num_physical_qubits)
.map(|x| {
let bit_index = match starting_layout.get(x as usize) {
Some(phys) => phys.unwrap_or_else(|| free_bits.pop().unwrap()),
None => free_bits.pop().unwrap(),
};
PhysicalQubit::new(bit_index)
})
.collect()
} else {
let mut physical_qubits: Vec<PhysicalQubit> =
(0..num_physical_qubits).map(PhysicalQubit::new).collect();
physical_qubits.shuffle(&mut rng);
physical_qubits
};
NLayout::from_virtual_to_physical(physical_qubits).unwrap()
};
// Sabre routing currently enforces that control-flow blocks return to their starting layout,
// which means they don't actually affect any heuristics that affect our layout choice.
let dag_no_control_forward = SabreDAG {
num_qubits: dag.num_qubits,
num_clbits: dag.num_clbits,
dag: dag.dag.clone(),
nodes: dag.nodes.clone(),
first_layer: dag.first_layer.clone(),
node_blocks: dag
.node_blocks
.keys()
.map(|index| (*index, Vec::new()))
.collect(),
};
let dag_no_control_reverse = SabreDAG::new(
dag_no_control_forward.num_qubits,
dag_no_control_forward.num_clbits,
dag_no_control_forward.nodes.iter().rev().cloned().collect(),
dag_no_control_forward.node_blocks.clone(),
)
.unwrap();
for _iter in 0..max_iterations {
for dag in [&dag_no_control_forward, &dag_no_control_reverse] {
let (_result, final_layout) =
swap_map_trial(target, dag, heuristic, &initial_layout, routing_seed);
initial_layout = final_layout;
}
}
let (sabre_result, final_layout) = swap_map(
target,
dag,
heuristic,
&initial_layout,
Some(seed),
num_swap_trials,
Some(run_swap_in_parallel),
);
let final_permutation = initial_layout
.iter_physical()
.map(|(_, virt)| virt.to_phys(&final_layout))
.collect();
(initial_layout, final_permutation, sabre_result)
}
fn compute_dense_starting_layout(
num_qubits: usize,
target: &RoutingTargetView,
run_in_parallel: bool,
) -> Vec<Option<u32>> {
let mut adj_matrix = target.distance.to_owned();
if run_in_parallel {
adj_matrix.par_mapv_inplace(|x| if x == 1. { 1. } else { 0. });
} else {
adj_matrix.mapv_inplace(|x| if x == 1. { 1. } else { 0. });
}
let [_rows, _cols, map] = best_subset_inner(
num_qubits,
adj_matrix.view(),
0,
0,
false,
true,
aview2(&[[0.]]),
);
map.into_iter().map(|x| Some(x as u32)).collect()
}
|
qiskit/crates/accelerate/src/sabre/layout.rs/0
|
{
"file_path": "qiskit/crates/accelerate/src/sabre/layout.rs",
"repo_id": "qiskit",
"token_count": 4105
}
| 164 |
// This code is part of Qiskit.
//
// (C) Copyright IBM 2024
//
// This code is licensed under the Apache License, Version 2.0. You may
// obtain a copy of this license in the LICENSE.txt file in the root directory
// of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
//
// Any modifications or derivative works of this code must retain this
// copyright notice, and modified files need to carry a notice indicating
// that they have been altered from the originals.
use ndarray::{concatenate, s, Array2, ArrayView2, ArrayViewMut2, Axis};
use rand::{Rng, SeedableRng};
use rand_pcg::Pcg64Mcg;
/// Binary matrix multiplication
pub fn binary_matmul_inner(
mat1: ArrayView2<bool>,
mat2: ArrayView2<bool>,
) -> Result<Array2<bool>, String> {
let n1_rows = mat1.nrows();
let n1_cols = mat1.ncols();
let n2_rows = mat2.nrows();
let n2_cols = mat2.ncols();
if n1_cols != n2_rows {
return Err(format!(
"Cannot multiply matrices with inappropriate dimensions {}, {}",
n1_cols, n2_rows
));
}
Ok(Array2::from_shape_fn((n1_rows, n2_cols), |(i, j)| {
(0..n2_rows)
.map(|k| mat1[[i, k]] & mat2[[k, j]])
.fold(false, |acc, v| acc ^ v)
}))
}
/// Gauss elimination of a matrix mat with m rows and n columns.
/// If full_elim = True, it allows full elimination of mat[:, 0 : ncols]
/// Returns the matrix mat, and the permutation perm that was done on the rows during the process.
/// perm[0 : rank] represents the indices of linearly independent rows in the original matrix.
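/// For example (a hypothetical 2x2 case, not taken from the test suite): eliminating the matrix
/// [[0, 1], [1, 1]] swaps the two rows first, so the returned permutation is `[1, 0]`.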
pub fn gauss_elimination_with_perm_inner(
mut mat: ArrayViewMut2<bool>,
ncols: Option<usize>,
full_elim: Option<bool>,
) -> Vec<usize> {
let (m, mut n) = (mat.nrows(), mat.ncols()); // no. of rows and columns
if let Some(ncols_val) = ncols {
n = usize::min(n, ncols_val); // no. of active columns
}
let mut perm: Vec<usize> = Vec::from_iter(0..m);
let mut r = 0; // current rank
let k = 0; // current pivot column
let mut new_k = 0;
while (r < m) && (k < n) {
let mut is_non_zero = false;
let mut new_r = r;
for j in k..n {
new_k = k;
for i in r..m {
if mat[(i, j)] {
is_non_zero = true;
new_k = j;
new_r = i;
break;
}
}
if is_non_zero {
break;
}
}
if !is_non_zero {
return perm; // A is in the canonical form
}
if new_r != r {
let temp_r = mat.slice_mut(s![r, ..]).to_owned();
let temp_new_r = mat.slice_mut(s![new_r, ..]).to_owned();
mat.slice_mut(s![r, ..]).assign(&temp_new_r);
mat.slice_mut(s![new_r, ..]).assign(&temp_r);
perm.swap(r, new_r);
}
// Copy source row to avoid trying multiple borrows at once
let row0 = mat.row(r).to_owned();
mat.axis_iter_mut(Axis(0))
.enumerate()
.filter(|(i, row)| {
(full_elim == Some(true) && (*i < r) && row[new_k])
|| (*i > r && *i < m && row[new_k])
})
.for_each(|(_i, mut row)| {
row.zip_mut_with(&row0, |x, &y| *x ^= y);
});
r += 1;
}
perm
}
/// Given a boolean matrix A after Gaussian elimination, computes its rank
/// (i.e. simply the number of nonzero rows)
pub fn compute_rank_after_gauss_elim_inner(mat: ArrayView2<bool>) -> usize {
let rank: usize = mat
.axis_iter(Axis(0))
.map(|row| row.fold(false, |out, val| out | *val) as usize)
.sum();
rank
}
/// Given a boolean matrix mat computes its rank
pub fn compute_rank_inner(mat: ArrayView2<bool>) -> usize {
let mut temp_mat = mat.to_owned();
gauss_elimination_with_perm_inner(temp_mat.view_mut(), None, Some(false));
let rank = compute_rank_after_gauss_elim_inner(temp_mat.view());
rank
}
/// Given a square boolean matrix mat, tries to compute its inverse.
pub fn calc_inverse_matrix_inner(
mat: ArrayView2<bool>,
verify: bool,
) -> Result<Array2<bool>, String> {
if mat.shape()[0] != mat.shape()[1] {
return Err("Matrix to invert is a non-square matrix.".to_string());
}
let n = mat.shape()[0];
// concatenate the matrix and identity
let identity_matrix: Array2<bool> = Array2::from_shape_fn((n, n), |(i, j)| i == j);
let mut mat1 = concatenate(Axis(1), &[mat.view(), identity_matrix.view()]).unwrap();
gauss_elimination_with_perm_inner(mat1.view_mut(), None, Some(true));
let r = compute_rank_after_gauss_elim_inner(mat1.slice(s![.., 0..n]));
if r < n {
return Err("The matrix is not invertible.".to_string());
}
let invmat = mat1.slice(s![.., n..2 * n]).to_owned();
if verify {
let mat2 = binary_matmul_inner(mat, (&invmat).into())?;
let identity_matrix: Array2<bool> = Array2::from_shape_fn((n, n), |(i, j)| i == j);
if mat2.ne(&identity_matrix) {
return Err("The inverse matrix is not correct.".to_string());
}
}
Ok(invmat)
}
/// Mutate a matrix inplace by adding the value of the ``ctrl`` row to the
/// ``target`` row. If ``add_cols`` is true, add columns instead of rows.
pub fn _add_row_or_col(mut mat: ArrayViewMut2<bool>, add_cols: &bool, ctrl: usize, trgt: usize) {
// get the two rows (or columns)
let info = if *add_cols {
(s![.., ctrl], s![.., trgt])
} else {
(s![ctrl, ..], s![trgt, ..])
};
let (row0, mut row1) = mat.multi_slice_mut(info);
// add them inplace
row1.zip_mut_with(&row0, |x, &y| *x ^= y);
}
/// Generate a random invertible n x n binary matrix.
pub fn random_invertible_binary_matrix_inner(num_qubits: usize, seed: Option<u64>) -> Array2<bool> {
let mut rng = match seed {
Some(seed) => Pcg64Mcg::seed_from_u64(seed),
None => Pcg64Mcg::from_entropy(),
};
let mut matrix = Array2::from_elem((num_qubits, num_qubits), false);
loop {
for value in matrix.iter_mut() {
*value = rng.gen_bool(0.5);
}
let rank = compute_rank_inner(matrix.view());
if rank == num_qubits {
break;
}
}
matrix
}
/// Check that a binary matrix is invertible.
pub fn check_invertible_binary_matrix_inner(mat: ArrayView2<bool>) -> bool {
if mat.nrows() != mat.ncols() {
return false;
}
let rank = compute_rank_inner(mat);
rank == mat.nrows()
}
|
qiskit/crates/accelerate/src/synthesis/linear/utils.rs/0
|
{
"file_path": "qiskit/crates/accelerate/src/synthesis/linear/utils.rs",
"repo_id": "qiskit",
"token_count": 3033
}
| 165 |
# `qiskit-circuit`
The Rust-based data structures for circuits.
This currently defines the core data collections for `QuantumCircuit`, but may expand in the future to back `DAGCircuit` as well.
This crate is a very low part of the Rust stack, if not the very lowest.
The data model exposed by this crate is as follows.
## CircuitData
The core representation of a quantum circuit in Rust is the `CircuitData` struct. This contains the
list of instructions that comprise the circuit. Each element in this list is modeled by a
`CircuitInstruction` struct. The `CircuitInstruction` contains the operation object and its operands.
This includes the parameters and bits. It also contains the potentially mutable state of the Operation representation from the legacy Python data model; namely `duration`, `unit`, `condition`, and `label`.
In the future we'll be able to remove all of that except for label.
At rest a `CircuitInstruction` is compacted into a `PackedInstruction`, which caches reused qargs
in the instructions to reduce the memory overhead of `CircuitData`. The `PackedInstruction` objects
get unpacked back to `CircuitInstruction` when accessed, for a more convenient working form.
Additionally, the `CircuitData` contains a `param_table` field, which is used to track parameterized
instructions that use Python-defined `ParameterExpression` objects for any of their parameters, and a
global phase field which is used to track the global phase of the circuit.
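A rough, hypothetical sketch of the relationship just described (the struct and field names are illustrative stand-ins, not the crate's actual definitions):

```rust
/// Compact, at-rest form of one instruction: the operation plus interned operand lists.
struct PackedInstructionSketch {
    name: String,     // stands in for the operation object
    qargs_id: u32,    // index into a table of reused qubit-argument lists
    cargs_id: u32,    // likewise for clbits
    params: Vec<f64>, // plus the legacy mutable state (duration, unit, condition, label)
}

/// The circuit itself: the instruction list, parameter tracking, and the global phase.
struct CircuitDataSketch {
    data: Vec<PackedInstructionSketch>,
    parameterized: Vec<(usize, usize)>, // cf. the `ParamTable` described below
    global_phase: f64,
}

fn main() {
    let circuit = CircuitDataSketch {
        data: vec![PackedInstructionSketch {
            name: "h".to_string(),
            qargs_id: 0,
            cargs_id: 0,
            params: vec![],
        }],
        parameterized: vec![],
        global_phase: 0.0,
    };
    println!("{} instruction(s)", circuit.data.len());
}
```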
## Operation Model
In the circuit crate all the operations used in a `CircuitInstruction` are part of the `OperationType`
enum. The `OperationType` enum has four variants which are used to define the different types of
operation objects that can be on a circuit:
- `StandardGate`: a Rust-native representation of a member of the Qiskit standard gate library. This is
  an `enum` that enumerates all the gates in the library and statically defines all the gate properties
  except for gates that take parameters.
- `PyGate`: A struct that wraps a gate outside the standard library defined in Python. This struct wraps
a `Gate` instance (or subclass) as a `PyObject`. The static properties of this object (such as name,
number of qubits, etc) are stored in Rust for performance but the dynamic properties such as
the matrix or definition are accessed by calling back into Python to get them from the stored
`PyObject`
- `PyInstruction`: A struct that wraps an instruction defined in Python. This struct wraps an
  `Instruction` instance (or subclass) as a `PyObject`. The static properties of this object (such as
  name, number of qubits, etc) are stored in Rust for performance but the dynamic properties such as
  the definition are accessed by calling back into Python to get them from the stored `PyObject`. Since
  the primary difference between `Gate` and `Instruction` in the Python data model is that `Gate` is a
  specialized `Instruction` subclass that represents unitary operations, the primary difference between
  this and `PyGate` is that `PyInstruction` will always return `None` when its matrix is accessed.
- `PyOperation`: A struct that wraps an operation defined in Python. This struct wraps an `Operation`
instance (or subclass) as a `PyObject`. The static properties of this object (such as name, number
of qubits, etc) are stored in Rust for performance. As `Operation` is the base abstract interface
definition of what can be put on a circuit this is mostly just a container for custom Python objects.
Anything that's operating on a bare operation will likely need to access it via the `PyObject`
manually because the interface doesn't define many standard properties outside of what's cached in
the struct.
There is also an `Operation` trait defined which provides the common access pattern interface to these
4 types along with the `OperationType` parent. This trait defines methods to access the standard data
model attributes of operations in Qiskit. This includes things like the name, number of qubits, the matrix, the definition, etc. A rough, illustrative sketch of this model is given below.
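A hypothetical sketch of the enum-plus-trait model described above (the `*Sketch` names and payloads are illustrative stand-ins, not the crate's real types):

```rust
// Hypothetical sketch of the four variants described above; the payloads are simple stand-ins,
// not the crate's real types.
enum OperationTypeSketch {
    StandardGate { name: String, num_qubits: u32 },  // fully defined natively in Rust
    PyGate { name: String, num_qubits: u32 },        // wraps a Python `Gate` as a `PyObject`
    PyInstruction { name: String, num_qubits: u32 }, // wraps a Python `Instruction`
    PyOperation { name: String, num_qubits: u32 },   // wraps a bare Python `Operation`
}

// Loosely mirrors the common access pattern that the `Operation` trait provides.
fn name(op: &OperationTypeSketch) -> &str {
    match op {
        OperationTypeSketch::StandardGate { name, .. }
        | OperationTypeSketch::PyGate { name, .. }
        | OperationTypeSketch::PyInstruction { name, .. }
        | OperationTypeSketch::PyOperation { name, .. } => name,
    }
}

fn main() {
    let op = OperationTypeSketch::StandardGate {
        name: "cx".to_string(),
        num_qubits: 2,
    };
    println!("standard gate: {}", name(&op));
}
```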
## ParamTable
The `ParamTable` struct is used to track which circuit instructions are using `ParameterExpression`
objects for any of their parameters. The Python-space `ParameterExpression` is comprised of a symengine
symbolic expression that defines operations using `Parameter` objects. Each `Parameter` is modeled by
a uuid and a name to uniquely identify it. The parameter table maps the `Parameter` objects to the
`CircuitInstruction` in the `CircuitData` that are using them. The `ParamTable` is comprised of 3 `HashMaps` internally that map the uuid (as a `u128`, which is accessible in Python by using `uuid.int`) to the `ParamEntry`, the `name` to the uuid, and the uuid to the `PyObject` for the actual `Parameter`.
The `ParamEntry` is just a `HashSet` of 2-tuples with `usize` elements. The two `usize`s represent the instruction index in the `CircuitData` and the index into the `CircuitInstruction.params` field of
a given instruction where the given `Parameter` is used in the circuit. If the instruction index is
`GLOBAL_PHASE_MAX`, that points to the global phase property of the circuit instead of a `CircuitInstruction`.
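A minimal, hypothetical sketch of that mapping structure (names here are illustrative; the real `ParamTable` differs in naming and detail, and its third map holds the actual Python `Parameter` objects, elided here):

```rust
use std::collections::{HashMap, HashSet};

// (instruction index, index into that instruction's `params`)
type ParamEntrySketch = HashSet<(usize, usize)>;

struct ParamTableSketch {
    entries: HashMap<u128, ParamEntrySketch>, // uuid (`uuid.int` in Python) -> usage locations
    names: HashMap<String, u128>,             // parameter name -> uuid
}

fn main() {
    let mut table = ParamTableSketch {
        entries: HashMap::new(),
        names: HashMap::new(),
    };
    // Record that a parameter named "theta" is used by instruction 0, in `params` slot 1.
    table.names.insert("theta".to_string(), 42);
    table.entries.entry(42).or_default().insert((0, 1));
    println!("theta used in {} place(s)", table.entries[&42].len());
}
```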
|
qiskit/crates/circuit/README.md/0
|
{
"file_path": "qiskit/crates/circuit/README.md",
"repo_id": "qiskit",
"token_count": 1264
}
| 166 |
// This code is part of Qiskit.
//
// (C) Copyright IBM 2024
//
// This code is licensed under the Apache License, Version 2.0. You may
// obtain a copy of this license in the LICENSE.txt file in the root directory
// of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
//
// Any modifications or derivative works of this code must retain this
// copyright notice, and modified files need to carry a notice indicating
// that they have been altered from the originals.
use thiserror::Error;
use pyo3::exceptions::PyIndexError;
use pyo3::prelude::*;
use pyo3::types::PySlice;
use self::sealed::{Descending, SequenceIndexIter};
/// A Python-space indexer for the standard `PySequence` type; a single integer or a slice.
///
/// These come in as `isize`s from Python space, since Python typically allows negative indices.
/// Use `with_len` to specialize the index to a valid Rust-space indexer into a collection of the
/// given length.
pub enum PySequenceIndex<'py> {
Int(isize),
Slice(Bound<'py, PySlice>),
}
impl<'py> FromPyObject<'py> for PySequenceIndex<'py> {
fn extract_bound(ob: &Bound<'py, PyAny>) -> PyResult<Self> {
// `slice` can't be subclassed in Python, so it's safe (and faster) to check for it exactly.
// The `downcast_exact` check is just a pointer comparison, so while `slice` is the less
// common input, doing that first has little-to-no impact on the speed of the `isize` path,
// while the reverse makes `slice` inputs significantly slower.
if let Ok(slice) = ob.downcast_exact::<PySlice>() {
return Ok(Self::Slice(slice.clone()));
}
Ok(Self::Int(ob.extract()?))
}
}
impl<'py> PySequenceIndex<'py> {
/// Specialize this index to a collection of the given `len`, returning a Rust-native type.
pub fn with_len(&self, len: usize) -> Result<SequenceIndex, PySequenceIndexError> {
match self {
PySequenceIndex::Int(index) => {
let wrapped_index = PySequenceIndex::convert_idx(*index, len)?;
Ok(SequenceIndex::Int(wrapped_index))
}
PySequenceIndex::Slice(slice) => {
let indices = slice
.indices(len as ::std::os::raw::c_long)
.map_err(PySequenceIndexError::from)?;
if indices.step > 0 {
Ok(SequenceIndex::PosRange {
start: indices.start as usize,
stop: indices.stop as usize,
step: indices.step as usize,
})
} else {
Ok(SequenceIndex::NegRange {
// `indices.start` can be negative if the collection length is 0.
start: (indices.start >= 0).then_some(indices.start as usize),
// `indices.stop` can be negative if the 0 index should be output.
stop: (indices.stop >= 0).then_some(indices.stop as usize),
step: indices.step.unsigned_abs(),
})
}
}
}
}
/// Given an integer (which may be negative) get a valid unsigned index for a sequence.
pub fn convert_idx(index: isize, length: usize) -> Result<usize, PySequenceIndexError> {
let wrapped_index = if index >= 0 {
let index = index as usize;
if index >= length {
return Err(PySequenceIndexError::OutOfRange);
}
index
} else {
length
.checked_sub(index.unsigned_abs())
.ok_or(PySequenceIndexError::OutOfRange)?
};
Ok(wrapped_index)
}
}
/// Error type for problems encountered when calling methods on `PySequenceIndex`.
#[derive(Error, Debug)]
pub enum PySequenceIndexError {
#[error("index out of range")]
OutOfRange,
#[error(transparent)]
InnerPy(#[from] PyErr),
}
impl From<PySequenceIndexError> for PyErr {
fn from(value: PySequenceIndexError) -> PyErr {
match value {
PySequenceIndexError::OutOfRange => PyIndexError::new_err("index out of range"),
PySequenceIndexError::InnerPy(inner) => inner,
}
}
}
/// Rust-native version of a Python sequence-like indexer.
///
/// Typically this is constructed by a call to `PySequenceIndex::with_len`, which guarantees that
/// all the indices will be in bounds for a collection of the given length.
///
/// This splits the positive- and negative-step versions of the slice in two so it can be translated
/// more easily into static dispatch. This type can be converted into several types of iterator.
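/// As a small illustrative example (not taken from the crate's tests): specializing the Python
/// index `-1` against a length-5 sequence via `with_len` would produce `SequenceIndex::Int(4)`.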
#[derive(Clone, Copy, Debug)]
pub enum SequenceIndex {
Int(usize),
PosRange {
start: usize,
stop: usize,
step: usize,
},
NegRange {
start: Option<usize>,
stop: Option<usize>,
step: usize,
},
}
impl SequenceIndex {
/// The number of indices this refers to.
pub fn len(&self) -> usize {
match self {
Self::Int(_) => 1,
Self::PosRange { start, stop, step } => {
let gap = stop.saturating_sub(*start);
gap / *step + (gap % *step != 0) as usize
}
Self::NegRange { start, stop, step } => 'arm: {
let Some(start) = start else { break 'arm 0 };
let gap = stop
.map(|stop| start.saturating_sub(stop))
.unwrap_or(*start + 1);
gap / step + (gap % step != 0) as usize
}
}
}
pub fn is_empty(&self) -> bool {
// This is just to keep clippy happy; the length is already fairly inexpensive to calculate.
self.len() == 0
}
/// Get an iterator over the indices. This will be a single-item iterator for the case of
/// `Self::Int`, but you probably wanted to destructure off that case beforehand anyway.
pub fn iter(&self) -> SequenceIndexIter {
match self {
Self::Int(value) => SequenceIndexIter::Int(Some(*value)),
Self::PosRange { start, step, .. } => SequenceIndexIter::PosRange {
lowest: *start,
step: *step,
indices: 0..self.len(),
},
Self::NegRange { start, step, .. } => SequenceIndexIter::NegRange {
// We can unwrap `highest` to an arbitrary value if `None`, because in that case the
// `len` is 0 and the iterator will not yield any objects.
highest: start.unwrap_or_default(),
step: *step,
indices: 0..self.len(),
},
}
}
/// Get an iterator over the contained indices that is guaranteed to iterate from the highest
/// index to the lowest.
pub fn descending(&self) -> Descending {
Descending(self.iter())
}
}
impl IntoIterator for SequenceIndex {
type Item = usize;
type IntoIter = SequenceIndexIter;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
// Private module to make it impossible to construct or inspect the internals of the iterator types
// from outside this file, while still allowing them to be used.
mod sealed {
/// Custom iterator for indices for Python sequence-likes.
///
/// In the range types, the `indices ` are `Range` objects that run from 0 to the length of the
/// iterator. In theory, we could generate the iterators ourselves, but that ends up with a lot of
/// boilerplate.
#[derive(Clone, Debug)]
pub enum SequenceIndexIter {
Int(Option<usize>),
PosRange {
lowest: usize,
step: usize,
indices: ::std::ops::Range<usize>,
},
NegRange {
highest: usize,
// The step of the iterator, but note that this is a negative range, so the forwards method
// steps downwards from `upper` towards `lower`.
step: usize,
indices: ::std::ops::Range<usize>,
},
}
impl Iterator for SequenceIndexIter {
type Item = usize;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match self {
Self::Int(value) => value.take(),
Self::PosRange {
lowest,
step,
indices,
} => indices.next().map(|idx| *lowest + idx * *step),
Self::NegRange {
highest,
step,
indices,
} => indices.next().map(|idx| *highest - idx * *step),
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
match self {
Self::Int(None) => (0, Some(0)),
Self::Int(Some(_)) => (1, Some(1)),
Self::PosRange { indices, .. } | Self::NegRange { indices, .. } => {
indices.size_hint()
}
}
}
}
impl DoubleEndedIterator for SequenceIndexIter {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
match self {
Self::Int(value) => value.take(),
Self::PosRange {
lowest,
step,
indices,
} => indices.next_back().map(|idx| *lowest + idx * *step),
Self::NegRange {
highest,
step,
indices,
} => indices.next_back().map(|idx| *highest - idx * *step),
}
}
}
impl ExactSizeIterator for SequenceIndexIter {}
pub struct Descending(pub SequenceIndexIter);
impl Iterator for Descending {
type Item = usize;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match self.0 {
SequenceIndexIter::Int(_) | SequenceIndexIter::NegRange { .. } => self.0.next(),
SequenceIndexIter::PosRange { .. } => self.0.next_back(),
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.0.size_hint()
}
}
impl DoubleEndedIterator for Descending {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
match self.0 {
SequenceIndexIter::Int(_) | SequenceIndexIter::NegRange { .. } => {
self.0.next_back()
}
SequenceIndexIter::PosRange { .. } => self.0.next(),
}
}
}
impl ExactSizeIterator for Descending {}
}
#[cfg(test)]
mod test {
use super::*;
/// Get a set of test parametrisations for iterator methods. The second argument is the
/// expected values from a normal forward iteration.
fn index_iterator_cases() -> impl Iterator<Item = (SequenceIndex, Vec<usize>)> {
let pos = |start, stop, step| SequenceIndex::PosRange { start, stop, step };
let neg = |start, stop, step| SequenceIndex::NegRange { start, stop, step };
[
(SequenceIndex::Int(3), vec![3]),
(pos(0, 5, 2), vec![0, 2, 4]),
(pos(2, 10, 1), vec![2, 3, 4, 5, 6, 7, 8, 9]),
(pos(1, 15, 3), vec![1, 4, 7, 10, 13]),
(neg(Some(3), None, 1), vec![3, 2, 1, 0]),
(neg(Some(3), None, 2), vec![3, 1]),
(neg(Some(2), Some(0), 1), vec![2, 1]),
(neg(Some(2), Some(0), 2), vec![2]),
(neg(Some(2), Some(0), 3), vec![2]),
(neg(Some(10), Some(2), 3), vec![10, 7, 4]),
(neg(None, None, 1), vec![]),
(neg(None, None, 3), vec![]),
]
.into_iter()
}
/// Test that the index iterator's implementation of `ExactSizeIterator` is correct.
#[test]
fn index_iterator() {
for (index, forwards) in index_iterator_cases() {
// We're testing that all the values are the same, and the `size_hint` is correct at
// every single point.
let mut actual = Vec::new();
let mut sizes = Vec::new();
let mut iter = index.iter();
loop {
sizes.push(iter.size_hint().0);
if let Some(next) = iter.next() {
actual.push(next);
} else {
break;
}
}
assert_eq!(
actual, forwards,
"values for {:?}\nActual : {:?}\nExpected: {:?}",
index, actual, forwards,
);
let expected_sizes = (0..=forwards.len()).rev().collect::<Vec<_>>();
assert_eq!(
sizes, expected_sizes,
"sizes for {:?}\nActual : {:?}\nExpected: {:?}",
index, sizes, expected_sizes,
);
}
}
/// Test that the index iterator's implementation of `DoubleEndedIterator` is correct.
#[test]
fn reversed_index_iterator() {
for (index, forwards) in index_iterator_cases() {
let actual = index.iter().rev().collect::<Vec<_>>();
let expected = forwards.into_iter().rev().collect::<Vec<_>>();
assert_eq!(
actual, expected,
"reversed {:?}\nActual : {:?}\nExpected: {:?}",
index, actual, expected,
);
}
}
/// Test that `descending` produces its values in reverse-sorted order.
#[test]
fn descending() {
for (index, mut expected) in index_iterator_cases() {
let actual = index.descending().collect::<Vec<_>>();
expected.sort_by(|left, right| right.cmp(left));
assert_eq!(
actual, expected,
"descending {:?}\nActual : {:?}\nExpected: {:?}",
index, actual, expected,
);
}
}
    /// Test that `PySequenceIndex::convert_idx` correctly handles positive and negative indices
#[test]
fn convert_py_idx() {
let cases = [
(2, 5, 2), // (index, sequence length, expected result)
(-2, 5, 3),
(0, 2, 0),
];
for (py_index, length, expected) in cases {
let index = PySequenceIndex::convert_idx(py_index, length).unwrap();
assert_eq!(index, expected, "Expected {} but got {}", expected, index);
}
}
/// Test that out-of-range errors are returned as expected.
#[test]
fn bad_convert_py_idx() {
let cases = [
(5, 5), // (index, sequence length)
(-6, 5),
];
for (py_index, length) in cases {
assert!(matches!(
PySequenceIndex::convert_idx(py_index, length).unwrap_err(),
PySequenceIndexError::OutOfRange,
));
}
}
}
|
qiskit/crates/circuit/src/slice.rs/0
|
{
"file_path": "qiskit/crates/circuit/src/slice.rs",
"repo_id": "qiskit",
"token_count": 7041
}
| 167 |
// This code is part of Qiskit.
//
// (C) Copyright IBM 2024
//
// This code is licensed under the Apache License, Version 2.0. You may
// obtain a copy of this license in the LICENSE.txt file in the root directory
// of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
//
// Any modifications or derivative works of this code must retain this
// copyright notice, and modified files need to carry a notice indicating
// that they have been altered from the originals.
use pyo3::prelude::*;
use pyo3::types::{PyList, PyString, PyTuple, PyType};
use crate::error::QASM3ImporterError;
pub trait PyRegister {
// This really should be
// fn iter<'a>(&'a self, py: Python<'a>) -> impl Iterator<Item = &'a PyAny>;
// or at a minimum
// fn iter<'a>(&'a self, py: Python<'a>) -> ::pyo3::types::iter::PyListIterator<'a>;
// but we can't use the former before Rust 1.75 and the latter before PyO3 0.21.
fn bit_list<'a>(&'a self, py: Python<'a>) -> &Bound<'a, PyList>;
}
macro_rules! register_type {
($name: ident) => {
/// Rust-space wrapper around Qiskit `Register` objects.
pub struct $name {
/// The actual register instance.
object: Py<PyAny>,
/// A pointer to the inner list of bits. We keep a handle to this for lookup
/// efficiency; we can use direct list methods to retrieve the bit instances, rather
/// than needing to indirect through the general `__getitem__` of the register, or
/// looking up the qubit instances on the circuit.
items: Py<PyList>,
}
impl PyRegister for $name {
fn bit_list<'a>(&'a self, py: Python<'a>) -> &Bound<'a, PyList> {
self.items.bind(py)
}
}
impl ::pyo3::IntoPy<Py<PyAny>> for $name {
fn into_py(self, _py: Python) -> Py<PyAny> {
self.object
}
}
impl ::pyo3::ToPyObject for $name {
fn to_object(&self, py: Python) -> Py<PyAny> {
                // _Technically_, allowing access to this internal object can let the Rust-space
// wrapper get out-of-sync since we keep a direct handle to the list, but in
// practice, the field it's viewing is private and "inaccessible" from Python.
self.object.clone_ref(py)
}
}
};
}
register_type!(PyQuantumRegister);
register_type!(PyClassicalRegister);
/// Information received from Python space about how to construct a Python-space object to
/// represent a given gate that might be declared.
#[pyclass(module = "qiskit._accelerate.qasm3", frozen, name = "CustomGate")]
#[derive(Clone, Debug)]
pub struct PyGate {
constructor: Py<PyAny>,
name: String,
num_params: usize,
num_qubits: usize,
}
impl PyGate {
pub fn new<T: IntoPy<Py<PyAny>>, S: AsRef<str>>(
py: Python,
constructor: T,
name: S,
num_params: usize,
num_qubits: usize,
) -> Self {
Self {
constructor: constructor.into_py(py),
name: name.as_ref().to_owned(),
num_params,
num_qubits,
}
}
/// Construct a Python-space instance of the custom gate.
pub fn construct<A>(&self, py: Python, args: A) -> PyResult<Py<PyAny>>
where
A: IntoPy<Py<PyTuple>>,
{
let args = args.into_py(py);
let received_num_params = args.bind(py).len();
if received_num_params == self.num_params {
self.constructor.call1(py, args.bind(py))
} else {
Err(QASM3ImporterError::new_err(format!(
"internal error: wrong number of params for {} (got {}, expected {})",
&self.name, received_num_params, self.num_params
)))
}
}
pub fn name(&self) -> &str {
&self.name
}
pub fn num_params(&self) -> usize {
self.num_params
}
pub fn num_qubits(&self) -> usize {
self.num_qubits
}
}
#[pymethods]
impl PyGate {
#[new]
#[pyo3(signature=(/, constructor, name, num_params, num_qubits))]
fn __new__(constructor: Py<PyAny>, name: String, num_params: usize, num_qubits: usize) -> Self {
Self {
constructor,
name,
num_params,
num_qubits,
}
}
fn __repr__<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyAny>> {
PyString::new_bound(py, "CustomGate(name={!r}, num_params={}, num_qubits={})").call_method1(
"format",
(
PyString::new_bound(py, &self.name),
self.num_params,
self.num_qubits,
),
)
}
fn __reduce__(&self, py: Python) -> Py<PyTuple> {
(
PyType::new_bound::<PyGate>(py),
(
self.constructor.clone_ref(py),
&self.name,
self.num_params,
self.num_qubits,
),
)
.into_py(py)
}
}
/// Wrapper around various Python-space imports. This is just a convenience wrapper to save us
/// needing to `getattr` things off a Python-space module quite so frequently. This is
/// give-or-take just a manual lookup for a few `import` items at the top of a Python module, and
/// the attached constructor functions produce (when appropriate) Rust-space wrappers around the
/// Python objects.
pub struct PyCircuitModule {
circuit: Py<PyType>,
qreg: Py<PyType>,
qubit: Py<PyType>,
creg: Py<PyType>,
clbit: Py<PyType>,
circuit_instruction: Py<PyType>,
barrier: Py<PyType>,
// The singleton object.
measure: Py<PyAny>,
}
impl PyCircuitModule {
/// Import the necessary components from `qiskit.circuit`.
pub fn import(py: Python) -> PyResult<Self> {
let module = PyModule::import_bound(py, "qiskit.circuit")?;
Ok(Self {
circuit: module
.getattr("QuantumCircuit")?
.downcast_into::<PyType>()?
.unbind(),
qreg: module
.getattr("QuantumRegister")?
.downcast_into::<PyType>()?
.unbind(),
qubit: module.getattr("Qubit")?.downcast_into::<PyType>()?.unbind(),
creg: module
.getattr("ClassicalRegister")?
.downcast_into::<PyType>()?
.unbind(),
clbit: module.getattr("Clbit")?.downcast_into::<PyType>()?.unbind(),
circuit_instruction: module
.getattr("CircuitInstruction")?
.downcast_into::<PyType>()?
.unbind(),
barrier: module
.getattr("Barrier")?
.downcast_into::<PyType>()?
.unbind(),
// Measure is a singleton, so just store the object.
measure: module.getattr("Measure")?.call0()?.into_py(py),
})
}
pub fn new_circuit(&self, py: Python) -> PyResult<PyCircuit> {
self.circuit.call0(py).map(PyCircuit)
}
pub fn new_qreg<T: IntoPy<Py<PyString>>>(
&self,
py: Python,
name: T,
size: usize,
) -> PyResult<PyQuantumRegister> {
let qreg = self.qreg.call1(py, (size, name.into_py(py)))?;
Ok(PyQuantumRegister {
items: qreg
.bind(py)
.getattr("_bits")?
.downcast_into::<PyList>()?
.unbind(),
object: qreg,
})
}
pub fn new_qubit(&self, py: Python) -> PyResult<Py<PyAny>> {
self.qubit.call0(py)
}
pub fn new_creg<T: IntoPy<Py<PyString>>>(
&self,
py: Python,
name: T,
size: usize,
) -> PyResult<PyClassicalRegister> {
let creg = self.creg.call1(py, (size, name.into_py(py)))?;
Ok(PyClassicalRegister {
items: creg
.bind(py)
.getattr("_bits")?
.downcast_into::<PyList>()?
.unbind(),
object: creg,
})
}
pub fn new_clbit(&self, py: Python) -> PyResult<Py<PyAny>> {
self.clbit.call0(py)
}
pub fn new_instruction<O, Q, C>(
&self,
py: Python,
operation: O,
qubits: Q,
clbits: C,
) -> PyResult<Py<PyAny>>
where
O: IntoPy<Py<PyAny>>,
Q: IntoPy<Py<PyTuple>>,
C: IntoPy<Py<PyTuple>>,
{
self.circuit_instruction
.call1(py, (operation, qubits.into_py(py), clbits.into_py(py)))
}
pub fn new_barrier(&self, py: Python, num_qubits: usize) -> PyResult<Py<PyAny>> {
self.barrier.call1(py, (num_qubits,)).map(|x| x.into_py(py))
}
pub fn measure(&self, py: Python) -> Py<PyAny> {
self.measure.clone_ref(py)
}
}
/// Circuit construction context object to provide an easier Rust-space interface for us to
/// construct the Python :class:`.QuantumCircuit`. The idea of doing this from Rust space like
/// this is that we might steadily be able to move more and more of it into being native Rust as
/// the Rust-space APIs around the internal circuit data stabilize.
pub struct PyCircuit(Py<PyAny>);
impl PyCircuit {
/// Untyped access to the inner Python object.
pub fn inner<'a>(&'a self, py: Python<'a>) -> &Bound<'a, PyAny> {
self.0.bind(py)
}
pub fn add_qreg(&self, py: Python, qreg: &PyQuantumRegister) -> PyResult<()> {
self.inner(py)
.call_method1("add_register", (qreg.to_object(py),))
.map(|_| ())
}
pub fn add_qubit(&self, py: Python, qubit: Py<PyAny>) -> PyResult<()> {
self.inner(py)
.call_method1("add_bits", ((qubit,),))
.map(|_| ())
}
pub fn add_creg(&self, py: Python, creg: &PyClassicalRegister) -> PyResult<()> {
self.inner(py)
.call_method1("add_register", (creg.to_object(py),))
.map(|_| ())
}
pub fn add_clbit<T: IntoPy<Py<PyAny>>>(&self, py: Python, clbit: T) -> PyResult<()> {
self.inner(py)
.call_method1("add_bits", ((clbit,),))
.map(|_| ())
}
pub fn append<T: IntoPy<Py<PyAny>>>(&self, py: Python, instruction: T) -> PyResult<()> {
self.inner(py)
.call_method1("_append", (instruction.into_py(py),))
.map(|_| ())
}
}
impl ::pyo3::IntoPy<Py<PyAny>> for PyCircuit {
fn into_py(self, py: Python) -> Py<PyAny> {
self.0.clone_ref(py)
}
}
|
qiskit/crates/qasm3/src/circuit.rs/0
|
{
"file_path": "qiskit/crates/qasm3/src/circuit.rs",
"repo_id": "qiskit",
"token_count": 5054
}
| 168 |
.. _qiskit-exceptions:
.. automodule:: qiskit.exceptions
:no-members:
:no-inherited-members:
:no-special-members:
|
qiskit/docs/apidoc/exceptions.rst/0
|
{
"file_path": "qiskit/docs/apidoc/exceptions.rst",
"repo_id": "qiskit",
"token_count": 53
}
| 169 |
.. _qiskit-result:
.. automodule:: qiskit.result
:no-members:
:no-inherited-members:
:no-special-members:
|
qiskit/docs/apidoc/result.rst/0
|
{
"file_path": "qiskit/docs/apidoc/result.rst",
"repo_id": "qiskit",
"token_count": 51
}
| 170 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Example showing how to draw a quantum circuit using Qiskit.
"""
from qiskit import QuantumCircuit
def build_bell_circuit():
"""Returns a circuit putting 2 qubits in the Bell state."""
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
return qc
# Create the circuit
bell_circuit = build_bell_circuit()
# Use the internal .draw() to print the circuit
print(bell_circuit)
|
qiskit/examples/python/circuit_draw.py/0
|
{
"file_path": "qiskit/examples/python/circuit_draw.py",
"repo_id": "qiskit",
"token_count": 293
}
| 171 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Compatibility helpers for the Numpy 1.x to 2.0 transition."""
import re
import typing
import warnings
import numpy as np
# This version pattern is taken from the pypa packaging project:
# https://github.com/pypa/packaging/blob/21.3/packaging/version.py#L223-L254 which is dual licensed
# Apache 2.0 and BSD see the source for the original authors and other details.
_VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
VERSION = np.lib.NumpyVersion(np.__version__)
VERSION_PARTS: typing.Tuple[int, ...]
"""The numeric parts of the Numpy release version, e.g. ``(2, 0, 0)``. Does not include pre- or
post-release markers (e.g. ``rc1``)."""
if match := re.fullmatch(_VERSION_PATTERN, np.__version__, flags=re.VERBOSE | re.IGNORECASE):
# Assuming Numpy won't ever introduce epochs, and we don't care about pre/post markers.
VERSION_PARTS = tuple(int(x) for x in match["release"].split("."))
else:
# Just guess a version. We know all existing Numpys have good version strings, so the only way
# this should trigger is from a new or a dev version.
warnings.warn(
f"Unrecognized version string for Numpy: '{np.__version__}'. Assuming Numpy 2.0.",
RuntimeWarning,
)
VERSION_PARTS = (2, 0, 0)
COPY_ONLY_IF_NEEDED = None if VERSION_PARTS >= (2, 0, 0) else False
"""The sentinel value given to ``np.array`` and ``np.ndarray.astype`` (etc) to indicate that a copy
should be made only if required."""
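# A hypothetical call site (not part of this module) would use it as, for example:
#   np.array(data, copy=COPY_ONLY_IF_NEEDED)
# which asks both Numpy 1.x (copy=False) and 2.x (copy=None) to copy only when necessary.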
|
qiskit/qiskit/_numpy_compat.py/0
|
{
"file_path": "qiskit/qiskit/_numpy_compat.py",
"repo_id": "qiskit",
"token_count": 1333
}
| 172 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""User-space constructor functions for the expression tree, which do some of the inference and
lifting boilerplate work."""
# pylint: disable=redefined-builtin,redefined-outer-name
from __future__ import annotations
__all__ = [
"lift",
"bit_not",
"logic_not",
"bit_and",
"bit_or",
"bit_xor",
"logic_and",
"logic_or",
"equal",
"not_equal",
"less",
"less_equal",
"greater",
"greater_equal",
"lift_legacy_condition",
]
import typing
from .expr import Expr, Var, Value, Unary, Binary, Cast, Index
from ..types import CastKind, cast_kind
from .. import types
if typing.TYPE_CHECKING:
import qiskit
def _coerce_lossless(expr: Expr, type: types.Type) -> Expr:
"""Coerce ``expr`` to ``type`` by inserting a suitable :class:`Cast` node, if the cast is
lossless. Otherwise, raise a ``TypeError``."""
kind = cast_kind(expr.type, type)
if kind is CastKind.EQUAL:
return expr
if kind is CastKind.IMPLICIT:
return Cast(expr, type, implicit=True)
if kind is CastKind.LOSSLESS:
return Cast(expr, type, implicit=False)
if kind is CastKind.DANGEROUS:
raise TypeError(f"cannot cast '{expr}' to '{type}' without loss of precision")
raise TypeError(f"no cast is defined to take '{expr}' to '{type}'")
def lift_legacy_condition(
condition: tuple[qiskit.circuit.Clbit | qiskit.circuit.ClassicalRegister, int], /
) -> Expr:
"""Lift a legacy two-tuple equality condition into a new-style :class:`Expr`.
Examples:
Taking an old-style conditional instruction and getting an :class:`Expr` from its
condition::
from qiskit.circuit import ClassicalRegister
from qiskit.circuit.library import HGate
from qiskit.circuit.classical import expr
cr = ClassicalRegister(2)
instr = HGate().c_if(cr, 3)
lifted = expr.lift_legacy_condition(instr.condition)
"""
from qiskit.circuit import Clbit # pylint: disable=cyclic-import
target, value = condition
if isinstance(target, Clbit):
bool_ = types.Bool()
return Var(target, bool_) if value else Unary(Unary.Op.LOGIC_NOT, Var(target, bool_), bool_)
left = Var(target, types.Uint(width=target.size))
if value.bit_length() > target.size:
left = Cast(left, types.Uint(width=value.bit_length()), implicit=True)
right = Value(value, left.type)
return Binary(Binary.Op.EQUAL, left, right, types.Bool())
def lift(value: typing.Any, /, type: types.Type | None = None) -> Expr:
"""Lift the given Python ``value`` to a :class:`~.expr.Value` or :class:`~.expr.Var`.
If an explicit ``type`` is given, the typing in the output will reflect that.
Examples:
Lifting simple circuit objects to be :class:`~.expr.Var` instances::
>>> from qiskit.circuit import Clbit, ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.lift(Clbit())
Var(<clbit>, Bool())
>>> expr.lift(ClassicalRegister(3, "c"))
Var(ClassicalRegister(3, "c"), Uint(3))
The type of the return value can be influenced, if the given value could be interpreted
losslessly as the given type (use :func:`cast` to perform a full set of casting
operations, include lossy ones)::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr, types
>>> expr.lift(ClassicalRegister(3, "c"), types.Uint(5))
Var(ClassicalRegister(3, "c"), Uint(5))
>>> expr.lift(5, types.Uint(4))
Value(5, Uint(4))
"""
if isinstance(value, Expr):
if type is not None:
raise ValueError("use 'cast' to cast existing expressions, not 'lift'")
return value
from qiskit.circuit import Clbit, ClassicalRegister # pylint: disable=cyclic-import
inferred: types.Type
if value is True or value is False or isinstance(value, Clbit):
inferred = types.Bool()
constructor = Value if value is True or value is False else Var
elif isinstance(value, ClassicalRegister):
inferred = types.Uint(width=value.size)
constructor = Var
elif isinstance(value, int):
if value < 0:
raise ValueError("cannot represent a negative value")
inferred = types.Uint(width=value.bit_length() or 1)
constructor = Value
else:
raise TypeError(f"failed to infer a type for '{value}'")
if type is None:
type = inferred
if types.is_supertype(type, inferred):
return constructor(value, type)
raise TypeError(
f"the explicit type '{type}' is not suitable for representing '{value}';"
f" it must be non-strict supertype of '{inferred}'"
)
def cast(operand: typing.Any, type: types.Type, /) -> Expr:
"""Create an explicit cast from the given value to the given type.
Examples:
Add an explicit cast node that explicitly casts a higher precision type to a lower precision
one::
>>> from qiskit.circuit.classical import expr, types
            >>> value = expr.lift(5, types.Uint(32))
>>> expr.cast(value, types.Uint(8))
Cast(Value(5, types.Uint(32)), types.Uint(8), implicit=False)
"""
operand = lift(operand)
if cast_kind(operand.type, type) is CastKind.NONE:
raise TypeError(f"cannot cast '{operand}' to '{type}'")
return Cast(operand, type)
def bit_not(operand: typing.Any, /) -> Expr:
"""Create a bitwise 'not' expression node from the given value, resolving any implicit casts and
lifting the value into a :class:`Value` node if required.
Examples:
Bitwise negation of a :class:`.ClassicalRegister`::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.bit_not(ClassicalRegister(3, "c"))
Unary(Unary.Op.BIT_NOT, Var(ClassicalRegister(3, 'c'), Uint(3)), Uint(3))
"""
operand = lift(operand)
if operand.type.kind not in (types.Bool, types.Uint):
raise TypeError(f"cannot apply '{Unary.Op.BIT_NOT}' to type '{operand.type}'")
return Unary(Unary.Op.BIT_NOT, operand, operand.type)
def logic_not(operand: typing.Any, /) -> Expr:
"""Create a logical 'not' expression node from the given value, resolving any implicit casts and
lifting the value into a :class:`Value` node if required.
Examples:
Logical negation of a :class:`.ClassicalRegister`::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.logic_not(ClassicalRegister(3, "c"))
Unary(\
Unary.Op.LOGIC_NOT, \
Cast(Var(ClassicalRegister(3, 'c'), Uint(3)), Bool(), implicit=True), \
Bool())
"""
operand = _coerce_lossless(lift(operand), types.Bool())
return Unary(Unary.Op.LOGIC_NOT, operand, operand.type)
def _lift_binary_operands(left: typing.Any, right: typing.Any) -> tuple[Expr, Expr]:
"""Lift two binary operands simultaneously, inferring the widths of integer literals in either
position to match the other operand."""
left_int = isinstance(left, int) and not isinstance(left, bool)
right_int = isinstance(right, int) and not isinstance(right, bool)
if not (left_int or right_int):
left = lift(left)
right = lift(right)
elif not right_int:
right = lift(right)
if right.type.kind is types.Uint:
if left.bit_length() > right.type.width:
raise TypeError(
f"integer literal '{left}' is wider than the other operand '{right}'"
)
left = Value(left, right.type)
else:
left = lift(left)
elif not left_int:
left = lift(left)
if left.type.kind is types.Uint:
if right.bit_length() > left.type.width:
raise TypeError(
f"integer literal '{right}' is wider than the other operand '{left}'"
)
right = Value(right, left.type)
else:
right = lift(right)
else:
# Both operands are `int` literals, so give them both the width of the wider literal (at least 1).
uint = types.Uint(max(left.bit_length(), right.bit_length(), 1))
left = Value(left, uint)
right = Value(right, uint)
return left, right
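# Editor's note: the following is an illustrative sketch added for this write-up; it is not
# part of the original Qiskit module. It demonstrates the width inference implemented above
# through the public ``bit_and`` constructor defined later in this file.
def _demo_literal_width_inference():
    """Show that a bare integer literal is widened to match a ``Uint`` operand."""
    from qiskit.circuit import ClassicalRegister

    node = bit_and(ClassicalRegister(5, "c"), 2)
    # The literal 2 is lifted to Uint(5) (the register's width), not to its minimal Uint(2).
    assert node.right.type.width == 5
    return node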
def _binary_bitwise(op: Binary.Op, left: typing.Any, right: typing.Any) -> Expr:
left, right = _lift_binary_operands(left, right)
type: types.Type
if left.type.kind is right.type.kind is types.Bool:
type = types.Bool()
elif left.type.kind is types.Uint and right.type.kind is types.Uint:
if left.type != right.type:
raise TypeError(
"binary bitwise operations are defined between unsigned integers of the same width,"
f" but got {left.type.width} and {right.type.width}."
)
type = left.type
else:
raise TypeError(f"invalid types for '{op}': '{left.type}' and '{right.type}'")
return Binary(op, left, right, type)
def bit_and(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a bitwise 'and' expression node from the given value, resolving any implicit casts and
lifting the values into :class:`Value` nodes if required.
Examples:
Bitwise 'and' of a classical register and an integer literal::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.bit_and(ClassicalRegister(3, "c"), 0b111)
Binary(\
Binary.Op.BIT_AND, \
Var(ClassicalRegister(3, 'c'), Uint(3)), \
Value(7, Uint(3)), \
Uint(3))
"""
return _binary_bitwise(Binary.Op.BIT_AND, left, right)
def bit_or(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a bitwise 'or' expression node from the given value, resolving any implicit casts and
lifting the values into :class:`Value` nodes if required.
Examples:
Bitwise 'or' of a classical register and an integer literal::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.bit_or(ClassicalRegister(3, "c"), 0b101)
Binary(\
Binary.Op.BIT_OR, \
Var(ClassicalRegister(3, 'c'), Uint(3)), \
Value(5, Uint(3)), \
Uint(3))
"""
return _binary_bitwise(Binary.Op.BIT_OR, left, right)
def bit_xor(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a bitwise 'exclusive or' expression node from the given value, resolving any implicit
casts and lifting the values into :class:`Value` nodes if required.
Examples:
Bitwise 'exclusive or' of a classical register and an integer literal::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.bit_xor(ClassicalRegister(3, "c"), 0b101)
Binary(\
Binary.Op.BIT_XOR, \
Var(ClassicalRegister(3, 'c'), Uint(3)), \
Value(5, Uint(3)), \
Uint(3))
"""
return _binary_bitwise(Binary.Op.BIT_XOR, left, right)
def _binary_logical(op: Binary.Op, left: typing.Any, right: typing.Any) -> Expr:
bool_ = types.Bool()
left = _coerce_lossless(lift(left), bool_)
right = _coerce_lossless(lift(right), bool_)
return Binary(op, left, right, bool_)
def logic_and(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a logical 'and' expression node from the given value, resolving any implicit casts and
lifting the values into :class:`Value` nodes if required.
Examples:
Logical 'and' of two classical bits::
>>> from qiskit.circuit import Clbit
>>> from qiskit.circuit.classical import expr
>>> expr.logic_and(Clbit(), Clbit())
Binary(Binary.Op.LOGIC_AND, Var(<clbit 0>, Bool()), Var(<clbit 1>, Bool()), Bool())
"""
return _binary_logical(Binary.Op.LOGIC_AND, left, right)
def logic_or(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a logical 'or' expression node from the given value, resolving any implicit casts and
lifting the values into :class:`Value` nodes if required.
Examples:
Logical 'or' of two classical bits::
>>> from qiskit.circuit import Clbit
>>> from qiskit.circuit.classical import expr
>>> expr.logic_or(Clbit(), Clbit())
Binary(Binary.Op.LOGIC_OR, Var(<clbit 0>, Bool()), Var(<clbit 1>, Bool()), Bool())
"""
return _binary_logical(Binary.Op.LOGIC_OR, left, right)
def _equal_like(op: Binary.Op, left: typing.Any, right: typing.Any) -> Expr:
left, right = _lift_binary_operands(left, right)
if left.type.kind is not right.type.kind:
raise TypeError(f"invalid types for '{op}': '{left.type}' and '{right.type}'")
type = types.greater(left.type, right.type)
return Binary(op, _coerce_lossless(left, type), _coerce_lossless(right, type), types.Bool())
def equal(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create an 'equal' expression node from the given value, resolving any implicit casts and
lifting the values into :class:`Value` nodes if required.
Examples:
Equality between a classical register and an integer::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.equal(ClassicalRegister(3, "c"), 7)
Binary(Binary.Op.EQUAL, \
Var(ClassicalRegister(3, "c"), Uint(3)), \
Value(7, Uint(3)), \
Bool())
"""
return _equal_like(Binary.Op.EQUAL, left, right)
def not_equal(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a 'not equal' expression node from the given value, resolving any implicit casts and
lifting the values into :class:`Value` nodes if required.
Examples:
Inequality between a classical register and an integer::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.not_equal(ClassicalRegister(3, "c"), 7)
Binary(Binary.Op.NOT_EQUAL, \
Var(ClassicalRegister(3, "c"), Uint(3)), \
Value(7, Uint(3)), \
Bool())
"""
return _equal_like(Binary.Op.NOT_EQUAL, left, right)
def _binary_relation(op: Binary.Op, left: typing.Any, right: typing.Any) -> Expr:
left, right = _lift_binary_operands(left, right)
if left.type.kind is not right.type.kind or left.type.kind is types.Bool:
raise TypeError(f"invalid types for '{op}': '{left.type}' and '{right.type}'")
type = types.greater(left.type, right.type)
return Binary(op, _coerce_lossless(left, type), _coerce_lossless(right, type), types.Bool())
def less(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a 'less than' expression node from the given value, resolving any implicit casts and
lifting the values into :class:`Value` nodes if required.
Examples:
Query if a classical register is less than an integer::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.less(ClassicalRegister(3, "c"), 5)
Binary(Binary.Op.LESS, \
Var(ClassicalRegister(3, "c"), Uint(3)), \
Value(5, Uint(3)), \
Bool())
"""
return _binary_relation(Binary.Op.LESS, left, right)
def less_equal(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a 'less than or equal to' expression node from the given value, resolving any implicit
casts and lifting the values into :class:`Value` nodes if required.
Examples:
Query if a classical register is less than or equal to another::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.less_equal(ClassicalRegister(3, "a"), ClassicalRegister(3, "b"))
Binary(Binary.Op.LESS_EQUAL, \
Var(ClassicalRegister(3, "a"), Uint(3)), \
Var(ClassicalRegister(3, "b"), Uint(3)), \
Bool())
"""
return _binary_relation(Binary.Op.LESS_EQUAL, left, right)
def greater(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a 'greater than' expression node from the given value, resolving any implicit casts
and lifting the values into :class:`Value` nodes if required.
Examples:
Query if a classical register is greater than an integer::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.greater(ClassicalRegister(3, "c"), 5)
Binary(Binary.Op.GREATER, \
Var(ClassicalRegister(3, "c"), Uint(3)), \
Value(5, Uint(3)), \
Bool())
"""
return _binary_relation(Binary.Op.GREATER, left, right)
def greater_equal(left: typing.Any, right: typing.Any, /) -> Expr:
"""Create a 'greater than or equal to' expression node from the given value, resolving any
implicit casts and lifting the values into :class:`Value` nodes if required.
Examples:
Query if a classical register is greater than or equal to another::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.greater_equal(ClassicalRegister(3, "a"), ClassicalRegister(3, "b"))
Binary(Binary.Op.GREATER_EQUAL, \
Var(ClassicalRegister(3, "a"), Uint(3)), \
Var(ClassicalRegister(3, "b"), Uint(3)), \
Bool())
"""
return _binary_relation(Binary.Op.GREATER_EQUAL, left, right)
def _shift_like(
op: Binary.Op, left: typing.Any, right: typing.Any, type: types.Type | None
) -> Expr:
if type is not None and type.kind is not types.Uint:
raise TypeError(f"type '{type}' is not a valid bitshift operand type")
if isinstance(left, Expr):
left = _coerce_lossless(left, type) if type is not None else left
else:
left = lift(left, type)
right = lift(right)
if left.type.kind != types.Uint or right.type.kind != types.Uint:
raise TypeError(f"invalid types for '{op}': '{left.type}' and '{right.type}'")
return Binary(op, left, right, left.type)
def shift_left(left: typing.Any, right: typing.Any, /, type: types.Type | None = None) -> Expr:
"""Create a 'bitshift left' expression node from the given two values, resolving any implicit
casts and lifting the values into :class:`Value` nodes if required.
If ``type`` is given, the ``left`` operand will be coerced to it (if possible).
Examples:
Shift the value of a standalone variable left by some amount::
>>> from qiskit.circuit.classical import expr, types
>>> a = expr.Var.new("a", types.Uint(8))
>>> expr.shift_left(a, 4)
Binary(Binary.Op.SHIFT_LEFT, \
Var(<UUID>, Uint(8), name='a'), \
Value(4, Uint(3)), \
Uint(8))
Shift an integer literal by a variable amount, coercing the type of the literal::
>>> expr.shift_left(3, a, types.Uint(16))
Binary(Binary.Op.SHIFT_LEFT, \
Value(3, Uint(16)), \
Var(<UUID>, Uint(8), name='a'), \
Uint(16))
"""
return _shift_like(Binary.Op.SHIFT_LEFT, left, right, type)
def shift_right(left: typing.Any, right: typing.Any, /, type: types.Type | None = None) -> Expr:
"""Create a 'bitshift right' expression node from the given values, resolving any implicit casts
and lifting the values into :class:`Value` nodes if required.
If ``type`` is given, the ``left`` operand will be coerced to it (if possible).
Examples:
Shift the value of a classical register right by some amount::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.shift_right(ClassicalRegister(8, "a"), 4)
Binary(Binary.Op.SHIFT_RIGHT, \
Var(ClassicalRegister(8, "a"), Uint(8)), \
Value(4, Uint(3)), \
Uint(8))
"""
return _shift_like(Binary.Op.SHIFT_RIGHT, left, right, type)
def index(target: typing.Any, index: typing.Any, /) -> Expr:
"""Index into the ``target`` with the given integer ``index``, lifting the values into
:class:`Value` nodes if required.
This can be used as the target of a :class:`.Store`, if the ``target`` is itself an lvalue.
Examples:
Index into a classical register with a literal::
>>> from qiskit.circuit import ClassicalRegister
>>> from qiskit.circuit.classical import expr
>>> expr.index(ClassicalRegister(8, "a"), 3)
Index(Var(ClassicalRegister(8, "a"), Uint(8)), Value(3, Uint(2)), Bool())
"""
target, index = lift(target), lift(index)
if target.type.kind is not types.Uint or index.type.kind is not types.Uint:
raise TypeError(f"invalid types for indexing: '{target.type}' and '{index.type}'")
return Index(target, index, types.Bool())
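# Editor's note: illustrative usage sketch, not part of the original Qiskit module. It only
# exercises the public constructors defined above to build a small classical expression tree.
def _demo_expression_tree():
    """Combine comparisons, logic and indexing into one expression."""
    from qiskit.circuit import ClassicalRegister, Clbit

    cr = ClassicalRegister(3, "c")
    flag = Clbit()
    # (c == 5) and (not flag): comparisons and logical operations all produce Bool-typed nodes.
    condition = logic_and(equal(cr, 5), logic_not(flag))
    assert condition.type.kind is types.Bool
    # Indexing a Uint-typed register yields a Bool-typed single bit.
    bit = index(cr, 0)
    assert bit.type.kind is types.Bool
    return condition, bit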
|
qiskit/qiskit/circuit/classical/expr/constructors.py/0
|
{
"file_path": "qiskit/qiskit/circuit/classical/expr/constructors.py",
"repo_id": "qiskit",
"token_count": 8510
}
| 173 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Implementations of boolean logic quantum circuits."""
from __future__ import annotations
from typing import List, Optional
from qiskit.circuit import QuantumRegister, QuantumCircuit, AncillaRegister
from qiskit.circuit.library.standard_gates import MCXGate
class OR(QuantumCircuit):
r"""A circuit implementing the logical OR operation on a number of qubits.
For the OR operation the state :math:`|1\rangle` is interpreted as ``True``. The result
qubit is flipped if the state of any variable qubit is ``True``. The OR is implemented using
a multi-open-controlled X gate (i.e. one that flips if the state is :math:`|0\rangle`) together with
an X gate on the result qubit.
Using a list of flags, qubits can be skipped or negated.
The OR gate without special flags:
.. plot::
from qiskit.circuit.library import OR
from qiskit.visualization.library import _generate_circuit_library_visualization
circuit = OR(5)
_generate_circuit_library_visualization(circuit)
Using flags we can negate qubits or skip them. For instance, if we have 5 qubits and want to
return ``True`` if the first qubit is ``False`` or one of the last two is ``True``, we use the
flags ``[-1, 0, 0, 1, 1]``.
.. plot::
from qiskit.circuit.library import OR
from qiskit.visualization.library import _generate_circuit_library_visualization
circuit = OR(5, flags=[-1, 0, 0, 1, 1])
_generate_circuit_library_visualization(circuit)
"""
def __init__(
self,
num_variable_qubits: int,
flags: Optional[List[int]] = None,
mcx_mode: str = "noancilla",
) -> None:
"""Create a new logical OR circuit.
Args:
num_variable_qubits: The number of variable qubits on which the OR is computed. The result
will be written into an additional result qubit.
flags: A list of +1/0/-1 marking negations or omissions of qubits.
mcx_mode: The mode to be used to implement the multi-controlled X gate.
"""
self.num_variable_qubits = num_variable_qubits
self.flags = flags
# add registers
qr_variable = QuantumRegister(num_variable_qubits, name="variable")
qr_result = QuantumRegister(1, name="result")
circuit = QuantumCircuit(qr_variable, qr_result, name="or")
# determine the control qubits: all that have a nonzero flag
flags = flags or [1] * num_variable_qubits
control_qubits = [q for q, flag in zip(qr_variable, flags) if flag != 0]
# determine the qubits that need to be flipped (if a flag is > 0)
flip_qubits = [q for q, flag in zip(qr_variable, flags) if flag > 0]
# determine the number of ancillas
num_ancillas = MCXGate.get_num_ancilla_qubits(len(control_qubits), mode=mcx_mode)
if num_ancillas > 0:
qr_ancilla = AncillaRegister(num_ancillas, "ancilla")
circuit.add_register(qr_ancilla)
else:
qr_ancilla = AncillaRegister(0)
circuit.x(qr_result)
if len(flip_qubits) > 0:
circuit.x(flip_qubits)
circuit.mcx(control_qubits, qr_result[:], qr_ancilla[:], mode=mcx_mode)
if len(flip_qubits) > 0:
circuit.x(flip_qubits)
super().__init__(*circuit.qregs, name="or")
self.compose(circuit.to_gate(), qubits=self.qubits, inplace=True)
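# Editor's note: illustrative usage sketch, not part of the original Qiskit module. It builds
# the flagged OR circuit from the class docstring and checks its register layout.
def _demo_or_circuit():
    """Construct OR over 5 variable qubits with one negated and two skipped qubits."""
    circuit = OR(5, flags=[-1, 0, 0, 1, 1], mcx_mode="noancilla")
    # 5 variable qubits plus 1 result qubit; "noancilla" mode adds no ancilla register here.
    assert circuit.num_qubits == 6
    return circuit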
|
qiskit/qiskit/circuit/library/boolean_logic/quantum_or.py/0
|
{
"file_path": "qiskit/qiskit/circuit/library/boolean_logic/quantum_or.py",
"repo_id": "qiskit",
"token_count": 1495
}
| 174 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Multiple-Control, Multiple-Target Gate."""
from __future__ import annotations
from collections.abc import Callable
from qiskit import circuit
from qiskit.circuit import ControlledGate, Gate, QuantumRegister, QuantumCircuit
from qiskit.exceptions import QiskitError
# pylint: disable=cyclic-import
from ..standard_gates import XGate, YGate, ZGate, HGate, TGate, TdgGate, SGate, SdgGate
class MCMT(QuantumCircuit):
"""The multi-controlled multi-target gate, for an arbitrary singly controlled target gate.
For example, the H gate controlled on 3 qubits and acting on 2 target qubits is represented as:
.. parsed-literal::
───■────
   │
───■────
   │
───■────
┌──┴───┐
┤0     ├
│  2-H │
┤1     ├
└──────┘
This default implementation requires no ancilla qubits: it broadcasts the target gate
to the number of target qubits and uses Qiskit's generic control routine to control the
broadcasted target on the control qubits. If ancilla qubits are available, a more efficient
variant using the so-called V-chain decomposition can be used. This is implemented in
:class:`~qiskit.circuit.library.MCMTVChain`.
"""
def __init__(
self,
gate: Gate | Callable[[QuantumCircuit, circuit.Qubit, circuit.Qubit], circuit.Instruction],
num_ctrl_qubits: int,
num_target_qubits: int,
) -> None:
"""Create a new multi-control multi-target gate.
Args:
gate: The gate to be applied, controlled on the control qubits and acting on the target
qubits. Can be either a Gate or a circuit method.
If it is a callable, it will be cast to a Gate.
num_ctrl_qubits: The number of control qubits.
num_target_qubits: The number of target qubits.
Raises:
AttributeError: If the gate cannot be casted to a controlled gate.
AttributeError: If the number of controls or targets is 0.
"""
if num_ctrl_qubits == 0 or num_target_qubits == 0:
raise AttributeError("Need at least one control and one target qubit.")
# set the internal properties and determine the number of qubits
self.gate = self._identify_gate(gate)
self.num_ctrl_qubits = num_ctrl_qubits
self.num_target_qubits = num_target_qubits
num_qubits = num_ctrl_qubits + num_target_qubits + self.num_ancilla_qubits
# initialize the circuit object
super().__init__(num_qubits, name="mcmt")
self._label = f"{num_target_qubits}-{self.gate.name.capitalize()}"
# build the circuit
self._build()
def _build(self):
"""Define the MCMT gate without ancillas."""
if self.num_target_qubits == 1:
# no broadcasting needed (makes for better circuit diagrams)
broadcasted_gate = self.gate
else:
broadcasted = QuantumCircuit(self.num_target_qubits, name=self._label)
for target in list(range(self.num_target_qubits)):
broadcasted.append(self.gate, [target], [])
broadcasted_gate = broadcasted.to_gate()
mcmt_gate = broadcasted_gate.control(self.num_ctrl_qubits)
self.append(mcmt_gate, self.qubits, [])
@property
def num_ancilla_qubits(self):
"""Return the number of ancillas."""
return 0
def _identify_gate(self, gate):
"""Case the gate input to a gate."""
valid_gates = {
"ch": HGate(),
"cx": XGate(),
"cy": YGate(),
"cz": ZGate(),
"h": HGate(),
"s": SGate(),
"sdg": SdgGate(),
"x": XGate(),
"y": YGate(),
"z": ZGate(),
"t": TGate(),
"tdg": TdgGate(),
}
if isinstance(gate, ControlledGate):
base_gate = gate.base_gate
elif isinstance(gate, Gate):
if gate.num_qubits != 1:
raise AttributeError("Base gate must act on one qubit only.")
base_gate = gate
elif isinstance(gate, QuantumCircuit):
if gate.num_qubits != 1:
raise AttributeError(
"The circuit you specified as control gate can only have one qubit!"
)
base_gate = gate.to_gate() # raises error if circuit contains non-unitary instructions
else:
if callable(gate): # identify via name of the passed function
name = gate.__name__
elif isinstance(gate, str):
name = gate
else:
raise AttributeError(f"Invalid gate specified: {gate}")
base_gate = valid_gates[name]
return base_gate
def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None, annotated=False):
"""Return the controlled version of the MCMT circuit."""
if not annotated and ctrl_state is None:
gate = MCMT(self.gate, self.num_ctrl_qubits + num_ctrl_qubits, self.num_target_qubits)
else:
gate = super().control(num_ctrl_qubits, label, ctrl_state, annotated=annotated)
return gate
def inverse(self, annotated: bool = False):
"""Return the inverse MCMT circuit, which is itself."""
return MCMT(self.gate, self.num_ctrl_qubits, self.num_target_qubits)
class MCMTVChain(MCMT):
"""The MCMT implementation using the CCX V-chain.
This implementation requires ancillas but is decomposed into a much shallower circuit
than the default implementation in :class:`~qiskit.circuit.library.MCMT`.
**Expanded Circuit:**
.. plot::
from qiskit.circuit.library import MCMTVChain, ZGate
from qiskit.visualization.library import _generate_circuit_library_visualization
circuit = MCMTVChain(ZGate(), 2, 2)
_generate_circuit_library_visualization(circuit.decompose())
**Examples:**
>>> from qiskit.circuit.library import HGate
>>> MCMTVChain(HGate(), 3, 2).draw()
q_0: βββ βββββββββββββββββββββββββ ββ
β β
q_1: βββ βββββββββββββββββββββββββ ββ
β β
q_2: βββΌβββββ βββββββββββββββ βββββΌββ
β β βββββ β β
q_3: βββΌβββββΌβββ€ H βββββββββΌβββββΌββ
β β βββ¬βββββββ β β
q_4: βββΌβββββΌβββββΌβββ€ H ββββΌβββββΌββ
βββ΄ββ β β βββ¬ββ β βββ΄ββ
q_5: β€ X ββββ βββββΌβββββΌβββββ βββ€ X β
ββββββββ΄ββ β β βββ΄βββββββ
q_6: ββββββ€ X ββββ βββββ βββ€ X ββββββ
βββββ βββββ
"""
def _build(self):
"""Define the MCMT gate."""
control_qubits = self.qubits[: self.num_ctrl_qubits]
target_qubits = self.qubits[
self.num_ctrl_qubits : self.num_ctrl_qubits + self.num_target_qubits
]
ancilla_qubits = self.qubits[self.num_ctrl_qubits + self.num_target_qubits :]
if len(ancilla_qubits) > 0:
master_control = ancilla_qubits[-1]
else:
master_control = control_qubits[0]
self._ccx_v_chain_rule(control_qubits, ancilla_qubits, reverse=False)
for qubit in target_qubits:
self.append(self.gate.control(), [master_control, qubit], [])
self._ccx_v_chain_rule(control_qubits, ancilla_qubits, reverse=True)
@property
def num_ancilla_qubits(self):
"""Return the number of ancilla qubits required."""
return max(0, self.num_ctrl_qubits - 1)
def _ccx_v_chain_rule(
self,
control_qubits: QuantumRegister | list[circuit.Qubit],
ancilla_qubits: QuantumRegister | list[circuit.Qubit],
reverse: bool = False,
) -> None:
"""Get the rule for the CCX V-chain.
The CCX V-chain progressively computes the CCX of the control qubits and puts the final
result in the last ancillary qubit.
Args:
control_qubits: The control qubits.
ancilla_qubits: The ancilla qubits.
reverse: If True, compute the chain down to the qubit. If False, compute upwards.
Returns:
The rule for the (reversed) CCX V-chain.
Raises:
QiskitError: If an insufficient number of ancilla qubits was provided.
"""
if len(ancilla_qubits) == 0:
return
if len(ancilla_qubits) < len(control_qubits) - 1:
raise QiskitError("Insufficient number of ancilla qubits.")
iterations = list(enumerate(range(2, len(control_qubits))))
if not reverse:
self.ccx(control_qubits[0], control_qubits[1], ancilla_qubits[0])
for i, j in iterations:
self.ccx(control_qubits[j], ancilla_qubits[i], ancilla_qubits[i + 1])
else:
for i, j in reversed(iterations):
self.ccx(control_qubits[j], ancilla_qubits[i], ancilla_qubits[i + 1])
self.ccx(control_qubits[0], control_qubits[1], ancilla_qubits[0])
def inverse(self, annotated: bool = False):
return MCMTVChain(self.gate, self.num_ctrl_qubits, self.num_target_qubits)
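# Editor's note: illustrative usage sketch, not part of the original Qiskit module. It uses
# the ``HGate`` already imported at the top of this file to compare both implementations.
def _demo_mcmt_variants():
    """Contrast the ancilla requirements of MCMT and its V-chain variant."""
    plain = MCMT(HGate(), num_ctrl_qubits=3, num_target_qubits=2)
    chained = MCMTVChain(HGate(), num_ctrl_qubits=3, num_target_qubits=2)
    # The default implementation needs no ancillas; the V-chain needs num_ctrl_qubits - 1.
    assert plain.num_ancilla_qubits == 0
    assert chained.num_ancilla_qubits == 2
    return plain, chained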
|
qiskit/qiskit/circuit/library/generalized_gates/mcmt.py/0
|
{
"file_path": "qiskit/qiskit/circuit/library/generalized_gates/mcmt.py",
"repo_id": "qiskit",
"token_count": 4414
}
| 175 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The EfficientSU2 2-local circuit."""
from __future__ import annotations
import typing
from collections.abc import Callable
from numpy import pi
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library.standard_gates import RYGate, RZGate, CXGate
from .two_local import TwoLocal
if typing.TYPE_CHECKING:
import qiskit # pylint: disable=cyclic-import
class EfficientSU2(TwoLocal):
r"""The hardware efficient SU(2) 2-local circuit.
The ``EfficientSU2`` circuit consists of layers of single qubit operations spanned by SU(2)
and :math:`CX` entanglements. This is a heuristic pattern that can be used to prepare trial wave
functions for variational quantum algorithms or classification circuits for machine learning.
SU(2) stands for the special unitary group of degree 2; its elements are :math:`2 \times 2`
unitary matrices with determinant 1, such as the Pauli rotation gates.
On 3 qubits and using the Pauli :math:`Y` and :math:`Z` su2_gates as single qubit gates, the
hardware efficient SU(2) circuit is represented by:
.. parsed-literal::
ββββββββββββββββββββββββ β β β ββββββββββββββββββββββββββ
β€ RY(ΞΈ[0]) ββ€ RZ(ΞΈ[3]) ββββββββββββ βββββ ... ββββ€ RY(ΞΈ[12]) ββ€ RZ(ΞΈ[15]) β
ββββββββββββ€ββββββββββββ€ β βββ΄ββ β β βββββββββββββ€βββββββββββββ€
β€ RY(ΞΈ[1]) ββ€ RZ(ΞΈ[4]) βββββββ βββ€ X ββββ ... ββββ€ RY(ΞΈ[13]) ββ€ RZ(ΞΈ[16]) β
ββββββββββββ€ββββββββββββ€ β βββ΄βββββββ β β βββββββββββββ€βββββββββββββ€
β€ RY(ΞΈ[2]) ββ€ RZ(ΞΈ[5]) βββββ€ X βββββββββ ... ββββ€ RY(ΞΈ[14]) ββ€ RZ(ΞΈ[17]) β
ββββββββββββββββββββββββ β βββββ β β ββββββββββββββββββββββββββ
See :class:`~qiskit.circuit.library.RealAmplitudes` for more detail on the possible arguments
and options such as skipping unentangled qubits, which apply here too.
Examples:
>>> circuit = EfficientSU2(3, reps=1)
>>> print(circuit)
ββββββββββββββββββββββββ ββββββββββββββββββββββββ
q_0: β€ RY(ΞΈ[0]) ββ€ RZ(ΞΈ[3]) ββββ βββββ βββ€ RY(ΞΈ[6]) ββ€ RZ(ΞΈ[9]) ββββββββββββββ
ββββββββββββ€ββββββββββββ€βββ΄ββ β ββββββββββββββββββββββββ€βββββββββββββ
q_1: β€ RY(ΞΈ[1]) ββ€ RZ(ΞΈ[4]) ββ€ X ββββΌββββββββ βββββββ€ RY(ΞΈ[7]) ββ€ RZ(ΞΈ[10]) β
ββββββββββββ€ββββββββββββ€ββββββββ΄ββ βββ΄ββ ββββββββββββ€βββββββββββββ€
q_2: β€ RY(ΞΈ[2]) ββ€ RZ(ΞΈ[5]) βββββββ€ X βββββ€ X ββββββ€ RY(ΞΈ[8]) ββ€ RZ(ΞΈ[11]) β
ββββββββββββββββββββββββ βββββ βββββ βββββββββββββββββββββββββ
>>> ansatz = EfficientSU2(4, su2_gates=['rx', 'y'], entanglement='circular', reps=1)
>>> qc = QuantumCircuit(4) # create a circuit and append the variational form
>>> qc.compose(ansatz, inplace=True)
>>> qc.draw()
ββββββββββββββββββββββ ββββββββββββ βββββ
q_0: β€ RX(ΞΈ[0]) ββ€ Y ββ€ X ββββ βββ€ RX(ΞΈ[4]) βββββ€ Y ββββββββββββββββββββββ
ββββββββββββ€βββββ€βββ¬βββββ΄ββββββββββββββββββ΄ββββ΄ββββ βββββ
q_1: β€ RX(ΞΈ[1]) ββ€ Y ββββΌβββ€ X βββββββ βββββββ€ RX(ΞΈ[5]) βββββ€ Y ββββββββββ
ββββββββββββ€βββββ€ β βββββ βββ΄ββ ββββββββββββββββ΄ββββ΄βββββββββ
q_2: β€ RX(ΞΈ[2]) ββ€ Y ββββΌβββββββββββ€ X βββββββββββ βββββββ€ RX(ΞΈ[6]) ββ€ Y β
ββββββββββββ€βββββ€ β βββββ βββ΄ββ ββββββββββββ€βββββ€
q_3: β€ RX(ΞΈ[3]) ββ€ Y ββββ βββββββββββββββββββββββ€ X ββββββ€ RX(ΞΈ[7]) ββ€ Y β
βββββββββββββββββ βββββ βββββββββββββββββ
"""
def __init__(
self,
num_qubits: int | None = None,
su2_gates: (
str
| type
| qiskit.circuit.Instruction
| QuantumCircuit
| list[str | type | qiskit.circuit.Instruction | QuantumCircuit]
| None
) = None,
entanglement: str | list[list[int]] | Callable[[int], list[int]] = "reverse_linear",
reps: int = 3,
skip_unentangled_qubits: bool = False,
skip_final_rotation_layer: bool = False,
parameter_prefix: str = "ΞΈ",
insert_barriers: bool = False,
initial_state: QuantumCircuit | None = None,
name: str = "EfficientSU2",
flatten: bool | None = None,
) -> None:
"""
Args:
num_qubits: The number of qubits of the EfficientSU2 circuit.
reps: Specifies how often the structure of a rotation layer followed by an entanglement
layer is repeated.
su2_gates: The SU(2) single qubit gates to apply in single qubit gate layers.
If only one gate is provided, the same gate is applied to each qubit.
If a list of gates is provided, all gates are applied to each qubit in the provided
order.
entanglement: Specifies the entanglement structure. Can be a string
('full', 'linear', 'reverse_linear', 'pairwise', 'circular', or 'sca'),
a list of integer-pairs specifying the indices of qubits entangled with one another,
or a callable returning such a list provided with the index of the entanglement layer.
Defaults to 'reverse_linear' entanglement.
Note that 'reverse_linear' entanglement provides the same unitary as 'full'
with fewer entangling gates.
See the Examples section of :class:`~qiskit.circuit.library.TwoLocal` for more
detail.
initial_state: A `QuantumCircuit` object to prepend to the circuit.
skip_unentangled_qubits: If True, the single qubit gates are only applied to qubits
that are entangled with another qubit. If False, the single qubit gates are applied
to each qubit in the Ansatz. Defaults to False.
skip_final_rotation_layer: If False, a rotation layer is added at the end of the
ansatz. If True, no rotation layer is added.
parameter_prefix: The parameterized gates require a parameter to be defined, for which
we use :class:`~qiskit.circuit.ParameterVector`.
insert_barriers: If True, barriers are inserted in between each layer. If False,
no barriers are inserted.
flatten: Set this to ``True`` to output a flat circuit instead of nesting it inside multiple
layers of gate objects. By default, the contents of
the output circuit will be wrapped in nested objects for
cleaner visualization. However, if you're using this circuit
for anything besides visualization it is **strongly** recommended
to set this flag to ``True`` to avoid a large performance
overhead for parameter binding.
"""
if su2_gates is None:
su2_gates = [RYGate, RZGate]
super().__init__(
num_qubits=num_qubits,
rotation_blocks=su2_gates,
entanglement_blocks=CXGate,
entanglement=entanglement,
reps=reps,
skip_unentangled_qubits=skip_unentangled_qubits,
skip_final_rotation_layer=skip_final_rotation_layer,
parameter_prefix=parameter_prefix,
insert_barriers=insert_barriers,
initial_state=initial_state,
name=name,
flatten=flatten,
)
@property
def parameter_bounds(self) -> list[tuple[float, float]]:
"""Return the parameter bounds.
Returns:
The parameter bounds.
"""
return self.num_parameters * [(-pi, pi)]
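# Editor's note: illustrative usage sketch, not part of the original Qiskit module. As the
# ``flatten`` documentation above recommends, a flat circuit is used before binding parameters.
def _demo_efficient_su2():
    """Build a small EfficientSU2 ansatz and bind a value to every free parameter."""
    import numpy as np

    ansatz = EfficientSU2(3, reps=1, flatten=True)
    values = np.full(ansatz.num_parameters, 0.1)
    return ansatz.assign_parameters(values)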
|
qiskit/qiskit/circuit/library/n_local/efficient_su2.py/0
|
{
"file_path": "qiskit/qiskit/circuit/library/n_local/efficient_su2.py",
"repo_id": "qiskit",
"token_count": 3862
}
| 176 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Standard gates."""
from __future__ import annotations
from math import pi
from qiskit.circuit import (
EquivalenceLibrary,
Parameter,
QuantumCircuit,
QuantumRegister,
Gate,
Qubit,
Clbit,
)
from . import (
HGate,
CHGate,
PhaseGate,
CPhaseGate,
RGate,
RCCXGate,
RXGate,
CRXGate,
RXXGate,
RYGate,
CRYGate,
RZGate,
CRZGate,
RZZGate,
RZXGate,
SGate,
SdgGate,
CSGate,
CSdgGate,
SwapGate,
CSwapGate,
iSwapGate,
SXGate,
SXdgGate,
CSXGate,
DCXGate,
TGate,
TdgGate,
UGate,
CUGate,
U1Gate,
CU1Gate,
U2Gate,
U3Gate,
CU3Gate,
XGate,
CXGate,
CCXGate,
YGate,
CYGate,
RYYGate,
ECRGate,
ZGate,
CZGate,
IGate,
CCZGate,
XXPlusYYGate,
XXMinusYYGate,
)
_sel = StandardEquivalenceLibrary = EquivalenceLibrary()
def _cnot_rxx_decompose(plus_ry: bool = True, plus_rxx: bool = True):
"""Decomposition of CNOT gate.
NOTE: this differs from CNOT by a global phase.
The matrix returned is given by exp(1j * pi/4) * CNOT
Args:
plus_ry (bool): positive initial RY rotation
plus_rxx (bool): positive RXX rotation.
Returns:
QuantumCircuit: The decomposed circuit for CNOT gate (up to
global phase).
"""
# Convert boolean args to +/- 1 signs
if plus_ry:
sgn_ry = 1
else:
sgn_ry = -1
if plus_rxx:
sgn_rxx = 1
else:
sgn_rxx = -1
circuit = QuantumCircuit(2, global_phase=-sgn_ry * sgn_rxx * pi / 4)
circuit.append(RYGate(sgn_ry * pi / 2), [0])
circuit.append(RXXGate(sgn_rxx * pi / 2), [0, 1])
circuit.append(RXGate(-sgn_rxx * pi / 2), [0])
circuit.append(RXGate(-sgn_rxx * sgn_ry * pi / 2), [1])
circuit.append(RYGate(-sgn_ry * pi / 2), [0])
return circuit
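# Editor's note: illustrative check added for this write-up, not part of the original file.
# ``Operator.equiv`` compares unitaries up to a global phase, which is exactly the freedom
# the docstring above claims for this decomposition.
def _check_cnot_rxx_decompose():
    """Verify that every sign choice of the RXX-based decomposition matches CNOT."""
    from qiskit.quantum_info import Operator

    target = Operator(CXGate())
    for plus_ry in (True, False):
        for plus_rxx in (True, False):
            decomposed = _cnot_rxx_decompose(plus_ry=plus_ry, plus_rxx=plus_rxx)
            assert Operator(decomposed).equiv(target)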
# Import existing gate definitions
# HGate
#
#    ┌───┐        ┌─────────┐
# q: ┤ H ├  ≡  q: ┤ U2(0,π) ├
#    └───┘        └─────────┘
q = QuantumRegister(1, "q")
def_h = QuantumCircuit(q)
def_h.append(U2Gate(0, pi), [q[0]], [])
_sel.add_equivalence(HGate(), def_h)
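# Editor's note: illustrative sketch, not part of the original file. Every rule registered
# in this module can be read back with ``EquivalenceLibrary.get_entry``, which returns the
# list of circuits considered equivalent to the queried gate.
def _demo_h_equivalences():
    """Return all equivalences currently registered for the H gate."""
    entries = _sel.get_entry(HGate())
    assert len(entries) >= 1
    return entries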
# CHGate
#
# q_0: βββ ββ q_0: ββββββββββββββββββ βββββββββββββββββββββ
# βββ΄ββ β‘ ββββββββββββββββββ΄βββββββββββββββββββββ
# q_1: β€ H β q_1: β€ S ββ€ H ββ€ T ββ€ X ββ€ Tdg ββ€ H ββ€ Sdg β
# βββββ βββββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
def_ch = QuantumCircuit(q)
for inst, qargs, cargs in [
(SGate(), [q[1]], []),
(HGate(), [q[1]], []),
(TGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(TdgGate(), [q[1]], []),
(HGate(), [q[1]], []),
(SdgGate(), [q[1]], []),
]:
def_ch.append(inst, qargs, cargs)
_sel.add_equivalence(CHGate(), def_ch)
# PhaseGate
#
#    ┌──────┐        ┌───────┐
# q: ┤ P(ϴ) ├  ≡  q: ┤ U1(ϴ) ├
#    └──────┘        └───────┘
q = QuantumRegister(1, "q")
theta = Parameter("theta")
phase_to_u1 = QuantumCircuit(q)
phase_to_u1.append(U1Gate(theta), [0])
_sel.add_equivalence(PhaseGate(theta), phase_to_u1)
q = QuantumRegister(1, "q")
theta = Parameter("theta")
phase_to_u = QuantumCircuit(q)
phase_to_u.u(0, 0, theta, 0)
_sel.add_equivalence(PhaseGate(theta), phase_to_u)
# CPhaseGate
# ββββββββββ
# q_0: ββ ββββ q_0: β€ P(Ο΄/2) ββββ ββββββββββββββββ ββββββββββββ
# βP(Ο΄) β‘ βββββββββββββ΄ββββββββββββββββ΄ββββββββββββ
# q_1: ββ ββββ q_1: βββββββββββ€ X ββ€ P(-Ο΄/2) ββ€ X ββ€ P(Ο΄/2) β
# βββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_cphase = QuantumCircuit(q)
def_cphase.p(theta / 2, 0)
def_cphase.cx(0, 1)
def_cphase.p(-theta / 2, 1)
def_cphase.cx(0, 1)
def_cphase.p(theta / 2, 1)
_sel.add_equivalence(CPhaseGate(theta), def_cphase)
# CPhaseGate
#
# q_0: ββ ββββ q_0: ββ ββββ
# βP(Ο΄) β‘ βU1(Ο΄)
# q_1: ββ ββββ q_1: ββ ββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
cphase_to_cu1 = QuantumCircuit(q)
cphase_to_cu1.append(CU1Gate(theta), [0, 1])
_sel.add_equivalence(CPhaseGate(theta), cphase_to_cu1)
# RGate
#
#    ┌────────┐        ┌───────────────────────┐
# q: ┤ R(ϴ,φ) ├  ≡  q: ┤ U3(ϴ,φ - π/2,π/2 - φ) ├
#    └────────┘        └───────────────────────┘
q = QuantumRegister(1, "q")
theta = Parameter("theta")
phi = Parameter("phi")
def_r = QuantumCircuit(q)
def_r.append(U3Gate(theta, phi - pi / 2, -phi + pi / 2), [q[0]])
_sel.add_equivalence(RGate(theta, phi), def_r)
# IGate
q = QuantumRegister(1, "q")
def_id = QuantumCircuit(q)
def_id.append(UGate(0, 0, 0), [q[0]])
_sel.add_equivalence(IGate(), def_id)
q = QuantumRegister(1, "q")
def_id_rx = QuantumCircuit(q)
def_id_rx.append(RXGate(0), [q[0]])
_sel.add_equivalence(IGate(), def_id_rx)
q = QuantumRegister(1, "q")
def_id_ry = QuantumCircuit(q)
def_id_ry.append(RYGate(0), [q[0]])
_sel.add_equivalence(IGate(), def_id_ry)
q = QuantumRegister(1, "q")
def_id_rz = QuantumCircuit(q)
def_id_rz.append(RZGate(0), [q[0]])
_sel.add_equivalence(IGate(), def_id_rz)
# RCCXGate
#
# βββββββββ
# q_0: β€0 β q_0: βββββββββββββββββββββββββ ββββββββββββββββββββββββ
# β β β
# q_1: β€1 Rccx β β‘ q_1: βββββββββββββ ββββββββββββΌββββββββββ ββββββββββββββ
# β β βββββββββββββ΄ββββββββββββ΄ββββββββββ΄ββββββββββββββ
# q_2: β€2 β q_2: β€ H ββ€ T ββ€ X ββ€ Tdg ββ€ X ββ€ T ββ€ X ββ€ Tdg ββ€ H β
# βββββββββ βββββββββββββββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(3, "q")
def_rccx = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[2]], []),
(TGate(), [q[2]], []),
(CXGate(), [q[1], q[2]], []),
(TdgGate(), [q[2]], []),
(CXGate(), [q[0], q[2]], []),
(TGate(), [q[2]], []),
(CXGate(), [q[1], q[2]], []),
(TdgGate(), [q[2]], []),
(HGate(), [q[2]], []),
]:
def_rccx.append(inst, qargs, cargs)
_sel.add_equivalence(RCCXGate(), def_rccx)
# RXGate
#
#    ┌───────┐        ┌────────┐
# q: ┤ Rx(ϴ) ├  ≡  q: ┤ R(ϴ,0) ├
#    └───────┘        └────────┘
q = QuantumRegister(1, "q")
theta = Parameter("theta")
def_rx = QuantumCircuit(q)
def_rx.append(RGate(theta, 0), [q[0]], [])
_sel.add_equivalence(RXGate(theta), def_rx)
# CRXGate
#
# q_0: βββββ ββββ q_0: ββββββββββββββ βββββββββββββββββββββ ββββββββββββββββββββ
# βββββ΄ββββ β‘ ββββββββββββββ΄βββββββββββββββββββββ΄ββββββββββββββββββββ
# q_1: β€ Rx(Ο΄) β q_1: β€ U1(Ο/2) ββ€ X ββ€ U3(-Ο΄/2,0,0) ββ€ X ββ€ U3(Ο΄/2,-Ο/2,0) β
# βββββββββ βββββββββββββββββββββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_crx = QuantumCircuit(q)
for inst, qargs, cargs in [
(U1Gate(pi / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(-theta / 2, 0, 0), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(theta / 2, -pi / 2, 0), [q[1]], []),
]:
def_crx.append(inst, qargs, cargs)
_sel.add_equivalence(CRXGate(theta), def_crx)
# CRXGate
#
# q_0: βββββ ββββ q_0: ββββββββ βββββββββββββββββ ββββββββββββββββββββ
# βββββ΄ββββ β‘ ββββββββ΄βββββββββββββββββ΄ββββββββββββββββββββ
# q_1: β€ Rx(Ο΄) β q_1: β€ S ββ€ X ββ€ Ry(-Ο΄/2) ββ€ X ββ€ Ry(Ο΄/2) ββ€ Sdg β
# βββββββββ βββββββββββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
crx_to_srycx = QuantumCircuit(q)
for inst, qargs, cargs in [
(SGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RYGate(-theta / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RYGate(theta / 2), [q[1]], []),
(SdgGate(), [q[1]], []),
]:
crx_to_srycx.append(inst, qargs, cargs)
_sel.add_equivalence(CRXGate(theta), crx_to_srycx)
# CRX in terms of one RXX
# βββββ βββββββββββββββββββ
# q_0: βββββ ββββ q_0: ββββ€ H βββββ€0 ββ€ H β
# βββββ΄ββββ β‘ ββββ΄ββββ΄ββββ Rxx(-Ο΄/2) ββββββ
# q_1: β€ Rx(Ο΄) β q_1: β€ Rx(Ο΄/2) ββ€1 ββββββ
# βββββββββ βββββββββββββββββββββββββ
theta = Parameter("theta")
crx_to_rxx = QuantumCircuit(2)
crx_to_rxx.h(0)
crx_to_rxx.rx(theta / 2, 1)
crx_to_rxx.rxx(-theta / 2, 0, 1)
crx_to_rxx.h(0)
_sel.add_equivalence(CRXGate(theta), crx_to_rxx)
# CRX to CRZ
#
# q_0: βββββ ββββ q_0: ββββββββββ βββββββββ
# βββββ΄ββββ β‘ ββββββββββ΄βββββββββ
# q_1: β€ Rx(Ο΄) β q_1: β€ H ββ€ Rz(Ο΄) ββ€ H β
# βββββββββ βββββββββββββββββββ
theta = Parameter("theta")
crx_to_crz = QuantumCircuit(2)
crx_to_crz.h(1)
crx_to_crz.crz(theta, 0, 1)
crx_to_crz.h(1)
_sel.add_equivalence(CRXGate(theta), crx_to_crz)
# RXXGate
#
# βββββββββββ βββββ βββββ
# q_0: β€0 β q_0: β€ H ββββ ββββββββββββββ βββ€ H β
# β Rxx(Ο΄) β β‘ βββββ€βββ΄ββββββββββββββ΄βββββββ€
# q_1: β€1 β q_1: β€ H ββ€ X ββ€ Rz(Ο΄) ββ€ X ββ€ H β
# βββββββββββ βββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_rxx = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[0]], []),
(HGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RZGate(theta), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
(HGate(), [q[0]], []),
]:
def_rxx.append(inst, qargs, cargs)
_sel.add_equivalence(RXXGate(theta), def_rxx)
# RXX to RZX
# βββββββββββ βββββββββββββββββββββ
# q_0: β€0 β q_0: β€ H ββ€0 ββ€ H β
# β Rxx(Ο΄) β β‘ ββββββ Rzx(Ο΄) ββββββ
# q_1: β€1 β q_1: ββββββ€1 ββββββ
# βββββββββββ βββββββββββ
theta = Parameter("theta")
rxx_to_rzx = QuantumCircuit(2)
rxx_to_rzx.h(0)
rxx_to_rzx.rzx(theta, 0, 1)
rxx_to_rzx.h(0)
_sel.add_equivalence(RXXGate(theta), rxx_to_rzx)
# RXX to RZZ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
rxx_to_rzz = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[0]], []),
(HGate(), [q[1]], []),
(RZZGate(theta), [q[0], q[1]], []),
(HGate(), [q[0]], []),
(HGate(), [q[1]], []),
]:
rxx_to_rzz.append(inst, qargs, cargs)
_sel.add_equivalence(RXXGate(theta), rxx_to_rzz)
# RZXGate
#
# βββββββββββ
# q_0: β€0 β q_0: ββββββββ ββββββββββββββ βββββββ
# β Rzx(Ο΄) β β‘ ββββββββ΄ββββββββββββββ΄βββββββ
# q_1: β€1 β q_1: β€ H ββ€ X ββ€ Rz(Ο΄) ββ€ X ββ€ H β
# βββββββββββ βββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_rzx = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RZGate(theta), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
]:
def_rzx.append(inst, qargs, cargs)
_sel.add_equivalence(RZXGate(theta), def_rzx)
# RYGate
#
#    ┌───────┐        ┌──────────┐
# q: ┤ Ry(ϴ) ├  ≡  q: ┤ R(ϴ,π/2) ├
#    └───────┘        └──────────┘
q = QuantumRegister(1, "q")
theta = Parameter("theta")
def_ry = QuantumCircuit(q)
def_ry.append(RGate(theta, pi / 2), [q[0]], [])
_sel.add_equivalence(RYGate(theta), def_ry)
q = QuantumRegister(1, "q")
ry_to_rx = QuantumCircuit(q)
ry_to_rx.sdg(0)
ry_to_rx.rx(theta, 0)
ry_to_rx.s(0)
_sel.add_equivalence(RYGate(theta), ry_to_rx)
# CRYGate
#
# q_0: βββββ ββββ q_0: ββββββββββββββ βββββββββββββββββ ββ
# βββββ΄ββββ β‘ ββββββββββββββ΄βββββββββββββββββ΄ββ
# q_1: β€ Ry(Ο΄) β q_1: β€ Ry(Ο΄/2) ββ€ X ββ€ Ry(-Ο΄/2) ββ€ X β
# βββββββββ βββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_cry = QuantumCircuit(q)
for inst, qargs, cargs in [
(RYGate(theta / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RYGate(-theta / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
]:
def_cry.append(inst, qargs, cargs)
_sel.add_equivalence(CRYGate(theta), def_cry)
# CRY to CRZ
#
# q_0: βββββ ββββ q_0: ββββββββββββββββ ββββββββββββββββ
# βββββ΄ββββ β‘ ββββββββββββββββ΄ββββββββββββββββ
# q_1: β€ Ry(Ο΄) β q_1: β€ Rx(Ο/2) ββ€ Rz(Ο΄) ββ€ Rx(-Ο/2) β
# βββββββββ ββββββββββββββββββββββββββββββββ
theta = Parameter("theta")
cry_to_crz = QuantumCircuit(2)
cry_to_crz.rx(pi / 2, 1)
cry_to_crz.crz(theta, 0, 1)
cry_to_crz.rx(-pi / 2, 1)
_sel.add_equivalence(CRYGate(theta), cry_to_crz)
# CRY to CRZ
#
# q_0: βββββ ββββ q_0: βββββββββββββββββββββ βββββββββββββββββββββ
# βββββ΄ββββ β‘ βββββββββββββββββββββ΄βββββββββββββββββββββ
# q_1: β€ Ry(Ο΄) β q_1: β€ H ββ€ Rz(Ο/2) ββ€ Rx(Ο΄) ββ€ Rz(-Ο/2) ββ€ H β
# βββββββββ ββββββββββββββββββββββββββββββββββββββββββ
theta = Parameter("theta")
cry_to_crx = QuantumCircuit(2)
cry_to_crx.h(1)
cry_to_crx.rz(pi / 2, 1)
cry_to_crx.crx(theta, 0, 1)
cry_to_crx.rz(-pi / 2, 1)
cry_to_crx.h(1)
_sel.add_equivalence(CRYGate(theta), cry_to_crx)
# CRY to RZZ
#
# q_0: βββββ ββββ q_0: βββββββββββββββββββββββββ βββββββββββββββββββ
# βββββ΄ββββ β‘ βββββββββββββββββββββββ βZZ(-Ο΄/2) ββββββββββ
# q_1: β€ Ry(Ο΄) β q_1: β€ Sdg ββ€ Rx(Ο΄/2) ββ€ H βββ ββββββββββ€ H ββ€ S β
# βββββββββ βββββββββββββββββββββββ ββββββββββ
cry_to_rzz = QuantumCircuit(2)
cry_to_rzz.sdg(1)
cry_to_rzz.rx(theta / 2, 1)
cry_to_rzz.h(1)
cry_to_rzz.rzz(-theta / 2, 0, 1)
cry_to_rzz.h(1)
cry_to_rzz.s(1)
_sel.add_equivalence(CRYGate(theta), cry_to_rzz)
# RYYGate
#
# βββββββββββ βββββββββββ ββββββββββββ
# q_0: β€0 β q_0: β€ Rx(Ο/2) ββββ ββββββββββββββ βββ€ Rx(-Ο/2) β
# β Ryy(Ο΄) β β‘ βββββββββββ€βββ΄ββββββββββββββ΄ββββββββββββββ€
# q_1: β€1 β q_1: β€ Rx(Ο/2) ββ€ X ββ€ Rz(Ο΄) ββ€ X ββ€ Rx(-Ο/2) β
# βββββββββββ ββββββββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_ryy = QuantumCircuit(q)
for inst, qargs, cargs in [
(RXGate(pi / 2), [q[0]], []),
(RXGate(pi / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RZGate(theta), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RXGate(-pi / 2), [q[0]], []),
(RXGate(-pi / 2), [q[1]], []),
]:
def_ryy.append(inst, qargs, cargs)
_sel.add_equivalence(RYYGate(theta), def_ryy)
# RYY to RZZ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
ryy_to_rzz = QuantumCircuit(q)
for inst, qargs, cargs in [
(RXGate(pi / 2), [q[0]], []),
(RXGate(pi / 2), [q[1]], []),
(RZZGate(theta), [q[0], q[1]], []),
(RXGate(-pi / 2), [q[0]], []),
(RXGate(-pi / 2), [q[1]], []),
]:
ryy_to_rzz.append(inst, qargs, cargs)
_sel.add_equivalence(RYYGate(theta), ryy_to_rzz)
# RYY to RXX
q = QuantumRegister(2, "q")
theta = Parameter("theta")
ryy_to_rxx = QuantumCircuit(q)
for inst, qargs, cargs in [
(SdgGate(), [q[0]], []),
(SdgGate(), [q[1]], []),
(RXXGate(theta), [q[0], q[1]], []),
(SGate(), [q[0]], []),
(SGate(), [q[1]], []),
]:
ryy_to_rxx.append(inst, qargs, cargs)
_sel.add_equivalence(RYYGate(theta), ryy_to_rxx)
# RZGate
# global phase: -ϴ/2
#    ┌───────┐        ┌───────┐
# q: ┤ Rz(ϴ) ├  ≡  q: ┤ U1(ϴ) ├
#    └───────┘        └───────┘
q = QuantumRegister(1, "q")
theta = Parameter("theta")
def_rz = QuantumCircuit(q, global_phase=-theta / 2)
def_rz.append(U1Gate(theta), [q[0]], [])
_sel.add_equivalence(RZGate(theta), def_rz)
# RZGate
#
# βββββββββ ββββββββββββββββββββββββ
# q: β€ Rz(Ο΄) β β‘ q: β€ βX ββ€ Ry(-Ο΄) ββ€ βXdg β
# βββββββββ ββββββββββββββββββββββββ
q = QuantumRegister(1, "q")
rz_to_sxry = QuantumCircuit(q)
rz_to_sxry.sx(0)
rz_to_sxry.ry(-theta, 0)
rz_to_sxry.sxdg(0)
_sel.add_equivalence(RZGate(theta), rz_to_sxry)
q = QuantumRegister(1, "q")
rz_to_rx = QuantumCircuit(q)
rz_to_rx.h(0)
rz_to_rx.rx(theta, 0)
rz_to_rx.h(0)
_sel.add_equivalence(RZGate(theta), rz_to_rx)
# CRZGate
#
# q_0: βββββ ββββ q_0: ββββββββββββββ βββββββββββββββββ ββ
# βββββ΄ββββ β‘ ββββββββββββββ΄βββββββββββββββββ΄ββ
# q_1: β€ Rz(Ο΄) β q_1: β€ Rz(Ο΄/2) ββ€ X ββ€ Rz(-Ο΄/2) ββ€ X β
# βββββββββ βββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_crz = QuantumCircuit(q)
for inst, qargs, cargs in [
(RZGate(theta / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RZGate(-theta / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
]:
def_crz.append(inst, qargs, cargs)
_sel.add_equivalence(CRZGate(theta), def_crz)
# CRZ to CRY
#
# q_0: βββββ ββββ q_0: βββββββββββββββββ βββββββββββββββ
# βββββ΄ββββ β‘ βββββββββββββββββ΄βββββββββββββββ
# q_1: β€ Rz(Ο΄) β q_1: β€ Rx(-Ο/2) ββ€ Ry(Ο΄) ββ€ Rx(Ο/2) β
# βββββββββ ββββββββββββββββββββββββββββββββ
theta = Parameter("theta")
crz_to_cry = QuantumCircuit(2)
crz_to_cry.rx(-pi / 2, 1)
crz_to_cry.cry(theta, 0, 1)
crz_to_cry.rx(pi / 2, 1)
_sel.add_equivalence(CRZGate(theta), crz_to_cry)
# CRZ to CRX
#
# q_0: βββββ ββββ q_0: ββββββββββ βββββββββ
# βββββ΄ββββ β‘ ββββββββββ΄βββββββββ
# q_1: β€ Rz(Ο΄) β q_1: β€ H ββ€ Rx(Ο΄) ββ€ H β
# βββββββββ βββββββββββββββββββ
theta = Parameter("theta")
crz_to_crx = QuantumCircuit(2)
crz_to_crx.h(1)
crz_to_crx.crx(theta, 0, 1)
crz_to_crx.h(1)
_sel.add_equivalence(CRZGate(theta), crz_to_crx)
# CRZ to RZZ
#
# q_0: βββββ ββββ q_0: βββββββββββββ ββββββββ
# βββββ΄ββββ β‘ βββββββββββ βZZ(-Ο΄/2)
# q_1: β€ Rz(Ο΄) β q_1: β€ Rz(Ο΄/2) βββ ββββββββ
# βββββββββ βββββββββββ
theta = Parameter("theta")
crz_to_rzz = QuantumCircuit(2)
crz_to_rzz.rz(theta / 2, 1)
crz_to_rzz.rzz(-theta / 2, 0, 1)
_sel.add_equivalence(CRZGate(theta), crz_to_rzz)
# RZZGate
#
# q_0: ββ βββββ q_0: βββ ββββββββββββββ ββ
# βZZ(Ο΄) β‘ βββ΄ββββββββββββββ΄ββ
# q_1: ββ βββββ q_1: β€ X ββ€ Rz(Ο΄) ββ€ X β
# βββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_rzz = QuantumCircuit(q)
for inst, qargs, cargs in [
(CXGate(), [q[0], q[1]], []),
(RZGate(theta), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
]:
def_rzz.append(inst, qargs, cargs)
_sel.add_equivalence(RZZGate(theta), def_rzz)
# RZZ to RXX
q = QuantumRegister(2, "q")
theta = Parameter("theta")
rzz_to_rxx = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[0]], []),
(HGate(), [q[1]], []),
(RXXGate(theta), [q[0], q[1]], []),
(HGate(), [q[0]], []),
(HGate(), [q[1]], []),
]:
rzz_to_rxx.append(inst, qargs, cargs)
_sel.add_equivalence(RZZGate(theta), rzz_to_rxx)
# RZZ to RZX
# βββββββββββ
# q_0: ββ βββββ q_0: ββββββ€0 ββββββ
# βZZ(Ο΄) β‘ ββββββ Rzx(Ο΄) ββββββ
# q_1: ββ βββββ q_1: β€ H ββ€1 ββ€ H β
# βββββββββββββββββββββ
theta = Parameter("theta")
rzz_to_rzx = QuantumCircuit(2)
rzz_to_rzx.h(1)
rzz_to_rzx.rzx(theta, 0, 1)
rzz_to_rzx.h(1)
_sel.add_equivalence(RZZGate(theta), rzz_to_rzx)
# RZZ to RYY
q = QuantumRegister(2, "q")
theta = Parameter("theta")
rzz_to_ryy = QuantumCircuit(q)
for inst, qargs, cargs in [
(RXGate(-pi / 2), [q[0]], []),
(RXGate(-pi / 2), [q[1]], []),
(RYYGate(theta), [q[0], q[1]], []),
(RXGate(pi / 2), [q[0]], []),
(RXGate(pi / 2), [q[1]], []),
]:
rzz_to_ryy.append(inst, qargs, cargs)
_sel.add_equivalence(RZZGate(theta), rzz_to_ryy)
# RZXGate
#
# βββββββββββ
# q_0: β€0 β q_0: ββββββββ ββββββββββββββ βββββββ
# β Rzx(Ο΄) β β‘ ββββββββ΄ββββββββββββββ΄βββββββ
# q_1: β€1 β q_1: β€ H ββ€ X ββ€ Rz(Ο΄) ββ€ X ββ€ H β
# βββββββββββ βββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_rzx = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RZGate(theta), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
]:
def_rzx.append(inst, qargs, cargs)
_sel.add_equivalence(RZXGate(theta), def_rzx)
# ECRGate
#
# ββββββββ ββββββββββββββββββββββββββββββββ
# q_0: β€0 β q_0: β€0 ββ€ X ββ€0 β
# β Ecr β β‘ β Rzx(Ο/4) βββββββ Rzx(-Ο/4) β
# q_1: β€1 β q_1: β€1 βββββββ€1 β
# ββββββββ βββββββββββββ ββββββββββββββ
q = QuantumRegister(2, "q")
def_ecr = QuantumCircuit(q)
for inst, qargs, cargs in [
(RZXGate(pi / 4), [q[0], q[1]], []),
(XGate(), [q[0]], []),
(RZXGate(-pi / 4), [q[0], q[1]], []),
]:
def_ecr.append(inst, qargs, cargs)
_sel.add_equivalence(ECRGate(), def_ecr)
# ECRGate decomposed to Clifford gates (up to a global phase)
#
# global phase: 7Ο/4
# ββββββββ βββββ βββββ
# q_0: β€0 β q_0: β€ S βββββ βββ€ X β
# β Ecr β β‘ βββββ΄ββββ΄βββββββ
# q_1: β€1 β q_1: β€ βX ββ€ X ββββββ
# ββββββββ βββββββββββ
q = QuantumRegister(2, "q")
def_ecr_cliff = QuantumCircuit(q, global_phase=-pi / 4)
for inst, qargs, cargs in [
(SGate(), [q[0]], []),
(SXGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(XGate(), [q[0]], []),
]:
def_ecr_cliff.append(inst, qargs, cargs)
_sel.add_equivalence(ECRGate(), def_ecr_cliff)
# CXGate decomposed using an ECRGate and Clifford 1-qubit gates
# global phase: Ο/4
# q_0: βββ ββ βββββββ βββββββββββββ
# βββ΄ββ β‘ q_0: β€ Sdg βββ€0 ββ€ X β
# q_1: β€ X β βββββββ΄ββ Ecr ββββββ
# βββββ q_1: β€ βXdg ββ€1 ββββββ
# ββββββββββββββββ
q = QuantumRegister(2, "q")
def_ecr_to_cx_cliff = QuantumCircuit(q, global_phase=pi / 4)
for inst, qargs, cargs in [
(SdgGate(), [q[0]], []),
(SXdgGate(), [q[1]], []),
(ECRGate(), [q[0], q[1]], []),
(XGate(), [q[0]], []),
]:
def_ecr_to_cx_cliff.append(inst, qargs, cargs)
_sel.add_equivalence(CXGate(), def_ecr_to_cx_cliff)
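# Editor's note: illustrative check, not part of the original file. Both ECR-related rules
# above can be validated numerically; ``Operator.equiv`` ignores the stated global phases.
def _check_ecr_equivalences():
    """Verify the ECR Clifford decomposition and the ECR-based CX decomposition."""
    from qiskit.quantum_info import Operator

    assert Operator(def_ecr_cliff).equiv(Operator(ECRGate()))
    assert Operator(def_ecr_to_cx_cliff).equiv(Operator(CXGate()))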
# SGate
#
#    ┌───┐        ┌─────────┐
# q: ┤ S ├  ≡  q: ┤ U1(π/2) ├
#    └───┘        └─────────┘
q = QuantumRegister(1, "q")
def_s = QuantumCircuit(q)
def_s.append(U1Gate(pi / 2), [q[0]], [])
_sel.add_equivalence(SGate(), def_s)
# SdgGate
#
#    ┌─────┐        ┌──────────┐
# q: ┤ Sdg ├  ≡  q: ┤ U1(-π/2) ├
#    └─────┘        └──────────┘
q = QuantumRegister(1, "q")
def_sdg = QuantumCircuit(q)
def_sdg.append(U1Gate(-pi / 2), [q[0]], [])
_sel.add_equivalence(SdgGate(), def_sdg)
# CSGate
#
# q_0: βββ ββ q_0: ββββββββ ββββββββ
# βββ΄ββ ββββββββ΄ββββββββ
# q_1: β€ S β = q_1: β€ H ββ€ Sx ββ€ H β
# βββββ ββββββββββββββββ
q = QuantumRegister(2, "q")
def_cs = QuantumCircuit(q)
def_cs.append(HGate(), [q[1]], [])
def_cs.append(CSXGate(), [q[0], q[1]], [])
def_cs.append(HGate(), [q[1]], [])
_sel.add_equivalence(CSGate(), def_cs)
# CSdgGate
#
# q_0: ββββ βββ q_0: ββββββββ βββββ ββββββββ
# ββββ΄βββ ββββββββ΄βββββ΄ββββββββ
# q_1: β€ Sdg β = q_1: β€ H ββ€ X ββ€ Sx ββ€ H β
# βββββββ βββββββββββββββββββββ
q = QuantumRegister(2, "q")
def_csdg = QuantumCircuit(q)
def_csdg.append(HGate(), [q[1]], [])
def_csdg.append(CXGate(), [q[0], q[1]], [])
def_csdg.append(CSXGate(), [q[0], q[1]], [])
def_csdg.append(HGate(), [q[1]], [])
_sel.add_equivalence(CSdgGate(), def_csdg)
# SdgGate
#
# βββββββ ββββββββββ
# q: β€ Sdg β β‘ q: β€ S ββ€ Z β
# βββββββ ββββββββββ
q = QuantumRegister(1, "q")
def_sdg = QuantumCircuit(q)
for inst, qargs, cargs in [
(SGate(), [q[0]], []),
(ZGate(), [q[0]], []),
]:
def_sdg.append(inst, qargs, cargs)
_sel.add_equivalence(SdgGate(), def_sdg)
# SdgGate
#
# βββββββ ββββββββββ
# q: β€ Sdg β β‘ q: β€ Z ββ€ S β
# βββββββ ββββββββββ
q = QuantumRegister(1, "q")
def_sdg = QuantumCircuit(q)
for inst, qargs, cargs in [
(ZGate(), [q[0]], []),
(SGate(), [q[0]], []),
]:
def_sdg.append(inst, qargs, cargs)
_sel.add_equivalence(SdgGate(), def_sdg)
# SdgGate
#
# βββββββ βββββββββββββββ
# q: β€ Sdg β β‘ q: β€ S ββ€ S ββ€ S β
# βββββββ βββββββββββββββ
q = QuantumRegister(1, "q")
def_sdg = QuantumCircuit(q)
for inst, qargs, cargs in [
(SGate(), [q[0]], []),
(SGate(), [q[0]], []),
(SGate(), [q[0]], []),
]:
def_sdg.append(inst, qargs, cargs)
_sel.add_equivalence(SdgGate(), def_sdg)
# SwapGate
# βββββ
# q_0: βXβ q_0: βββ βββ€ X ββββ ββ
# β β‘ βββ΄βββββ¬βββββ΄ββ
# q_1: βXβ q_1: β€ X ββββ βββ€ X β
# βββββ βββββ
q = QuantumRegister(2, "q")
def_swap = QuantumCircuit(q)
for inst, qargs, cargs in [
(CXGate(), [q[0], q[1]], []),
(CXGate(), [q[1], q[0]], []),
(CXGate(), [q[0], q[1]], []),
]:
def_swap.append(inst, qargs, cargs)
_sel.add_equivalence(SwapGate(), def_swap)
# SwapGate
#
# q_0: βXβ
# β β‘
# q_1: βXβ
#
# ββββββββββββββββββββ ββββββ ββββββββββββββββββββββββββββ
# q_0: β€ Rz(-Ο/2) ββ€0 βββββ€ βX βββββ€1 ββ€ Rz(-Ο/2) ββ€0 β
# ββββ¬βββββ¬ββββ Ecr βββββ΄βββββ΄ββββ Ecr βββββ¬βββββ¬ββββ Ecr β
# q_1: ββββ€ βX βββββ€1 ββ€ Rz(-Ο/2) ββ€0 βββββ€ βX βββββ€1 β
# ββββββ ββββββββββββββββββββββββββββ ββββββ ββββββββ
#
q = QuantumRegister(2, "q")
def_swap_ecr = QuantumCircuit(q)
def_swap_ecr.rz(-pi / 2, 0)
def_swap_ecr.sx(1)
def_swap_ecr.ecr(0, 1)
def_swap_ecr.rz(-pi / 2, 1)
def_swap_ecr.sx(0)
def_swap_ecr.ecr(1, 0)
def_swap_ecr.rz(-pi / 2, 0)
def_swap_ecr.sx(1)
def_swap_ecr.ecr(0, 1)
_sel.add_equivalence(SwapGate(), def_swap_ecr)
# SwapGate
#
# q_0: βXβ
# β β‘
# q_1: βXβ
#
# global phase: 3Ο/2
# ββββββ ββββββ ββββββ
# q_0: β€ βX βββ ββ€ βX βββ ββ€ βX βββ β
# ββββββ€ β ββββββ€ β ββββββ€ β
# q_1: β€ βX βββ ββ€ βX βββ ββ€ βX βββ β
# ββββββ ββββββ ββββββ
q = QuantumRegister(2, "q")
def_swap_cz = QuantumCircuit(q, global_phase=-pi / 2)
def_swap_cz.sx(0)
def_swap_cz.sx(1)
def_swap_cz.cz(0, 1)
def_swap_cz.sx(0)
def_swap_cz.sx(1)
def_swap_cz.cz(0, 1)
def_swap_cz.sx(0)
def_swap_cz.sx(1)
def_swap_cz.cz(0, 1)
_sel.add_equivalence(SwapGate(), def_swap_cz)
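# Editor's note: illustrative check, not part of the original file. The two hardware-oriented
# SWAP decompositions above can be compared against SwapGate up to global phase.
def _check_swap_decompositions():
    """Verify the ECR- and CZ-based SWAP decompositions."""
    from qiskit.quantum_info import Operator

    target = Operator(SwapGate())
    assert Operator(def_swap_ecr).equiv(target)
    assert Operator(def_swap_cz).equiv(target)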
# iSwapGate
#
# ββββββββββ ββββββββββ βββββ
# q_0: β€0 β q_0: β€ S ββ€ H ββββ βββ€ X ββββββ
# β Iswap β β‘ βββββ€ββββββββ΄βββββ¬βββββββ
# q_1: β€1 β q_1: β€ S βββββββ€ X ββββ βββ€ H β
# ββββββββββ βββββ βββββ βββββ
q = QuantumRegister(2, "q")
def_iswap = QuantumCircuit(q)
for inst, qargs, cargs in [
(SGate(), [q[0]], []),
(SGate(), [q[1]], []),
(HGate(), [q[0]], []),
(CXGate(), [q[0], q[1]], []),
(CXGate(), [q[1], q[0]], []),
(HGate(), [q[1]], []),
]:
def_iswap.append(inst, qargs, cargs)
_sel.add_equivalence(iSwapGate(), def_iswap)
# SXGate
# global phase: Ο/4
# ββββββ βββββββββββββββββββ
# q: β€ βX β β‘ q: β€ Sdg ββ€ H ββ€ Sdg β
# ββββββ βββββββββββββββββββ
q = QuantumRegister(1, "q")
def_sx = QuantumCircuit(q, global_phase=pi / 4)
for inst, qargs, cargs in [(SdgGate(), [q[0]], []), (HGate(), [q[0]], []), (SdgGate(), [q[0]], [])]:
def_sx.append(inst, qargs, cargs)
_sel.add_equivalence(SXGate(), def_sx)
# HGate decomposed into SXGate and SGate
# global phase: -Ο/4
# βββββ ββββββββββββββββ
# q: β€ H β β‘ q: β€ S ββ€ βX ββ€ S β
# βββββ ββββββββββββββββ
q = QuantumRegister(1, "q")
def_h_to_sx = QuantumCircuit(q, global_phase=-pi / 4)
for inst, qargs, cargs in [(SGate(), [q[0]], []), (SXGate(), [q[0]], []), (SGate(), [q[0]], [])]:
def_h_to_sx.append(inst, qargs, cargs)
_sel.add_equivalence(HGate(), def_h_to_sx)
# SXGate
# global phase: Ο/4
# ββββββ βββββββββββ
# q: β€ βX β β‘ q: β€ Rx(Ο/2) β
# ββββββ βββββββββββ
q = QuantumRegister(1, "q")
sx_to_rx = QuantumCircuit(q, global_phase=pi / 4)
sx_to_rx.rx(pi / 2, 0)
_sel.add_equivalence(SXGate(), sx_to_rx)
# SXdgGate
# global phase: 7Ο/4
# ββββββββ βββββββββββββββ
# q: β€ βXdg β β‘ q: β€ S ββ€ H ββ€ S β
# ββββββββ βββββββββββββββ
q = QuantumRegister(1, "q")
def_sxdg = QuantumCircuit(q, global_phase=-pi / 4)
for inst, qargs, cargs in [(SGate(), [q[0]], []), (HGate(), [q[0]], []), (SGate(), [q[0]], [])]:
def_sxdg.append(inst, qargs, cargs)
_sel.add_equivalence(SXdgGate(), def_sxdg)
# HGate decomposed into SXdgGate and SdgGate
# global phase: Ο/4
# βββββ ββββββββββββββββββββββ
# q: β€ H β β‘ q: β€ Sdg ββ€ βXdg ββ€ Sdg β
# βββββ ββββββββββββββββββββββ
q = QuantumRegister(1, "q")
def_h_to_sxdg = QuantumCircuit(q, global_phase=pi / 4)
for inst, qargs, cargs in [
(SdgGate(), [q[0]], []),
(SXdgGate(), [q[0]], []),
(SdgGate(), [q[0]], []),
]:
def_h_to_sxdg.append(inst, qargs, cargs)
_sel.add_equivalence(HGate(), def_h_to_sxdg)
# SXdgGate
# global phase: 7Ο/4
# ββββββββ ββββββββββββ
# q: β€ βXdg β β‘ q: β€ Rx(-Ο/2) β
# ββββββββ ββββββββββββ
q = QuantumRegister(1, "q")
sxdg_to_rx = QuantumCircuit(q, global_phase=-pi / 4)
sxdg_to_rx.rx(-pi / 2, 0)
_sel.add_equivalence(SXdgGate(), sxdg_to_rx)
# CSXGate
#
# q_0: βββ βββ q_0: βββββββ βββββββββββββ
# βββ΄βββ β‘ βββββ βU1(Ο/2) βββββ
# q_1: β€ Sx β q_1: β€ H βββ βββββββββ€ H β
# ββββββ βββββ βββββ
q = QuantumRegister(2, "q")
def_csx = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[1]], []),
(CU1Gate(pi / 2), [q[0], q[1]], []),
(HGate(), [q[1]], []),
]:
def_csx.append(inst, qargs, cargs)
_sel.add_equivalence(CSXGate(), def_csx)
# CSXGate
# global phase: Ο/8
# ββββββββββββββββββ βββββββ βββββ
# q_0: βββ βββ q_0: β€ X ββ€0 βββ€ Tdg ββββ€ X β
# βββ΄βββ β‘ ββββββ Rzx(Ο/4) βββ΄ββββββ΄βββββββ
# q_1: β€ Sx β q_1: ββββββ€1 ββ€ sx^0.5 ββββββ
# ββββββ βββββββββββββββββββββββ
q = QuantumRegister(2, "q")
csx_to_zx45 = QuantumCircuit(q, global_phase=pi / 4)
for inst, qargs, cargs in [
(XGate(), [q[0]], []),
(RZXGate(pi / 4), [q[0], q[1]], []),
(TdgGate(), [q[0]], []),
(XGate(), [q[0]], []),
(RXGate(pi / 4), [q[1]], []),
]:
csx_to_zx45.append(inst, qargs, cargs)
_sel.add_equivalence(CSXGate(), csx_to_zx45)
# DCXGate
#
# ββββββββ βββββ
# q_0: β€0 β q_0: βββ βββ€ X β
# β Dcx β β‘ βββ΄βββββ¬ββ
# q_1: β€1 β q_1: β€ X ββββ ββ
# ββββββββ βββββ
q = QuantumRegister(2, "q")
def_dcx = QuantumCircuit(q)
for inst, qargs, cargs in [(CXGate(), [q[0], q[1]], []), (CXGate(), [q[1], q[0]], [])]:
def_dcx.append(inst, qargs, cargs)
_sel.add_equivalence(DCXGate(), def_dcx)
# DCXGate
#
# ββββββββ βββββ βββββββββββββββββ
# q_0: β€0 β q_0: ββ€ H βββ€ Sdg ββ€0 ββββββ
# β Dcx β β‘ ββ΄ββββ΄βββββββββ Iswap ββββββ
# q_1: β€1 β q_1: β€ Sdg βββββββββ€1 ββ€ H β
# ββββββββ βββββββ βββββββββββββββ
q = QuantumRegister(2, "q")
dcx_to_iswap = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[0]], []),
(SdgGate(), [q[0]], []),
(SdgGate(), [q[1]], []),
(iSwapGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
]:
dcx_to_iswap.append(inst, qargs, cargs)
_sel.add_equivalence(DCXGate(), dcx_to_iswap)
# CSwapGate
#
# q_0: ββ β q_0: ββββββββ βββββββ
# β βββββ β βββββ
# q_1: βXβ β‘ q_1: β€ X ββββ βββ€ X β
# β βββ¬βββββ΄βββββ¬ββ
# q_2: βXβ q_2: βββ βββ€ X ββββ ββ
# βββββ
q = QuantumRegister(3, "q")
def_cswap = QuantumCircuit(q)
for inst, qargs, cargs in [
(CXGate(), [q[2], q[1]], []),
(CCXGate(), [q[0], q[1], q[2]], []),
(CXGate(), [q[2], q[1]], []),
]:
def_cswap.append(inst, qargs, cargs)
_sel.add_equivalence(CSwapGate(), def_cswap)
# TGate
#
# βββββ βββββββββββ
# q: β€ T β β‘ q: β€ U1(Ο/4) β
# βββββ βββββββββββ
q = QuantumRegister(1, "q")
def_t = QuantumCircuit(q)
def_t.append(U1Gate(pi / 4), [q[0]], [])
_sel.add_equivalence(TGate(), def_t)
# TdgGate
#
# βββββββ ββββββββββββ
# q: β€ Tdg β β‘ q: β€ U1(-Ο/4) β
# βββββββ ββββββββββββ
q = QuantumRegister(1, "q")
def_tdg = QuantumCircuit(q)
def_tdg.append(U1Gate(-pi / 4), [q[0]], [])
_sel.add_equivalence(TdgGate(), def_tdg)
# UGate
#
# ββββββββββββ βββββββββββββ
# q: β€ U(ΞΈ,Ο,Ξ») β β‘ q: β€ U3(ΞΈ,Ο,Ξ») β
# ββββββββββββ βββββββββββββ
q = QuantumRegister(1, "q")
theta = Parameter("theta")
phi = Parameter("phi")
lam = Parameter("lam")
u_to_u3 = QuantumCircuit(q)
u_to_u3.append(U3Gate(theta, phi, lam), [0])
_sel.add_equivalence(UGate(theta, phi, lam), u_to_u3)
# CUGate
# ββββββββ ββββββββββββββββ Β»
# q_0: βββββββ βββββββ q_0: βββββ€ P(Ξ³) ββββββ€ P(Ξ»/2 + Ο/2) ββββ ββΒ»
# βββββββ΄βββββββ β‘ βββββ΄βββββββ΄βββββββββββββββββββββββ΄ββΒ»
# q_1: β€ U(ΞΈ,Ο,Ξ»,Ξ³) β q_1: β€ P(Ξ»/2 - Ο/2) ββββββββββββββββββ€ X βΒ»
# ββββββββββββββ ββββββββββββββββ βββββΒ»
# Β«
# Β«q_0: βββββββββββββββββββββββββββ ββββββββββββββββ
# Β« βββββββββββββββββββββββββββ΄ββββββββββββββββ
# Β«q_1: β€ U(-ΞΈ/2,Ο,-Ξ»/2 - Ο/2) ββ€ X ββ€ U(ΞΈ/2,Ο,0) β
# Β« βββββββββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
phi = Parameter("phi")
lam = Parameter("lam")
gamma = Parameter("gamma")
def_cu = QuantumCircuit(q)
def_cu.p(gamma, 0)
def_cu.p((lam + phi) / 2, 0)
def_cu.p((lam - phi) / 2, 1)
def_cu.cx(0, 1)
def_cu.u(-theta / 2, 0, -(phi + lam) / 2, 1)
def_cu.cx(0, 1)
def_cu.u(theta / 2, phi, 0, 1)
_sel.add_equivalence(CUGate(theta, phi, lam, gamma), def_cu)
# CUGate
# ββββββββ
# q_0: βββββββ βββββββ q_0: β€ P(Ξ³) ββββββββ ββββββ
# βββββββ΄βββββββ β‘ βββββββββββββββ΄ββββββ
# q_1: β€ U(ΞΈ,Ο,Ξ»,Ξ³) β q_1: βββββββββ€ U3(ΞΈ,Ο,Ξ») β
# ββββββββββββββ βββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
phi = Parameter("phi")
lam = Parameter("lam")
gamma = Parameter("gamma")
cu_to_cu3 = QuantumCircuit(q)
cu_to_cu3.p(gamma, 0)
cu_to_cu3.append(CU3Gate(theta, phi, lam), [0, 1])
_sel.add_equivalence(CUGate(theta, phi, lam, gamma), cu_to_cu3)
# U1Gate
#
# βββββββββ βββββββββββββ
# q: β€ U1(ΞΈ) β β‘ q: β€ U3(0,0,ΞΈ) β
# βββββββββ βββββββββββββ
q = QuantumRegister(1, "q")
theta = Parameter("theta")
def_u1 = QuantumCircuit(q)
def_u1.append(U3Gate(0, 0, theta), [q[0]], [])
_sel.add_equivalence(U1Gate(theta), def_u1)
# U1Gate
#
# βββββββββ ββββββββ
# q: β€ U1(ΞΈ) β β‘ q: β€ P(0) β
# βββββββββ ββββββββ
q = QuantumRegister(1, "q")
theta = Parameter("theta")
u1_to_phase = QuantumCircuit(q)
u1_to_phase.p(theta, 0)
_sel.add_equivalence(U1Gate(theta), u1_to_phase)
# U1Gate
# global phase: ΞΈ/2
# βββββββββ βββββββββ
# q: β€ U1(ΞΈ) β β‘ q: β€ Rz(ΞΈ) β
# βββββββββ βββββββββ
q = QuantumRegister(1, "q")
theta = Parameter("theta")
u1_to_rz = QuantumCircuit(q, global_phase=theta / 2)
u1_to_rz.append(RZGate(theta), [q[0]], [])
_sel.add_equivalence(U1Gate(theta), u1_to_rz)
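# Illustrative spot-check (sketch only): parameterized entries can be verified
# by binding a concrete angle first.  The 0.5 below is an arbitrary sample
# value and ``Operator`` is assumed from qiskit.quantum_info.
def _sketch_check_u1_to_rz(sample_angle=0.5):
    from qiskit.quantum_info import Operator  # local import to avoid import cycles

    # Positional binding avoids keeping a reference to the Parameter object;
    # the circuit has a single free parameter, so the order is unambiguous.
    bound = u1_to_rz.assign_parameters([sample_angle])
    # The theta/2 global phase registered above is exactly what makes RZ
    # reproduce U1, phase included.
    assert Operator(bound) == Operator(U1Gate(sample_angle))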
# CU1Gate
# βββββββββββ
# q_0: ββ βββββ q_0: β€ U1(ΞΈ/2) ββββ βββββββββββββββββ βββββββββββββ
# βU1(ΞΈ) β‘ ββββββββββββββ΄βββββββββββββββββ΄βββββββββββββ
# q_1: ββ βββββ q_1: ββββββββββββ€ X ββ€ U1(-ΞΈ/2) ββ€ X ββ€ U1(ΞΈ/2) β
# βββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
def_cu1 = QuantumCircuit(q)
for inst, qargs, cargs in [
(U1Gate(theta / 2), [q[0]], []),
(CXGate(), [q[0], q[1]], []),
(U1Gate(-theta / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U1Gate(theta / 2), [q[1]], []),
]:
def_cu1.append(inst, qargs, cargs)
_sel.add_equivalence(CU1Gate(theta), def_cu1)
# U2Gate
#
# βββββββββββ βββββββββββββββ
# q: β€ U2(Ο,Ξ») β β‘ q: β€ U3(Ο/2,Ο,Ξ») β
# βββββββββββ βββββββββββββββ
q = QuantumRegister(1, "q")
phi = Parameter("phi")
lam = Parameter("lam")
def_u2 = QuantumCircuit(q)
def_u2.append(U3Gate(pi / 2, phi, lam), [q[0]], [])
_sel.add_equivalence(U2Gate(phi, lam), def_u2)
# U2Gate
# global phase: 7Ο/4
# βββββββββββ ββββββββββββββββββββββββββββββββββββ
# q: β€ U2(Ο,Ξ») β β‘ q: β€ U1(Ξ» - Ο/2) ββ€ βX ββ€ U1(Ο + Ο/2) β
# βββββββββββ ββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(1, "q")
phi = Parameter("phi")
lam = Parameter("lam")
u2_to_u1sx = QuantumCircuit(q, global_phase=-pi / 4)
u2_to_u1sx.append(U1Gate(lam - pi / 2), [0])
u2_to_u1sx.sx(0)
u2_to_u1sx.append(U1Gate(phi + pi / 2), [0])
_sel.add_equivalence(U2Gate(phi, lam), u2_to_u1sx)
# U3Gate
# global phase: Ξ»/2 + Ο/2 - Ο/2
# βββββββββββββ ββββββββββββββββββββββββββββββββββββββββββββββββ
# q: β€ U3(ΞΈ,Ο,Ξ») β β‘ q: β€ Rz(Ξ») ββ€ βX ββ€ Rz(ΞΈ + Ο) ββ€ βX ββ€ Rz(Ο + 3Ο) β
# βββββββββββββ ββββββββββββββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(1, "q")
theta = Parameter("theta")
phi = Parameter("phi")
lam = Parameter("lam")
u3_qasm_def = QuantumCircuit(q, global_phase=(lam + phi - pi) / 2)
u3_qasm_def.rz(lam, 0)
u3_qasm_def.sx(0)
u3_qasm_def.rz(theta + pi, 0)
u3_qasm_def.sx(0)
u3_qasm_def.rz(phi + 3 * pi, 0)
_sel.add_equivalence(U3Gate(theta, phi, lam), u3_qasm_def)
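# Illustrative sketch (not part of the library): this RZ/SX entry is what lets
# the basis translator rewrite U3 into a hardware-style rz/sx basis.  The
# angles are arbitrary sample values; ``transpile`` from the top-level qiskit
# namespace is an assumption of this sketch.
def _sketch_translate_u3_to_rz_sx():
    from qiskit import transpile  # local import to avoid import cycles

    demo = QuantumCircuit(1)
    demo.append(U3Gate(0.1, 0.2, 0.3), [0])
    translated = transpile(demo, basis_gates=["rz", "sx"], optimization_level=0)
    # Only gates from the requested basis should remain after translation.
    assert set(translated.count_ops()) <= {"rz", "sx"}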
# U3Gate
#
# βββββββββββββ ββββββββββββ
# q: β€ U3(ΞΈ,Ο,Ξ») β β‘ q: β€ U(ΞΈ,Ο,Ξ») β
# βββββββββββββ ββββββββββββ
q = QuantumRegister(1, "q")
theta = Parameter("theta")
phi = Parameter("phi")
lam = Parameter("lam")
u3_to_u = QuantumCircuit(q)
u3_to_u.u(theta, phi, lam, 0)
_sel.add_equivalence(U3Gate(theta, phi, lam), u3_to_u)
# CU3Gate
# βββββββββββββββββ Β»
# q_0: βββββββ ββββββ q_0: β€ U1(Ξ»/2 + Ο/2) ββββ ββββββββββββββββββββββββββββββ ββΒ»
# βββββββ΄ββββββ β‘ βββββββββββββββββ€βββ΄ββββββββββββββββββββββββββββββ΄ββΒ»
# q_1: β€ U3(ΞΈ,Ο,Ξ») β q_1: β€ U1(Ξ»/2 - Ο/2) ββ€ X ββ€ U3(-ΞΈ/2,0,-Ξ»/2 - Ο/2) ββ€ X βΒ»
# βββββββββββββ ββββββββββββββββββββββββββββββββββββββββββββββββββββΒ»
# Β«
# Β«q_0: βββββββββββββββ
# Β« βββββββββββββββ
# Β«q_1: β€ U3(ΞΈ/2,Ο,0) β
# Β« βββββββββββββββ
q = QuantumRegister(2, "q")
theta = Parameter("theta")
phi = Parameter("phi")
lam = Parameter("lam")
def_cu3 = QuantumCircuit(q)
for inst, qargs, cargs in [
(U1Gate((lam + phi) / 2), [q[0]], []),
(U1Gate((lam - phi) / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(-theta / 2, 0, -(phi + lam) / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(theta / 2, phi, 0), [q[1]], []),
]:
def_cu3.append(inst, qargs, cargs)
_sel.add_equivalence(CU3Gate(theta, phi, lam), def_cu3)
q = QuantumRegister(2, "q")
theta = Parameter("theta")
phi = Parameter("phi")
lam = Parameter("lam")
cu3_to_cu = QuantumCircuit(q)
cu3_to_cu.cu(theta, phi, lam, 0, 0, 1)
_sel.add_equivalence(CU3Gate(theta, phi, lam), cu3_to_cu)
# XGate
#
# βββββ βββββββββββββ
# q: β€ X β β‘ q: β€ U3(Ο,0,Ο) β
# βββββ βββββββββββββ
q = QuantumRegister(1, "q")
def_x = QuantumCircuit(q)
def_x.append(U3Gate(pi, 0, pi), [q[0]], [])
_sel.add_equivalence(XGate(), def_x)
# XGate
#
# βββββ ββββββββββββββββββββ
# q: β€ X β β‘ q: β€ H ββ€ S ββ€ S ββ€ H β
# βββββ ββββββββββββββββββββ
q = QuantumRegister(1, "q")
def_x = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[0]], []),
(SGate(), [q[0]], []),
(SGate(), [q[0]], []),
(HGate(), [q[0]], []),
]:
def_x.append(inst, qargs, cargs)
_sel.add_equivalence(XGate(), def_x)
# XGate
# global phase: Ο/2
# βββββ ββββββββββ
# q: β€ X β β‘ q: β€ Y ββ€ Z β
# βββββ ββββββββββ
def_x = QuantumCircuit(1, global_phase=pi / 2)
def_x.y(0)
def_x.z(0)
_sel.add_equivalence(XGate(), def_x)
# CXGate
for pos_ry in [False, True]:
for pos_rxx in [False, True]:
cx_to_rxx = _cnot_rxx_decompose(pos_ry, pos_rxx)
_sel.add_equivalence(CXGate(), cx_to_rxx)
# CXGate
#
# q_0: βββ ββ q_0: βββββββ ββββββ
# βββ΄ββ β‘ βββββ β βββββ
# q_1: β€ X β q_1: β€ H βββ ββ€ H β
# βββββ βββββ βββββ
q = QuantumRegister(2, "q")
cx_to_cz = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[1]], []),
(CZGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
]:
cx_to_cz.append(inst, qargs, cargs)
_sel.add_equivalence(CXGate(), cx_to_cz)
# CXGate
# global phase: 3Ο/4
# βββββ βββββββββββββββ ββββββββββββββββββββ
# q_0: βββ ββ q_0: β€ H βββββββ€0 ββ€ X βββββββ€0 ββ€ H ββ€ S ββββββ
# βββ΄ββ β‘ βββββ€ββββββ Iswap ββββββ€ββββββ Iswap ββββββ€βββββ€βββββ
# q_1: β€ X β q_1: β€ X ββ€ H ββ€1 ββ€ X ββ€ H ββ€1 ββ€ S ββ€ X ββ€ H β
# βββββ βββββββββββββββββββββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
cx_to_iswap = QuantumCircuit(q, global_phase=3 * pi / 4)
for inst, qargs, cargs in [
(HGate(), [q[0]], []),
(XGate(), [q[1]], []),
(HGate(), [q[1]], []),
(iSwapGate(), [q[0], q[1]], []),
(XGate(), [q[0]], []),
(XGate(), [q[1]], []),
(HGate(), [q[1]], []),
(iSwapGate(), [q[0], q[1]], []),
(HGate(), [q[0]], []),
(SGate(), [q[0]], []),
(SGate(), [q[1]], []),
(XGate(), [q[1]], []),
(HGate(), [q[1]], []),
]:
cx_to_iswap.append(inst, qargs, cargs)
_sel.add_equivalence(CXGate(), cx_to_iswap)
# CXGate
# global phase: 7Ο/4
# βββββββββββββββββββββββββββββ
# q_0: βββ ββ q_0: β€ Rz(-Ο/2) ββ€ Ry(Ο) ββ€0 β
# βββ΄ββ β‘ βββββββββββ¬βββββββββββ Ecr β
# q_1: β€ X β q_1: β€ Rx(Ο/2) ββββββββββββ€1 β
# βββββ βββββββββββ ββββββββ
q = QuantumRegister(2, "q")
cx_to_ecr = QuantumCircuit(q, global_phase=-pi / 4)
for inst, qargs, cargs in [
(RZGate(-pi / 2), [q[0]], []),
(RYGate(pi), [q[0]], []),
(RXGate(pi / 2), [q[1]], []),
(ECRGate(), [q[0], q[1]], []),
]:
cx_to_ecr.append(inst, qargs, cargs)
_sel.add_equivalence(CXGate(), cx_to_ecr)
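# Illustrative sketch (not part of the library): every non-parameterized
# circuit registered for CXGate so far, including the ECR-based one above,
# should reproduce the CX unitary exactly.  ``Operator`` is assumed from
# qiskit.quantum_info.
def _sketch_check_cx_entries():
    from qiskit.quantum_info import Operator  # local import to avoid import cycles

    target = Operator(CXGate())
    for entry in _sel.get_entry(CXGate()):
        if entry.num_parameters:  # skip parameterized entries in this sketch
            continue
        assert Operator(entry) == target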
# CXGate
# q_0: βββ ββ q_0: ββββββββββββββββ βββββββββββββββββββ
# βββ΄ββ β‘ ββββββββββββββ βP(Ο) ββββββββββββββ
# q_1: β€ X β q_1: β€ U(Ο/2,0,Ο) βββ ββββββ€ U(Ο/2,0,Ο) β
# βββββ ββββββββββββββ ββββββββββββββ
q = QuantumRegister(2, "q")
cx_to_cp = QuantumCircuit(q)
for inst, qargs, cargs in [
(UGate(pi / 2, 0, pi), [q[1]], []),
(CPhaseGate(pi), [q[0], q[1]], []),
(UGate(pi / 2, 0, pi), [q[1]], []),
]:
cx_to_cp.append(inst, qargs, cargs)
_sel.add_equivalence(CXGate(), cx_to_cp)
# CXGate
# ββββββββββββββ
# q_0: βββ ββ q_0: β€ U(0,0,Ο/2) ββββββ ββββββββββββββββββ
# βββ΄ββ β‘ ββββββββββββββ€βββββ΄ββββββββββββββββββ
# q_1: β€ X β q_1: β€ U(Ο/2,0,Ο) ββ€ Rz(Ο) ββ€ U(Ο/2,0,Ο) β
# βββββ βββββββββββββββββββββββββββββββββββββ
q = QuantumRegister(2, "q")
cx_to_crz = QuantumCircuit(q)
for inst, qargs, cargs in [
(UGate(pi / 2, 0, pi), [q[1]], []),
(UGate(0, 0, pi / 2), [q[0]], []),
(CRZGate(pi), [q[0], q[1]], []),
(UGate(pi / 2, 0, pi), [q[1]], []),
]:
cx_to_crz.append(inst, qargs, cargs)
_sel.add_equivalence(CXGate(), cx_to_crz)
# CXGate
# global phase: Ο/4
# ββββββββββββββββββββ
# q_0: βββ ββ q_0: β€0 ββ€ Sdg ββ
# βββ΄ββ β‘ β Rzx(Ο/2) ββββββββ΄β
# q_1: β€ X β q_1: β€1 ββ€ βXdg β
# βββββ βββββββββββββββββββββ
q = QuantumRegister(2, "q")
cx_to_zx90 = QuantumCircuit(q, global_phase=pi / 4)
for inst, qargs, cargs in [
(RZXGate(pi / 2), [q[0], q[1]], []),
(SdgGate(), [q[0]], []),
(SXdgGate(), [q[1]], []),
]:
cx_to_zx90.append(inst, qargs, cargs)
_sel.add_equivalence(CXGate(), cx_to_zx90)
# CCXGate
# βββββ
# q_0: βββ ββ q_0: ββββββββββββββββββββ ββββββββββββββββββββββ βββββ ββββ€ T βββββ ββ
# β β βββββ β βββ΄ββββ΄ββββ΄ββββ΄ββ
# q_1: βββ ββ β‘ q_1: ββββββββ ββββββββββββΌββββββββββ ββββ€ T βββββΌβββ€ X ββ€ Tdg ββ€ X β
# βββ΄ββ ββββββββ΄ββββββββββββ΄ββββββββββ΄ββββ΄ββββ΄ββββ΄βββββββ€ββ¬ββββ¬ββββββ
# q_2: β€ X β q_2: β€ H ββ€ X ββ€ Tdg ββ€ X ββ€ T ββ€ X ββ€ Tdg ββ€ X ββ€ T βββ€ H βββββββ
# βββββ βββββββββββββββββββββββββββββββββββββββββββββββββ βββββ
q = QuantumRegister(3, "q")
def_ccx = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[2]], []),
(CXGate(), [q[1], q[2]], []),
(TdgGate(), [q[2]], []),
(CXGate(), [q[0], q[2]], []),
(TGate(), [q[2]], []),
(CXGate(), [q[1], q[2]], []),
(TdgGate(), [q[2]], []),
(CXGate(), [q[0], q[2]], []),
(TGate(), [q[1]], []),
(TGate(), [q[2]], []),
(HGate(), [q[2]], []),
(CXGate(), [q[0], q[1]], []),
(TGate(), [q[0]], []),
(TdgGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
]:
def_ccx.append(inst, qargs, cargs)
_sel.add_equivalence(CCXGate(), def_ccx)
# CCXGate
#
# q_0: βββ ββ q_0: βββββββββ ββββββββββββββββββ βββββ βββ
# β βββ΄βββββββββ βββ΄ββ β
# q_1: βββ ββ β‘ q_1: βββ ββββ€ X ββ€ Sdg ββββ ββββ€ X ββββΌβββ
# βββ΄ββ βββ΄ββββββββ€ββββββββββ΄ββββββββ€βββ΄βββ
# q_2: β€ X β q_2: β€ Sx ββ€ Z βββββββββ€ Sx ββ€ Z ββ€ Sx β
# βββββ βββββββββββ βββββββββββββββββ
q = QuantumRegister(3, "q")
ccx_to_cx_csx = QuantumCircuit(q)
for inst, qargs, cargs in [
(CSXGate(), [q[1], q[2]], []),
(CXGate(), [q[0], q[1]], []),
(ZGate(), [q[2]], []),
(SdgGate(), [q[1]], []),
(CSXGate(), [q[1], q[2]], []),
(ZGate(), [q[2]], []),
(CXGate(), [q[0], q[1]], []),
(CSXGate(), [q[0], q[2]], []),
]:
ccx_to_cx_csx.append(inst, qargs, cargs)
_sel.add_equivalence(CCXGate(), ccx_to_cx_csx)
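# Illustrative sketch (not part of the library): the two CCX entries above
# trade plain CX count against controlled-sqrt(X) usage, which is why keeping
# alternative equivalences is useful when targeting different basis sets.
# Both entries are ordinary QuantumCircuits, so ``count_ops`` applies.
def _sketch_compare_ccx_entries():
    print("T/CX decomposition:  ", dict(def_ccx.count_ops()))
    print("CX/CSX decomposition:", dict(ccx_to_cx_csx.count_ops()))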
# YGate
#
# βββββ βββββββββββββββββ
# q: β€ Y β β‘ q: β€ U3(Ο,Ο/2,Ο/2) β
# βββββ βββββββββββββββββ
q = QuantumRegister(1, "q")
def_y = QuantumCircuit(q)
def_y.append(U3Gate(pi, pi / 2, pi / 2), [q[0]], [])
_sel.add_equivalence(YGate(), def_y)
# YGate
# global phase: 3Ο/2
# βββββ ββββββββββββββββββββββββββββββ
# q: β€ Y β β‘ q: β€ H ββ€ S ββ€ S ββ€ H ββ€ S ββ€ S β
# βββββ ββββββββββββββββββββββββββββββ
q = QuantumRegister(1, "q")
def_y = QuantumCircuit(q)
def_y.global_phase = 3 * pi / 2
for inst, qargs, cargs in [
(HGate(), [q[0]], []),
(SGate(), [q[0]], []),
(SGate(), [q[0]], []),
(HGate(), [q[0]], []),
(SGate(), [q[0]], []),
(SGate(), [q[0]], []),
]:
def_y.append(inst, qargs, cargs)
_sel.add_equivalence(YGate(), def_y)
# YGate
# global phase: Ο/2
# βββββ ββββββββββββββββββββββββββββββ
# q: β€ Y β β‘ q: β€ S ββ€ S ββ€ H ββ€ S ββ€ S ββ€ H β
# βββββ ββββββββββββββββββββββββββββββ
q = QuantumRegister(1, "q")
def_y = QuantumCircuit(q)
def_y.global_phase = pi / 2
for inst, qargs, cargs in [
(SGate(), [q[0]], []),
(SGate(), [q[0]], []),
(HGate(), [q[0]], []),
(SGate(), [q[0]], []),
(SGate(), [q[0]], []),
(HGate(), [q[0]], []),
]:
def_y.append(inst, qargs, cargs)
_sel.add_equivalence(YGate(), def_y)
# YGate
# global phase: Ο/2
# βββββ ββββββββββ
# q: β€ Y β β‘ q: β€ Z ββ€ X β
# βββββ ββββββββββ
def_y = QuantumCircuit(1, global_phase=pi / 2)
def_y.z(0)
def_y.x(0)
_sel.add_equivalence(YGate(), def_y)
# CYGate
#
# q_0: βββ ββ q_0: ββββββββββ βββββββ
# βββ΄ββ β‘ ββββββββββ΄βββββββ
# q_1: β€ Y β q_1: β€ Sdg ββ€ X ββ€ S β
# βββββ βββββββββββββββββ
q = QuantumRegister(2, "q")
def_cy = QuantumCircuit(q)
for inst, qargs, cargs in [
(SdgGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(SGate(), [q[1]], []),
]:
def_cy.append(inst, qargs, cargs)
_sel.add_equivalence(CYGate(), def_cy)
# ZGate
#
# βββββ βββββββββ
# q: β€ Z β β‘ q: β€ U1(Ο) β
# βββββ βββββββββ
q = QuantumRegister(1, "q")
def_z = QuantumCircuit(q)
def_z.append(U1Gate(pi), [q[0]], [])
_sel.add_equivalence(ZGate(), def_z)
# ZGate
#
# βββββ ββββββββββ
# q: β€ Z β β‘ q: β€ S ββ€ S β
# βββββ ββββββββββ
q = QuantumRegister(1, "q")
def_z = QuantumCircuit(q)
for inst, qargs, cargs in [
(SGate(), [q[0]], []),
(SGate(), [q[0]], []),
]:
def_z.append(inst, qargs, cargs)
_sel.add_equivalence(ZGate(), def_z)
# ZGate
# global phase: Ο/2
# βββββ ββββββββββ
# q: β€ Z β β‘ q: β€ X ββ€ Y β
# βββββ ββββββββββ
def_z = QuantumCircuit(1, global_phase=pi / 2)
def_z.x(0)
def_z.y(0)
_sel.add_equivalence(ZGate(), def_z)
# CZGate
#
# q_0: ββ β q_0: ββββββββ βββββββ
# β β‘ ββββββββ΄βββββββ
# q_1: ββ β q_1: β€ H ββ€ X ββ€ H β
# βββββββββββββββ
q = QuantumRegister(2, "q")
def_cz = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
]:
def_cz.append(inst, qargs, cargs)
_sel.add_equivalence(CZGate(), def_cz)
# CCZGate
#
# q_0: ββ β q_0: ββββββββ βββββββ
# β β
# q_1: ββ β = q_1: ββββββββ βββββββ
# β ββββββββ΄βββββββ
# q_2: ββ β q_2: β€ H ββ€ X ββ€ H β
# βββββββββββββββ
q = QuantumRegister(3, "q")
def_ccz = QuantumCircuit(q)
for inst, qargs, cargs in [
(HGate(), [q[2]], []),
(CCXGate(), [q[0], q[1], q[2]], []),
(HGate(), [q[2]], []),
]:
def_ccz.append(inst, qargs, cargs)
_sel.add_equivalence(CCZGate(), def_ccz)
# XGate
# global phase: Ο/2
# βββββ βββββββββ
# q: β€ X β β‘ q: β€ Rx(Ο) β
# βββββ βββββββββ
q = QuantumRegister(1, "q")
x_to_rx = QuantumCircuit(q)
x_to_rx.append(RXGate(theta=pi), [q[0]])
x_to_rx.global_phase = pi / 2
_sel.add_equivalence(XGate(), x_to_rx)
# YGate
# global phase: Ο/2
# βββββ βββββββββ
# q: β€ Y β β‘ q: β€ Ry(Ο) β
# βββββ βββββββββ
q = QuantumRegister(1, "q")
y_to_ry = QuantumCircuit(q)
y_to_ry.append(RYGate(theta=pi), [q[0]])
y_to_ry.global_phase = pi / 2
_sel.add_equivalence(YGate(), y_to_ry)
# HGate
# global phase: Ο/2
# βββββ ββββββββββββββββββββ
# q: β€ H β β‘ q: β€ Ry(Ο/2) ββ€ Rx(Ο) β
# βββββ ββββββββββββββββββββ
q = QuantumRegister(1, "q")
h_to_rxry = QuantumCircuit(q)
h_to_rxry.append(RYGate(theta=pi / 2), [q[0]])
h_to_rxry.append(RXGate(theta=pi), [q[0]])
h_to_rxry.global_phase = pi / 2
_sel.add_equivalence(HGate(), h_to_rxry)
# HGate
# global phase: Ο/2
# βββββ ββββββββββββββββββββββββ
# q: β€ H β β‘ q: β€ R(Ο/2,Ο/2) ββ€ R(Ο,0) β
# βββββ ββββββββββββββββββββββββ
q = QuantumRegister(1, "q")
h_to_rr = QuantumCircuit(q)
h_to_rr.append(RGate(theta=pi / 2, phi=pi / 2), [q[0]])
h_to_rr.append(RGate(theta=pi, phi=0), [q[0]])
h_to_rr.global_phase = pi / 2
_sel.add_equivalence(HGate(), h_to_rr)
# XXPlusYYGate
# βββββββββββββββββ
# β€0 β
# β {XX+YY}(ΞΈ,Ξ²) β
# β€1 β
# βββββββββββββββββ
# βββββββββ βββββ ββββββββββββββββββββββββ βββββββ ββββββββββββββ
# ββ€ Rz(Ξ²) ββββ€ S ββββββββββββββ€ X ββ€ Ry(-0.5*ΞΈ) ββ€ X ββββ€ Sdg βββββ€ Rz(-1.0*Ξ²) ββββββββββββ
# β‘ ββ΄ββββββββ΄βββββββ΄βββββββββββββββ¬ββββββββββββββββ€βββ¬βββββ΄ββββββ΄βββββββ¬βββββββ¬ββββββββββββββ
# β€ Rz(-Ο/2) ββ€ βX ββ€ Rz(Ο/2) ββββ βββ€ Ry(-0.5*ΞΈ) ββββ βββ€ Rz(-Ο/2) βββββ€ βXdg βββββ€ Rz(Ο/2) β
# βββββββββββββββββββββββββββββ ββββββββββββββ ββββββββββββ ββββββββ βββββββββββ
q = QuantumRegister(2, "q")
xxplusyy = QuantumCircuit(q)
beta = Parameter("beta")
theta = Parameter("theta")
rules: list[tuple[Gate, list[Qubit], list[Clbit]]] = [
(RZGate(beta), [q[0]], []),
(RZGate(-pi / 2), [q[1]], []),
(SXGate(), [q[1]], []),
(RZGate(pi / 2), [q[1]], []),
(SGate(), [q[0]], []),
(CXGate(), [q[1], q[0]], []),
(RYGate(-theta / 2), [q[1]], []),
(RYGate(-theta / 2), [q[0]], []),
(CXGate(), [q[1], q[0]], []),
(SdgGate(), [q[0]], []),
(RZGate(-pi / 2), [q[1]], []),
(SXdgGate(), [q[1]], []),
(RZGate(pi / 2), [q[1]], []),
(RZGate(-beta), [q[0]], []),
]
for instr, qargs, cargs in rules:
xxplusyy._append(instr, qargs, cargs)
_sel.add_equivalence(XXPlusYYGate(theta, beta), xxplusyy)
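# Illustrative spot-check (sketch only): with two free parameters it is safest
# to bind by parameter name.  The 0.4/0.7 values are arbitrary samples and
# ``Operator`` is assumed from qiskit.quantum_info.
def _sketch_check_xx_plus_yy(theta_value=0.4, beta_value=0.7):
    from qiskit.quantum_info import Operator  # local import to avoid import cycles

    values = {"theta": theta_value, "beta": beta_value}
    bound = xxplusyy.assign_parameters({p: values[p.name] for p in xxplusyy.parameters})
    assert Operator(bound) == Operator(XXPlusYYGate(theta_value, beta_value))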
# XXMinusYYGate
# βββββββββββββββββ
# β€0 β
# β {XX-YY}(ΞΈ,Ξ²) β
# β€1 β
# βββββββββββββββββ
# ββββββββββββ βββββββββββββββββ βββββββββββ ββββββββββββ βββββββββββββββββββ
# ββ€ Rz(-Ο/2) βββ€ βX ββ€ Rz(Ο/2) ββββ ββββ€ Ry(ΞΈ/2) ββββββ βββ€ Rz(-Ο/2) βββ€ βXdg ββ€ Rz(Ο/2) β
# β‘ ββ΄βββββββββββ΄ββββββ¬βββββββββββββββ΄ββββ΄ββββββββββ΄βββββ΄βββββ¬ββββββ¬βββββ΄βββββββ€βββββββββββ
# β€ Rz(-1.0*Ξ²) ββ€ S ββββββββββββββ€ X ββ€ Ry(-0.5*ΞΈ) ββ€ X ββββ€ Sdg βββββ€ Rz(Ξ²) ββββββββββββ
# βββββββββββββββββββ ββββββββββββββββββββββββ βββββββ βββββββββ
q = QuantumRegister(2, "q")
xxminusyy = QuantumCircuit(q)
beta = Parameter("beta")
theta = Parameter("theta")
rules: list[tuple[Gate, list[Qubit], list[Clbit]]] = [
(RZGate(-beta), [q[1]], []),
(RZGate(-pi / 2), [q[0]], []),
(SXGate(), [q[0]], []),
(RZGate(pi / 2), [q[0]], []),
(SGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RYGate(theta / 2), [q[0]], []),
(RYGate(-theta / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(SdgGate(), [q[1]], []),
(RZGate(-pi / 2), [q[0]], []),
(SXdgGate(), [q[0]], []),
(RZGate(pi / 2), [q[0]], []),
(RZGate(beta), [q[1]], []),
]
for instr, qargs, cargs in rules:
xxminusyy._append(instr, qargs, cargs)
_sel.add_equivalence(XXMinusYYGate(theta, beta), xxminusyy)
|
qiskit/qiskit/circuit/library/standard_gates/equivalence_library.py/0
|
{
"file_path": "qiskit/qiskit/circuit/library/standard_gates/equivalence_library.py",
"repo_id": "qiskit",
"token_count": 32889
}
| 177 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Swap gate."""
from __future__ import annotations
from typing import Optional, Union
import numpy
from qiskit.circuit.singleton import SingletonGate, SingletonControlledGate, stdlib_singleton_key
from qiskit.circuit.quantumregister import QuantumRegister
from qiskit.circuit._utils import with_gate_array, with_controlled_gate_array
from qiskit._accelerate.circuit import StandardGate
_SWAP_ARRAY = numpy.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
@with_gate_array(_SWAP_ARRAY)
class SwapGate(SingletonGate):
r"""The SWAP gate.
This is a symmetric and Clifford gate.
Can be applied to a :class:`~qiskit.circuit.QuantumCircuit`
with the :meth:`~qiskit.circuit.QuantumCircuit.swap` method.
**Circuit symbol:**
.. parsed-literal::
q_0: ─X─
      │
q_1: ─X─
**Matrix Representation:**
.. math::
SWAP =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1
\end{pmatrix}
The gate is equivalent to a state swap and is a classical logic gate.
.. math::
|a, b\rangle \rightarrow |b, a\rangle
"""
_standard_gate = StandardGate.SwapGate
def __init__(self, label: Optional[str] = None, *, duration=None, unit="dt"):
"""Create new SWAP gate."""
super().__init__("swap", 2, [], label=label, duration=duration, unit=unit)
_singleton_lookup_key = stdlib_singleton_key()
def _define(self):
"""
gate swap a,b { cx a,b; cx b,a; cx a,b; }
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .x import CXGate
q = QuantumRegister(2, "q")
qc = QuantumCircuit(q, name=self.name)
rules = [
(CXGate(), [q[0], q[1]], []),
(CXGate(), [q[1], q[0]], []),
(CXGate(), [q[0], q[1]], []),
]
for instr, qargs, cargs in rules:
qc._append(instr, qargs, cargs)
self.definition = qc
def control(
self,
num_ctrl_qubits: int = 1,
label: str | None = None,
ctrl_state: str | int | None = None,
annotated: bool | None = None,
):
"""Return a (multi-)controlled-SWAP gate.
One control returns a CSWAP (Fredkin) gate.
Args:
num_ctrl_qubits: number of control qubits.
label: An optional label for the gate [Default: ``None``]
ctrl_state: control state expressed as integer,
string (e.g. ``'110'``), or ``None``. If ``None``, use all 1s.
annotated: indicates whether the controlled gate should be implemented
as an annotated gate. If ``None``, this is handled as ``False``.
Returns:
ControlledGate: controlled version of this gate.
"""
if not annotated and num_ctrl_qubits == 1:
gate = CSwapGate(label=label, ctrl_state=ctrl_state, _base_label=self.label)
else:
gate = super().control(
num_ctrl_qubits=num_ctrl_qubits,
label=label,
ctrl_state=ctrl_state,
annotated=annotated,
)
return gate
def inverse(self, annotated: bool = False):
"""Return inverse Swap gate (itself).
Args:
annotated: when set to ``True``, this is typically used to return an
:class:`.AnnotatedOperation` with an inverse modifier set instead of a concrete
:class:`.Gate`. However, for this class this argument is ignored as this gate
is self-inverse.
Returns:
SwapGate: inverse gate (self-inverse).
"""
return SwapGate() # self-inverse
def __eq__(self, other):
return isinstance(other, SwapGate)
@with_controlled_gate_array(_SWAP_ARRAY, num_ctrl_qubits=1)
class CSwapGate(SingletonControlledGate):
r"""Controlled-SWAP gate, also known as the Fredkin gate.
Can be applied to a :class:`~qiskit.circuit.QuantumCircuit`
with the :meth:`~qiskit.circuit.QuantumCircuit.cswap` and
:meth:`~qiskit.circuit.QuantumCircuit.fredkin` methods.
**Circuit symbol:**
.. parsed-literal::
q_0: ─■─
      │
q_1: ─X─
      │
q_2: ─X─
**Matrix representation:**
.. math::
CSWAP\ q_0, q_1, q_2 =
I \otimes I \otimes |0 \rangle \langle 0| +
SWAP \otimes |1 \rangle \langle 1| =
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
\end{pmatrix}
.. note::
In Qiskit's convention, higher qubit indices are more significant
(little endian convention). In many textbooks, controlled gates are
presented with the assumption of more significant qubits as control,
which in our case would be q_2. Thus a textbook matrix for this
gate will be:
.. parsed-literal::
q_0: ─X─
      │
q_1: ─X─
      │
q_2: ─■─
.. math::
CSWAP\ q_2, q_1, q_0 =
|0 \rangle \langle 0| \otimes I \otimes I +
|1 \rangle \langle 1| \otimes SWAP =
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
\end{pmatrix}
In the computational basis, this gate swaps the states of
the two target qubits if the control qubit is in the
:math:`|1\rangle` state.
.. math::
|0, b, c\rangle \rightarrow |0, b, c\rangle
|1, b, c\rangle \rightarrow |1, c, b\rangle
"""
_standard_gate = StandardGate.CSwapGate
def __init__(
self,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
*,
duration=None,
unit="dt",
_base_label=None,
):
"""Create new CSWAP gate."""
if unit is None:
unit = "dt"
super().__init__(
"cswap",
3,
[],
num_ctrl_qubits=1,
label=label,
ctrl_state=ctrl_state,
base_gate=SwapGate(label=_base_label),
duration=duration,
unit=unit,
)
_singleton_lookup_key = stdlib_singleton_key(num_ctrl_qubits=1)
def _define(self):
"""
gate cswap a,b,c
{ cx c,b;
ccx a,b,c;
cx c,b;
}
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .x import CXGate, CCXGate
q = QuantumRegister(3, "q")
qc = QuantumCircuit(q, name=self.name)
rules = [
(CXGate(), [q[2], q[1]], []),
(CCXGate(), [q[0], q[1], q[2]], []),
(CXGate(), [q[2], q[1]], []),
]
for instr, qargs, cargs in rules:
qc._append(instr, qargs, cargs)
self.definition = qc
def inverse(self, annotated: bool = False):
"""Return inverse CSwap gate (itself).
Args:
annotated: when set to ``True``, this is typically used to return an
:class:`.AnnotatedOperation` with an inverse modifier set instead of a concrete
:class:`.Gate`. However, for this class this argument is ignored as this gate
is self-inverse.
Returns:
CSwapGate: inverse gate (self-inverse).
"""
return CSwapGate(ctrl_state=self.ctrl_state) # self-inverse
def __eq__(self, other):
return isinstance(other, CSwapGate) and self.ctrl_state == other.ctrl_state
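# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of this module): SwapGate and CSwapGate
# are normally reached through QuantumCircuit.swap / QuantumCircuit.cswap, and
# a singly controlled SwapGate is the same Fredkin gate defined above.
# ``Operator`` from qiskit.quantum_info is an assumption of this sketch.
def _swap_usage_sketch():
    from qiskit.circuit import QuantumCircuit
    from qiskit.quantum_info import Operator

    qc = QuantumCircuit(3)
    qc.cswap(0, 1, 2)  # swap qubits 1 and 2 when qubit 0 is |1>
    assert isinstance(qc.data[0].operation, CSwapGate)
    assert Operator(SwapGate().control(1)) == Operator(CSwapGate())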
|
qiskit/qiskit/circuit/library/standard_gates/swap.py/0
|
{
"file_path": "qiskit/qiskit/circuit/library/standard_gates/swap.py",
"repo_id": "qiskit",
"token_count": 4402
}
| 178 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Template circuits for X, CX and Toffoli gates.
**Reference:**
Maslov, D. and Dueck, G. W. and Miller, D. M.,
Techniques for the synthesis of reversible Toffoli networks, 2007
http://dx.doi.org/10.1145/1278349.1278355
"""
from .template_nct_2a_1 import template_nct_2a_1
from .template_nct_2a_2 import template_nct_2a_2
from .template_nct_2a_3 import template_nct_2a_3
from .template_nct_4a_1 import template_nct_4a_1
from .template_nct_4a_2 import template_nct_4a_2
from .template_nct_4a_3 import template_nct_4a_3
from .template_nct_4b_1 import template_nct_4b_1
from .template_nct_4b_2 import template_nct_4b_2
from .template_nct_5a_1 import template_nct_5a_1
from .template_nct_5a_2 import template_nct_5a_2
from .template_nct_5a_3 import template_nct_5a_3
from .template_nct_5a_4 import template_nct_5a_4
from .template_nct_6a_1 import template_nct_6a_1
from .template_nct_6a_2 import template_nct_6a_2
from .template_nct_6a_3 import template_nct_6a_3
from .template_nct_6a_4 import template_nct_6a_4
from .template_nct_6b_1 import template_nct_6b_1
from .template_nct_6b_2 import template_nct_6b_2
from .template_nct_6c_1 import template_nct_6c_1
from .template_nct_7a_1 import template_nct_7a_1
from .template_nct_7b_1 import template_nct_7b_1
from .template_nct_7c_1 import template_nct_7c_1
from .template_nct_7d_1 import template_nct_7d_1
from .template_nct_7e_1 import template_nct_7e_1
from .template_nct_9a_1 import template_nct_9a_1
from .template_nct_9c_1 import template_nct_9c_1
from .template_nct_9c_2 import template_nct_9c_2
from .template_nct_9c_3 import template_nct_9c_3
from .template_nct_9c_4 import template_nct_9c_4
from .template_nct_9c_5 import template_nct_9c_5
from .template_nct_9c_6 import template_nct_9c_6
from .template_nct_9c_7 import template_nct_9c_7
from .template_nct_9c_8 import template_nct_9c_8
from .template_nct_9c_9 import template_nct_9c_9
from .template_nct_9c_10 import template_nct_9c_10
from .template_nct_9c_11 import template_nct_9c_11
from .template_nct_9c_12 import template_nct_9c_12
from .template_nct_9d_1 import template_nct_9d_1
from .template_nct_9d_2 import template_nct_9d_2
from .template_nct_9d_3 import template_nct_9d_3
from .template_nct_9d_4 import template_nct_9d_4
from .template_nct_9d_5 import template_nct_9d_5
from .template_nct_9d_6 import template_nct_9d_6
from .template_nct_9d_7 import template_nct_9d_7
from .template_nct_9d_8 import template_nct_9d_8
from .template_nct_9d_9 import template_nct_9d_9
from .template_nct_9d_10 import template_nct_9d_10
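# Illustrative sketch (not part of this package): every template above is a
# QuantumCircuit that composes to the identity, which can be spot-checked
# numerically.  ``numpy`` and ``Operator`` from qiskit.quantum_info are
# assumptions of this sketch, not dependencies of the package.
def _sketch_check_template_is_identity():
    import numpy as np
    from qiskit.quantum_info import Operator

    template = template_nct_2a_2()  # two back-to-back CX gates
    assert np.allclose(Operator(template).data, np.eye(2**template.num_qubits))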
|
qiskit/qiskit/circuit/library/templates/nct/__init__.py/0
|
{
"file_path": "qiskit/qiskit/circuit/library/templates/nct/__init__.py",
"repo_id": "qiskit",
"token_count": 1243
}
| 179 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
A library of template circuits.
Templates are circuits that compute the identity. They find use
in circuit optimization where matching part of the template allows the compiler
to replace the match with the inverse of the remainder from the template.
"""
from .rzx_yz import rzx_yz
from .rzx_xz import rzx_xz
from .rzx_cy import rzx_cy
from .rzx_zz1 import rzx_zz1
from .rzx_zz2 import rzx_zz2
from .rzx_zz3 import rzx_zz3
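# Illustrative sketch (not part of this package): the RZX templates take a
# rotation angle and return ordinary QuantumCircuits, so they can be built and
# inspected directly.  The 0.3 angle and the positional-argument call are
# assumptions of this sketch based on the template signatures.
def _sketch_build_rzx_template(angle=0.3):
    template = rzx_yz(angle)
    # The drawing shows the RZX gate next to the single-qubit rotations that
    # bring the circuit back to the identity.
    print(template.draw())
    return template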
|
qiskit/qiskit/circuit/library/templates/rzx/__init__.py/0
|
{
"file_path": "qiskit/qiskit/circuit/library/templates/rzx/__init__.py",
"repo_id": "qiskit",
"token_count": 262
}
| 180 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Helper function for converting a circuit to a dag dependency"""
from qiskit.dagcircuit.dagdependency_v2 import _DAGDependencyV2
def _circuit_to_dagdependency_v2(circuit):
"""Build a ``_DAGDependencyV2`` object from a :class:`~.QuantumCircuit`.
Args:
circuit (QuantumCircuit): the input circuit.
Return:
_DAGDependencyV2: the DAG representing the input circuit as a dag dependency.
"""
dagdependency = _DAGDependencyV2()
dagdependency.name = circuit.name
dagdependency.metadata = circuit.metadata
dagdependency.calibrations = circuit.calibrations
dagdependency.global_phase = circuit.global_phase
dagdependency.add_qubits(circuit.qubits)
dagdependency.add_clbits(circuit.clbits)
for register in circuit.qregs:
dagdependency.add_qreg(register)
for register in circuit.cregs:
dagdependency.add_creg(register)
for instruction in circuit.data:
dagdependency.apply_operation_back(
instruction.operation, instruction.qubits, instruction.clbits
)
return dagdependency
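def _example_circuit_to_dagdependency_v2():
    """Usage sketch (illustrative, not part of this module).

    Converts a small circuit and reads back the attributes that the helper
    above copies over explicitly.
    """
    from qiskit.circuit import QuantumCircuit

    bell = QuantumCircuit(2, name="bell")
    bell.h(0)
    bell.cx(0, 1)
    dag_dep = _circuit_to_dagdependency_v2(bell)
    assert dag_dep.name == "bell"
    assert dag_dep.global_phase == bell.global_phase
    return dag_dep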
|
qiskit/qiskit/converters/circuit_to_dagdependency_v2.py/0
|
{
"file_path": "qiskit/qiskit/converters/circuit_to_dagdependency_v2.py",
"repo_id": "qiskit",
"token_count": 537
}
| 181 |