
Fix flaky REM test / refactor #2464

Merged: 7 commits, Aug 15, 2024. Changes from all commits.
45 changes: 21 additions & 24 deletions mitiq/rem/inverse_confusion_matrix.py
@@ -4,7 +4,7 @@
# LICENSE file in the root directory of this source tree.

from functools import reduce
-from typing import List, Sequence
+from typing import Sequence

import numpy as np
import numpy.typing as npt
@@ -14,38 +14,35 @@


def sample_probability_vector(
-    probability_vector: npt.NDArray[np.float64], samples: int
-) -> List[Bitstring]:
+    probability_vector: Sequence[float], samples: int
+) -> list[str]:
[Review thread on the return type annotation]

@purva-thakre (Contributor), Aug 14, 2024:
I am curious as to what's usually preferred when we specify the return type. I see list[str] used here, but from typing import List with a List[str] return type is used on another line. Searching around shows that the typing module is not needed on Python >= 3.9. Do we need to add this as a to-do for the other modules in mitiq?

Contributor:
Didn't we have this exact same discussion with @vprusso in another PR? IIRC, there we learned that list[str] is the way to go.

@purva-thakre (Contributor), Aug 14, 2024:
Yes, we did, which is why I was asking whether we need to add this to our to-dos. We still use things like from typing import List elsewhere.

Contributor:
We don't need to be so proactive. The aliases are deprecated, but they are unlikely to be removed in the foreseeable future.

Member (Author):
Agreed, we should not take action on this until we need to. In the meantime, let's try to use the builtins instead of typing.List; it's not a big concern, though, since both achieve the same goal. If we do migrate eventually, we will have to import a large majority of the types from collections.abc, since many of the types we rely on (e.g. typing.Sequence) are deprecated in favor of their collections.abc counterparts.
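As a side note, here is a minimal sketch of the two annotation styles the thread compares (the function below is illustrative, not from mitiq). On Python >= 3.9, builtin generics plus collections.abc cover both cases without typing aliases:

```python
from collections.abc import Sequence  # modern home of Sequence (PEP 585)

# from typing import List, Sequence  # deprecated aliases; equivalent, still supported


def first_bits(rows: Sequence[str]) -> list[str]:
    """Builtin list[...] needs no import on Python >= 3.9."""
    return [row[0] for row in rows]


print(first_bits(["01", "10"]))  # ['0', '1']
```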

"""Generate a number of samples from a probability distribution as
bitstrings.
[Review thread on the docstring]

Contributor:
It could be useful to add an example of input --> output for our future selves.

Member (Author):
Considering this left us a bit confused when first encountering it, I think that's a great idea. Done in f8725eb.


    Args:
        probability_vector: A probability vector.
        samples: The number of samples to generate.

    Returns:
        A list of sampled bitstrings.

    Example:
        >>> sample_probability_vector([0, 1/2, 1/4, 1/4], 4)
        ['01', '10', '11', '11']
    """
-    # sample using the probability distribution given
     num_values = len(probability_vector)
-    choices = np.random.choice(num_values, size=samples, p=probability_vector)
-
-    # convert samples to binary strings
-    bit_width = int(np.log2(num_values))
-    binary_repr_vec = np.vectorize(np.binary_repr)
-    binary_strings = binary_repr_vec(choices, width=bit_width)
-
-    # split the binary strings into an array of ints
-    bitstrings = (
-        np.apply_along_axis(  # type: ignore
-            func1d=np.fromstring,  # type: ignore
-            axis=1,
-            arr=binary_strings[:, None],
-            dtype="U1",  # type: ignore
-        )
-        .astype(np.uint8)
-        .tolist()
-    )
+    if not np.log2(num_values).is_integer():
+        raise ValueError(
+            "The length of the probability vector must be a power of 2."
+        )
+
+    sampled_indices = np.random.choice(
+        num_values, size=samples, p=probability_vector
+    )
+
+    bit_width = int(np.log2(num_values))
+    bitstrings = [format(index, f"0{bit_width}b") for index in sampled_indices]

     return bitstrings
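The fixed-width binary conversion in the new implementation relies on Python's format-spec mini-language; here is a standalone sketch of just that step (values illustrative):

```python
import numpy as np

num_values = 4
bit_width = int(np.log2(num_values))  # 2 bits are enough for indices 0..3

# "0{width}b" zero-pads each index's binary representation to bit_width digits
print([format(i, f"0{bit_width}b") for i in range(num_values)])
# ['00', '01', '10', '11']
```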


Expand All @@ -60,7 +57,7 @@ def bitstrings_to_probability_vector(
bitstrings: All measured bitstrings.

Returns:
A probabiity vector corresponding to the measured bitstrings.
A probability vector corresponding to the measured bitstrings.
"""
pv = np.zeros(2 ** len(bitstrings[0]))
for bs in bitstrings:
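The body is truncated by the diff here; for intuition, a minimal self-contained sketch of the counting this function performs (the final normalization is an assumption based on the function's contract):

```python
import numpy as np

bitstrings = ["00", "01", "01", "11"]
pv = np.zeros(2 ** len(bitstrings[0]))  # one bin per computational basis state
for bs in bitstrings:
    pv[int(bs, 2)] += 1  # index by the bitstring's integer value
pv /= len(bitstrings)  # turn counts into frequencies
print(pv)  # [0.25 0.5  0.   0.25]
```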
@@ -100,7 +97,7 @@ def generate_inverse_confusion_matrix(


def generate_tensored_inverse_confusion_matrix(
-    num_qubits: int, confusion_matrices: List[npt.NDArray[np.float64]]
+    num_qubits: int, confusion_matrices: list[npt.NDArray[np.float64]]
) -> npt.NDArray[np.float64]:
"""
Generates the inverse confusion matrix utilizing the supplied
@@ -132,7 +129,7 @@ def generate_tensored_inverse_confusion_matrix(
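This function's body is also truncated above; as a hedged illustration of the tensored-inverse idea the name suggests (a Kronecker product of per-qubit inverses, using made-up confusion matrices):

```python
import numpy as np
from functools import reduce

# Hypothetical single-qubit confusion matrices (entry [i, j]: P(measure j | prepared i)).
c0 = np.array([[0.9, 0.1], [0.2, 0.8]])
c1 = np.array([[0.95, 0.05], [0.1, 0.9]])

# (A kron B)^-1 == A^-1 kron B^-1, so tensoring the inverses inverts the tensored matrix.
inv = reduce(np.kron, [np.linalg.inv(c0), np.linalg.inv(c1)])
print(inv.shape)  # (4, 4) for a 2-qubit register
```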

def closest_positive_distribution(
    quasi_probabilities: npt.NDArray[np.float64],
-) -> npt.NDArray[np.float64]:
+) -> list[float]:
"""Given the input quasi-probability distribution returns the closest
positive probability distribution (with respect to the total variation
distance).
@@ -163,7 +160,7 @@ def distance(probabilities: npt.NDArray[np.float64]) -> np.float64:
        raise ValueError(
            "REM failed to determine the closest positive distribution."
        )
-    return result.x
+    return result.x.tolist()
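The result.x above points at scipy.optimize; here is a rough sketch of projecting a quasi-probability vector onto the probability simplex under total-variation distance (the objective, bounds, and solver details are assumptions, not necessarily mitiq's exact setup):

```python
import numpy as np
from scipy.optimize import minimize

quasi = np.array([1.1, 0.2, -0.3])  # sums to 1, but has a negative entry


def distance(p: np.ndarray) -> float:
    # total variation distance to the input quasi-distribution
    return 0.5 * float(np.abs(p - quasi).sum())


result = minimize(
    distance,
    x0=np.clip(quasi, 0.0, 1.0),  # start from a clipped guess
    bounds=[(0.0, 1.0)] * quasi.size,
    constraints={"type": "eq", "fun": lambda p: p.sum() - 1.0},
)
print([round(x, 3) for x in result.x.tolist()])  # a valid probability vector
```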


def mitigate_measurements(
52 changes: 29 additions & 23 deletions mitiq/rem/tests/test_inverse_confusion_matrix.py
@@ -22,29 +22,35 @@
)


+def test_sample_probability_vector_invalid_size():
+    with pytest.raises(ValueError, match="power of 2"):
+        sample_probability_vector([1 / 3, 1 / 3, 1 / 3], 3)


def test_sample_probability_vector_single_qubit():
    bitstrings = sample_probability_vector(np.array([1, 0]), 10)
-    assert all([b == [0] for b in bitstrings])
+    assert all(b == "0" for b in bitstrings)

    bitstrings = sample_probability_vector(np.array([0, 1]), 10)
-    assert all([b == [1] for b in bitstrings])
+    assert all(b == "1" for b in bitstrings)

+    np.random.seed(0)
    bitstrings = sample_probability_vector(np.array([0.5, 0.5]), 1000)
-    assert isclose(sum([b[0] for b in bitstrings]), 500, rel_tol=0.1)
+    assert sum(int(b) for b in bitstrings) == 483
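This hunk is the flakiness fix named in the PR title: with NumPy's legacy global RNG pinned, the sampled count is bit-for-bit reproducible, so the test can assert the exact constant 483 instead of a statistical tolerance. A minimal sketch of the pattern:

```python
import numpy as np


def count_ones(seed: int) -> int:
    np.random.seed(seed)  # pin the legacy global RNG, as the test does
    return int(np.random.choice(2, size=1000, p=[0.5, 0.5]).sum())


# Same seed, same draw: an exact assertion can never flake.
assert count_ones(0) == count_ones(0)
```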


def test_sample_probability_vector_two_qubits():
    bitstrings = sample_probability_vector(np.array([1, 0, 0, 0]), 10)
-    assert all([b == [0, 0] for b in bitstrings])
+    assert all(b == "00" for b in bitstrings)

    bitstrings = sample_probability_vector(np.array([0, 1, 0, 0]), 10)
-    assert all([b == [0, 1] for b in bitstrings])
+    assert all(b == "01" for b in bitstrings)

    bitstrings = sample_probability_vector(np.array([0, 0, 1, 0]), 10)
-    assert all([b == [1, 0] for b in bitstrings])
+    assert all(b == "10" for b in bitstrings)

    bitstrings = sample_probability_vector(np.array([0, 0, 0, 1]), 10)
-    assert all([b == [1, 1] for b in bitstrings])
+    assert all(b == "11" for b in bitstrings)


def test_bitstrings_to_probability_vector():
@@ -64,20 +70,20 @@ def test_bitstrings_to_probability_vector():
    assert (pv == np.array([0, 0, 0, 1])).all()


-def test_probability_vector_roundtrip():
-    for _ in range(10):
-        pv = np.random.rand(4)
-        pv /= np.sum(pv)
-        assert isclose(
-            np.linalg.norm(
-                pv
-                - bitstrings_to_probability_vector(
-                    sample_probability_vector(pv, 1000)
-                )
-            ),
-            0,
-            abs_tol=0.1,
-        )
+@pytest.mark.parametrize("_", range(10))
+def test_probability_vector_roundtrip(_):
+    pv = np.random.rand(4)
+    pv /= np.sum(pv)
+    assert isclose(
+        np.linalg.norm(
+            pv
+            - bitstrings_to_probability_vector(
+                sample_probability_vector(pv, 1000)
+            )
+        ),
+        0,
+        abs_tol=0.1,
+    )


def test_generate_inverse_confusion_matrix():
@@ -137,12 +143,12 @@ def test_generate_tensored_inverse_confusion_matrix(
            num_qubits, confusion_matrices
        )
    else:
-        assert np.isclose(
+        assert np.allclose(
            generate_tensored_inverse_confusion_matrix(
                num_qubits, confusion_matrices
            ),
            expected,
-        ).all()
+        )
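A small aside on this last change: for finite inputs the two assertions are equivalent, np.allclose just reduces to a single bool in one call:

```python
import numpy as np

a = np.eye(2)
b = np.eye(2) + 1e-12

assert np.isclose(a, b).all()  # elementwise boolean array, then reduced
assert np.allclose(a, b)       # the same comparison as one scalar check
```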


def test_mitigate_measurements():