237 changes: 108 additions & 129 deletions src/qldpc/codes/common.py
@@ -725,12 +725,7 @@ def shorten(self, bits: Collection[int]) -> ClassicalCode:  # pragma: no cover
         return self.shortened(bits)
 
     def get_logical_error_rate_func(
-        self,
-        num_samples: int,
-        max_error_rate: float = 0.3,
-        *,
-        discard_weight: int | Collection[int] = (),
-        **decoder_kwargs: Any,
+        self, num_samples: int, max_error_rate: float = 0.3, **decoder_kwargs: Any
     ) -> ErrorRateFunc:
         """Construct a function from physical --> logical error rate in a code capacity model.
 
@@ -760,9 +755,6 @@ def get_logical_error_rate_func(
         F(p) = q_0(p) + sum_(k>0) q_k(p) F_k.
         We thereby only need to sample errors of weight k > 0.
         """
-        if not isinstance(discard_weight, Collection):
-            discard_weight = [discard_weight]
-
         decoder = decoders.get_decoder(self.matrix, **decoder_kwargs)
 
         # sample errors of fixed weight and record failure/discard counts
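As a reading aid, here is a minimal standalone sketch of the stratified estimator that the docstring above describes: estimate decoding outcomes separately for each error weight k, then recombine them with binomial weights q_k(p). Every name in this sketch is illustrative only, not qldpc API; the real implementation lives in get_logical_error_rate_func and ErrorRateFunc.

```python
# Sketch of F(p) = q_0(p) + sum_(k>0) q_k(p) F_k from the docstring above.
import numpy as np
from scipy.stats import binom


def logical_error_rate(p: float, block_length: int, fidelities: np.ndarray) -> float:
    """Combine per-weight decoding fidelities F_k into a logical error rate.

    fidelities[k] is a Monte Carlo estimate of F_k, the probability of
    successfully decoding a uniformly random weight-k error; the k = 0 term
    needs no sampling, since a weight-0 error is always decoded correctly.
    """
    weights = np.arange(len(fidelities))
    q_k = binom.pmf(weights, block_length, p)  # q_k(p) = (n choose k) p^k (1-p)^(n-k)
    fidelity = q_k[0] + q_k[1:] @ fidelities[1:]
    return 1.0 - float(fidelity)
```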
@@ -772,19 +764,15 @@
         for weight in range(1, len(sample_allocation)):
             num_failures[weight], num_discards[weight] = (
                 self._estimate_decoding_infidelity_and_variance(
-                    weight, sample_allocation[weight], decoder, discard_weight
+                    weight, sample_allocation[weight], decoder
                 )
             )
         return ErrorRateFunc(
             sample_allocation, num_failures, num_discards, len(self), float(max_error_rate)
         )
 
     def _estimate_decoding_infidelity_and_variance(
-        self,
-        error_weight: int,
-        num_samples: int,
-        decoder: decoders.Decoder,
-        discard_weight: Collection[int],
+        self, error_weight: int, num_samples: int, decoder: decoders.Decoder
     ) -> tuple[int, int]:
         """Sample and correct errors of a fixed weight. Return logical error and discard counts."""
         num_failures = 0
@@ -797,8 +785,8 @@ def _estimate_decoding_infidelity_and_variance(
 
             # decode the error
             syndrome = self.matrix @ error
-            decoded_error = decoder.decode(syndrome.view(np.ndarray)).view(self.field)
-            if discard_weight and np.count_nonzero(decoded_error) in discard_weight:
+            decoded_error, erasure = _get_error_and_erasure(decoder, syndrome)
+            if erasure:  # pragma: no cover
                 num_discards += 1
             elif np.any(decoded_error - error):
                 num_failures += 1
@@ -1944,8 +1932,6 @@ def get_logical_error_rate_func(
         num_samples: int,
         max_error_rate: float = 0.3,
         pauli_bias: Sequence[float] | None = None,
-        *,
-        discard_weight: int | Collection[int] = (),
         **decoder_kwargs: Any,
     ) -> ErrorRateFunc:
         """Construct a function from physical --> logical error rate in a code capacity model.
@@ -1966,9 +1952,6 @@
         See help(qldpc.codes.ClassicalCode.get_logical_error_rate_func) for more details about how
         this method works.
         """
-        if not isinstance(discard_weight, Collection):
-            discard_weight = [discard_weight]
-
         # collect relative probabilities of Z, X, and Y errors
         pauli_bias_zxy: npt.NDArray[np.floating] | None
         if pauli_bias is not None:
@@ -1993,12 +1976,7 @@
         for weight in range(1, len(sample_allocation)):
             num_failures[weight], num_discards[weight] = (
                 self._estimate_decoding_fidelity_and_variance(
-                    weight,
-                    sample_allocation[weight],
-                    decoder,
-                    logical_ops,
-                    pauli_bias_zxy,
-                    discard_weight,
+                    weight, sample_allocation[weight], decoder, logical_ops, pauli_bias_zxy
                 )
             )
         return ErrorRateFunc(
@@ -2012,7 +1990,6 @@ def _estimate_decoding_fidelity_and_variance(
         decoder: decoders.Decoder,
         logical_ops: npt.NDArray[np.int_],
         pauli_bias_zxy: npt.NDArray[np.floating] | None,
-        discard_weight: Collection[int],
     ) -> tuple[int, int]:
         """Sample and correct errors of a fixed weight. Return logical error and discard counts."""
         num_failures = 0
@@ -2037,9 +2014,9 @@
 
             error = np.concatenate([error_x, error_z]).view(self.field)
             syndrome = syndrome_matrix @ error
-            decoded_error = decoder.decode(syndrome.view(np.ndarray)).view(self.field)
-            if discard_weight and math.symplectic_weight(decoded_error) in discard_weight:
-                num_discards += 1  # pragma: no cover
+            decoded_error, erasure = _get_error_and_erasure(decoder, syndrome)
+            if erasure:  # pragma: no cover
+                num_discards += 1
             elif np.any(logical_ops @ math.symplectic_conjugate(decoded_error - error)):
                 num_failures += 1
 
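A note on the failure check in this hunk: with the residual error r = decoded_error - error written in symplectic form r = (r_x | r_z), decoding succeeds (up to stabilizers) exactly when r commutes with every logical operator. Assuming the usual convention that math.symplectic_conjugate swaps the X and Z halves of a vector, so that an ordinary dot product evaluates the symplectic form, the test np.any(logical_ops @ math.symplectic_conjugate(r)) checks whether

$$
\ell \,\Lambda\, r \neq 0 \ \text{ for some logical operator } \ell,
\qquad
\Lambda = \begin{pmatrix} 0 & I \\ I & 0 \end{pmatrix},
$$

which flags precisely the residual errors that act nontrivially on the encoded information.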
@@ -3028,7 +3005,6 @@ def get_logical_error_rate_func(
         *,
         decoder_x_kwargs: dict[str, Any] | None = None,
         decoder_z_kwargs: dict[str, Any] | None = None,
-        discard_weight: int | Collection[int] = (),
         **decoder_kwargs: Any,
     ) -> ErrorRateFunc:
         """Construct a function from physical --> logical error rate in a code capacity model.
@@ -3049,9 +3025,6 @@
         See help(qldpc.codes.ClassicalCode.get_logical_error_rate_func) for more details about how
         this method works.
         """
-        if not isinstance(discard_weight, Collection):
-            discard_weight = [discard_weight]
-
         # collect relative probabilities of Z, X, and Y errors
         pauli_bias_zxy: npt.NDArray[np.floating] | None
         if pauli_bias is not None:
@@ -3096,7 +3069,6 @@ def get_logical_error_rate_func(
                     logicals_x,
                     logicals_z,
                     pauli_bias_zxy,
-                    discard_weight,
                 )
             )
         return ErrorRateFunc(
@@ -3112,7 +3084,6 @@ def _estimate_css_decoding_fidelity_and_variance(
         logicals_x: npt.NDArray[np.int_],
         logicals_z: npt.NDArray[np.int_],
         pauli_bias_zxy: npt.NDArray[np.floating] | None,
-        discard_weight: Collection[int],
     ) -> tuple[int, int]:
         """Sample and correct errors of a fixed weight. Return logical error and discard counts."""
         num_failures = 0
@@ -3129,14 +3100,13 @@
                 range(1, self.field.order), size=len(error_locs_z)
             )
             syndrome_z = self.matrix_x @ error_z
-            decoded_error_z = decoder_z.decode(syndrome_z.view(np.ndarray)).view(self.field)
-
-            if discard_weight and np.count_nonzero(decoded_error_z) in discard_weight:
+            decoded_error_z, erasure_z = _get_error_and_erasure(decoder_z, syndrome_z)
+            if erasure_z:  # pragma: no cover
                 num_discards += 1
                 continue
 
-            failure_z = np.any(logicals_x @ (decoded_error_z - error_z))
-            if not discard_weight and failure_z:
+            failure_z = np.any(logicals_x @ (decoded_error_z.view(self.field) - error_z))
+            if not getattr(decoder_x, "has_erasure_bit", False) and failure_z:
                 # If we are _not_ post-selecting and there _was_ a decoding failure, then there is
                 # no need to consider X-type errors, because we will record one failure either way.
                 num_failures += 1
@@ -3149,10 +3119,8 @@
                 range(1, self.field.order), size=len(error_locs_x)
             )
             syndrome_x = self.matrix_z @ error_x
-            decoded_error_x = decoder_x.decode(syndrome_x.view(np.ndarray)).view(self.field)
-            if (
-                discard_weight and np.count_nonzero(decoded_error_x) in discard_weight
-            ):  # pragma: no cover
+            decoded_error_x, erasure_x = _get_error_and_erasure(decoder_x, syndrome_x)
+            if erasure_x | erasure_z:  # pragma: no cover
                 num_discards += 1
                 continue
             if failure_z or np.any(logicals_z @ (decoded_error_x - error_x)):
@@ -3161,88 +3129,6 @@
         return num_failures, num_discards
 
 
-def _join_slices(*sectors: Slice) -> npt.NDArray[np.int_]:
-    """Join index slices together into one slice."""
-    return np.concatenate(
-        [
-            np.arange(sector.start or 0, sector.stop, sector.step or 1, dtype=int)
-            if isinstance(sector, slice)
-            else sector
-            for sector in sectors
-        ]
-    ).astype(int)
-
-
-def _is_canonicalized(matrix: npt.NDArray[np.int_]) -> bool:
-    """Is the given matrix in canonical (row-reduced) form?"""
-    return all(
-        matrix[row, pivot] and not np.any(matrix[:row, pivot])
-        for row, pivot in enumerate(math.first_nonzero_cols(matrix))
-    )
-
-
-def _get_sample_allocation(
-    num_samples: int, block_length: int, max_error_rate: float
-) -> npt.NDArray[np.int_]:
-    """Construct an allocation of samples by error weight.
-
-    This method returns an array whose k-th entry is the number of samples to devote to errors of
-    weight k, given a maximum error rate that we care about.
-    """
-    probs = _get_error_probs_by_weight(block_length, max_error_rate)
-
-    # zero out the distribution at k=0, flatten it out to the left of its peak, and renormalize
-    probs[0] = 0
-    probs[1 : np.argmax(probs)] = probs.max()
-    probs /= np.sum(probs)
-
-    # assign sample numbers according to the probability distribution constructed above,
-    # increasing num_samples if necessary to deal with weird edge cases from round-off errors
-    while np.sum(sample_allocation := np.round(probs * num_samples).astype(int)) < num_samples:
-        num_samples += 1  # pragma: no cover
-
-    # allocate one sample to k=0 to fix an edge case in ErrorRateFunc
-    sample_allocation[0] = 1
-
-    # truncate trailing zeros and return
-    nonzero = np.nonzero(sample_allocation)[0]
-    return sample_allocation[: nonzero[-1] + 1]
-
-
-def _get_error_probs_by_weight(
-    block_length: int, error_rate: float, max_weight: int | None = None
-) -> npt.NDArray[np.floating]:
-    """Build an array whose k-th entry is the probability of a weight-k error in a code.
-
-    If a code has block_length n and each bit has an independent probability p = error_rate of an
-    error, then the probability of k errors is (n choose k) p**k (1-p)**(n-k).
-
-    We compute the above probability using logarithms because otherwise the combinatorial factor
-    (n choose k) might be too large to handle.
-    """
-    max_weight = max_weight or block_length
-
-    # deal with some pathological cases
-    if error_rate == 0:
-        probs = np.zeros(max_weight + 1)
-        probs[0] = 1
-        return probs
-    elif error_rate == 1:
-        probs = np.zeros(max_weight + 1)
-        probs[block_length:] = 1
-        return probs
-
-    log_error_rate = np.log(error_rate)
-    log_one_minus_error_rate = np.log(1 - error_rate)
-    log_probs = [
-        math.log_choose(block_length, kk)
-        + kk * log_error_rate
-        + (block_length - kk) * log_one_minus_error_rate
-        for kk in range(max_weight + 1)
-    ]
-    return np.exp(log_probs)
-
-
 OneOrManyFloats = TypeVar("OneOrManyFloats", float, Iterable[float])
 
 
@@ -3329,3 +3215,96 @@ def __call__(
         value = weight_probs @ values
         variance = np.sqrt(weight_probs**2 @ variances)
         return 1 - float(value), float(variance)
+
+
+def _get_sample_allocation(
+    num_samples: int, block_length: int, max_error_rate: float
+) -> npt.NDArray[np.int_]:
+    """Construct an allocation of samples by error weight.
+
+    This method returns an array whose k-th entry is the number of samples to devote to errors of
+    weight k, given a maximum error rate that we care about.
+    """
+    probs = _get_error_probs_by_weight(block_length, max_error_rate)
+
+    # zero out the distribution at k=0, flatten it out to the left of its peak, and renormalize
+    probs[0] = 0
+    probs[1 : np.argmax(probs)] = probs.max()
+    probs /= np.sum(probs)
+
+    # assign sample numbers according to the probability distribution constructed above,
+    # increasing num_samples if necessary to deal with weird edge cases from round-off errors
+    while np.sum(sample_allocation := np.round(probs * num_samples).astype(int)) < num_samples:
+        num_samples += 1  # pragma: no cover
+
+    # allocate one sample to k=0 to fix an edge case in ErrorRateFunc
+    sample_allocation[0] = 1
+
+    # truncate trailing zeros and return
+    nonzero = np.nonzero(sample_allocation)[0]
+    return sample_allocation[: nonzero[-1] + 1]
+
+
+def _get_error_probs_by_weight(
+    block_length: int, error_rate: float, max_weight: int | None = None
+) -> npt.NDArray[np.floating]:
+    """Build an array whose k-th entry is the probability of a weight-k error in a code.
+
+    If a code has block_length n and each bit has an independent probability p = error_rate of an
+    error, then the probability of k errors is (n choose k) p**k (1-p)**(n-k).
+
+    We compute the above probability using logarithms because otherwise the combinatorial factor
+    (n choose k) might be too large to handle.
+    """
+    max_weight = max_weight or block_length
+
+    # deal with some pathological cases
+    if error_rate == 0:
+        probs = np.zeros(max_weight + 1)
+        probs[0] = 1
+        return probs
+    elif error_rate == 1:
+        probs = np.zeros(max_weight + 1)
+        probs[block_length:] = 1
+        return probs
+
+    log_error_rate = np.log(error_rate)
+    log_one_minus_error_rate = np.log(1 - error_rate)
+    log_probs = [
+        math.log_choose(block_length, kk)
+        + kk * log_error_rate
+        + (block_length - kk) * log_one_minus_error_rate
+        for kk in range(max_weight + 1)
+    ]
+    return np.exp(log_probs)
+
+
+def _get_error_and_erasure(
+    decoder: decoders.Decoder,
+    syndrome: galois.FieldArray,
+) -> tuple[galois.FieldArray, bool]:
+    """Decode a syndrome and extract an erasure bit, if applicable."""
+    error = decoder.decode(syndrome.view(np.ndarray))
+    if getattr(decoder, "has_erasure_bit", False):  # pragma: no cover
+        return error[:-1].view(type(syndrome)), bool(error[-1])
+    return error.view(type(syndrome)), False
+
+
+def _join_slices(*sectors: Slice) -> npt.NDArray[np.int_]:
+    """Join index slices together into one slice."""
+    return np.concatenate(
+        [
+            np.arange(sector.start or 0, sector.stop, sector.step or 1, dtype=int)
+            if isinstance(sector, slice)
+            else sector
+            for sector in sectors
+        ]
+    ).astype(int)
+
+
+def _is_canonicalized(matrix: npt.NDArray[np.int_]) -> bool:
+    """Is the given matrix in canonical (row-reduced) form?"""
+    return all(
+        matrix[row, pivot] and not np.any(matrix[:row, pivot])
+        for row, pivot in enumerate(math.first_nonzero_cols(matrix))
+    )
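For reference, the log-space evaluation in _get_error_probs_by_weight computes

$$
\log P(k) = \log\binom{n}{k} + k \log p + (n - k) \log(1 - p),
\qquad
P(k) = e^{\log P(k)},
$$

with n the block length and p the error rate; the sum of logarithms stays finite even when the factor $\binom{n}{k}$ alone would overflow a floating-point number.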
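The new _get_error_and_erasure helper relies on a simple convention: a decoder that sets has_erasure_bit = True appends one extra entry to its decoded output, flagging that the sample should be discarded. Below is a hypothetical decoder conforming to that convention, for illustration only (the in-library example is the lookup decoder that the updated tests enable with with_lookup=True):

```python
import numpy as np


class ToyErasureDecoder:
    """Hypothetical decoder obeying the erasure-bit convention described above.

    It only "decodes" the trivial syndrome: any nonzero syndrome raises the
    erasure flag, so _get_error_and_erasure reports the sample as a discard.
    """

    has_erasure_bit = True

    def __init__(self, block_length: int) -> None:
        self.block_length = block_length

    def decode(self, syndrome: np.ndarray) -> np.ndarray:
        erasure = bool(np.any(syndrome))
        # trivial candidate error, followed by one trailing erasure bit
        return np.concatenate([np.zeros(self.block_length, dtype=int), [int(erasure)]])
```

With such a decoder, _get_error_and_erasure strips the trailing entry: it returns the first block_length entries as the candidate error, and the last entry, cast to bool, as the erasure flag.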
9 changes: 4 additions & 5 deletions src/qldpc/codes/common_test.py
@@ -231,13 +231,12 @@ def test_classical_capacity() -> None:
 
     # compute discard rates
     logical_error_rate_func = code.get_logical_error_rate_func(
-        num_samples=4, max_error_rate=0.5, discard_weight=1
+        num_samples=4, max_error_rate=0.5, with_lookup=True, max_weight=0
     )
     assert logical_error_rate_func(0, discard_rate=True) == (0, 0)
-    assert logical_error_rate_func(0.5, discard_rate=True) == (0.5, 0)
 
     # test cap on physical error rate
-    logical_error_rate_func = code.get_logical_error_rate_func(num_samples=10, max_error_rate=0.5)
+    logical_error_rate_func = code.get_logical_error_rate_func(num_samples=1, max_error_rate=0.5)
     with pytest.raises(ValueError, match="error rates greater than"):
         logical_error_rate_func(1)
 
@@ -539,7 +538,7 @@ def test_quantum_capacity() -> None:
 
     # compute discard rates (trivial deterministic example)
     logical_error_rate_func = code.get_logical_error_rate_func(
-        num_samples=1, max_error_rate=1, discard_weight=1
+        num_samples=1, max_error_rate=1, with_lookup=True, max_weight=1
     )
     assert logical_error_rate_func(0, discard_rate=True) == (0, 0)
 
@@ -746,6 +745,6 @@ def test_css_capacity() -> None:
 
     # compute discard rates (trivial deterministic example)
     logical_error_rate_func = code.get_logical_error_rate_func(
-        num_samples=1, max_error_rate=1, discard_weight=1
+        num_samples=1, max_error_rate=1, with_lookup=True, max_weight=1
     )
     assert logical_error_rate_func(0, discard_rate=True) == (0, 0)
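Taken together, the updated tests replace the removed discard_weight knob with post-selection through a bounded-weight lookup decoder. A sketch of the resulting call pattern, assuming code is a qldpc code instance constructed elsewhere:

```python
# post-selected logical error rates via a bounded-weight lookup decoder;
# `code` is assumed to be a qldpc code object constructed elsewhere
logical_error_rate_func = code.get_logical_error_rate_func(
    num_samples=10_000, max_error_rate=0.3, with_lookup=True, max_weight=1
)
error_rate, uncertainty = logical_error_rate_func(0.01)
discard_rate, uncertainty = logical_error_rate_func(0.01, discard_rate=True)
```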