@@ -3,13 +3,7 @@
 from functools import wraps as _wraps
 from builtins import all as _builtin_all, any as _builtin_any
 
-from ..common._aliases import (matrix_transpose as _aliases_matrix_transpose,
-                               vecdot as _aliases_vecdot,
-                               clip as _aliases_clip,
-                               unstack as _aliases_unstack,
-                               cumulative_sum as _aliases_cumulative_sum,
-                               cumulative_prod as _aliases_cumulative_prod,
-                               )
+from ..common import _aliases
 from .._internal import get_xp
 
 from ._info import __array_namespace_info__
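For context, `get_xp` (imported above from `.._internal`) binds an array
namespace into the backend-agnostic functions in `..common._aliases`, which
take that namespace as a keyword-only `xp` argument. A minimal sketch of the
idea, not the actual helper (the real one also tidies the wrapper's public
signature):

    from functools import wraps

    def get_xp(xp):
        # Return a decorator that injects `xp` (here: torch) into a
        # common-code function expecting it as a keyword-only argument.
        def inner(f):
            @wraps(f)
            def wrapped(*args, **kwargs):
                return f(*args, xp=xp, **kwargs)
            return wrapped
        return inner

Importing the `_aliases` module wholesale, instead of each name under a
`_aliases_*` alias, keeps the import list stable as new aliases are added.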
@@ -215,10 +209,10 @@ def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keep
         return torch.clone(x)
     return torch.amin(x, axis, keepdims=keepdims)
 
-clip = get_xp(torch)(_aliases_clip)
-unstack = get_xp(torch)(_aliases_unstack)
-cumulative_sum = get_xp(torch)(_aliases_cumulative_sum)
-cumulative_prod = get_xp(torch)(_aliases_cumulative_prod)
+clip = get_xp(torch)(_aliases.clip)
+unstack = get_xp(torch)(_aliases.unstack)
+cumulative_sum = get_xp(torch)(_aliases.cumulative_sum)
+cumulative_prod = get_xp(torch)(_aliases.cumulative_prod)
 
 # torch.sort also returns a tuple
 # https://github.com/pytorch/pytorch/issues/70921
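The rebinding is behavior-preserving: each public name is still the common
implementation with torch baked in. A quick usage sketch (printed values
illustrative):

    import torch
    import array_api_compat.torch as xp

    x = torch.asarray([-3.0, 0.5, 7.0])
    xp.clip(x, 0.0, 1.0)    # tensor([0.0000, 0.5000, 1.0000])
    xp.cumulative_sum(x)    # Array API spelling of torch.cumsum(x, dim=0)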
@@ -710,8 +704,8 @@ def matmul(x1: array, x2: array, /, **kwargs) -> array:
     x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
     return torch.matmul(x1, x2, **kwargs)
 
-matrix_transpose = get_xp(torch)(_aliases_matrix_transpose)
-_vecdot = get_xp(torch)(_aliases_vecdot)
+matrix_transpose = get_xp(torch)(_aliases.matrix_transpose)
+_vecdot = get_xp(torch)(_aliases.vecdot)
 
 def vecdot(x1: array, x2: array, /, *, axis: int = -1) -> array:
     x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
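The leading underscore on `_vecdot` is deliberate: the common implementation
stays private, and the public `vecdot` in the same hunk runs `_fix_promotion`
before delegating to it, mirroring `matmul` above. The promotion step matters
because some torch kernels reject mixed input dtypes that the Array API says
should promote. A hedged illustration (assuming `_fix_promotion` casts both
operands to a common result dtype, as the wrappers imply):

    import torch
    import array_api_compat.torch as xp

    a = torch.asarray([[1, 2], [3, 4]])          # integer dtype
    b = torch.asarray([[1.0, 0.0], [0.0, 1.0]])  # floating dtype

    # torch.matmul(a, b) raises a dtype-mismatch error, since torch.matmul
    # does not type-promote its inputs; the compat wrapper promotes first,
    # so this succeeds and returns a floating-point result:
    xp.matmul(a, b)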