Commit a4f5446

[CodeStyle][Typos][A-46,A-47,N-15] Fix typos (axises, aixs, numberic) (#69856)
1 parent 163ee6b commit a4f5446

12 files changed: +40 -44 lines

_typos.toml

Lines changed: 0 additions & 4 deletions

@@ -26,9 +26,6 @@ UE = "UE"
 unpacket = "unpacket"
 
 # These words need to be fixed
-axises = 'axises'
-Axises = 'Axises'
-aixs = 'aixs'
 beacuse = 'beacuse'
 becasue = 'becasue'
 Becasue = 'Becasue'
@@ -382,7 +379,6 @@ Normlized = 'Normlized'
 normlize = 'normlize'
 noraml = 'noraml'
 numer = 'numer'
-Numberic = 'Numberic'
 occured = 'occured'
 Ocurred = 'Ocurred'
 occures = 'occures'

paddle/cinn/common/axis.cc

Lines changed: 8 additions & 8 deletions

@@ -23,7 +23,7 @@
 namespace cinn {
 namespace common {
 
-static const std::vector<std::string> kAxises({
+static const std::vector<std::string> kAxes({
     "i",  // level 0
     "j",  // level 1
     "k",  // level 2
@@ -49,12 +49,12 @@ static const std::vector<std::string> kAxises({
 });
 
 std::string axis_name(int level) {
-  if (level < kAxises.size()) {
-    return kAxises[level];
+  if (level < kAxes.size()) {
+    return kAxes[level];
   }
   // upper level
-  int repeat_num = 1 + (level / kAxises.size());
-  const auto& base_axis = kAxises[level % kAxises.size()];
+  int repeat_num = 1 + (level / kAxes.size());
+  const auto& base_axis = kAxes[level % kAxes.size()];
 
   // if the level greater than kAxis, repeat the axis, like:
   // level == 22 ==> axis = "ii"
@@ -89,7 +89,7 @@ std::vector<ir::Expr> GenDefaultAxisAsExpr(int naxis) {
 }
 
 static const std::set<std::string>& axis_set() {
-  static std::set<std::string> x(kAxises.begin(), kAxises.end());
+  static std::set<std::string> x(kAxes.begin(), kAxes.end());
   return x;
 }
 
@@ -102,13 +102,13 @@ bool IsAxisNameReserved(const std::string& x) {
     return true;
   }
   if (!axis_set().count(std::string(1, x[0]))) {
-    // all char in axis should in kAxises
+    // all char in axis should in kAxes
     return false;
   }
   bool is_repeat_axis = true;
   for (int i = 1; i < x.size(); ++i) {
     if (x[i] != x[0]) {
-      // the axis are repeat with the char in kAxises
+      // the axis are repeat with the char in kAxes
       is_repeat_axis = false;
       break;
     }
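
For reference, the renamed helper follows a simple repeat scheme; below is a minimal Python sketch of that logic (an illustrative mirror, not the CINN API; it assumes kAxes holds 22 single-letter base names, consistent with the "level == 22 ==> axis = 'ii'" comment):

    def axis_name(level, base_axes):
        n = len(base_axes)
        if level < n:
            return base_axes[level]
        # Past the base list, repeat the base letter:
        # with 22 names, level 22 -> "ii", level 44 -> "iii".
        repeat_num = 1 + level // n
        return base_axes[level % n] * repeat_num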

paddle/cinn/hlir/dialect/operator/transforms/reduce_as_to_sum_pass.cc

Lines changed: 1 addition & 1 deletion

@@ -152,7 +152,7 @@ class ReduceAsOpPattern
     size_t x_rank = x_shape.size();
     size_t y_rank = y_shape.size();
 
-    // Get reduc aixs and
+    // Get reduc axis and
     int64_t compare_offset = x_rank - y_rank;
 
     for (size_t i = 0; i < y_rank; ++i) {
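
The fragment only shows the rank comparison, but the usual reduce_as axis inference can be sketched as follows (a hedged reconstruction, assuming y's shape is aligned against x's trailing dims; the function name is hypothetical):

    def infer_reduce_axes(x_shape, y_shape):
        offset = len(x_shape) - len(y_shape)
        axes = list(range(offset))  # leading dims y lacks are reduced
        for i, (xd, yd) in enumerate(zip(x_shape[offset:], y_shape)):
            if yd == 1 and xd != 1:  # aligned dims that collapse to 1
                axes.append(i + offset)
        return axes

    assert infer_reduce_axes([2, 3, 4], [3, 1]) == [0, 2]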

paddle/cinn/lang/compute.cc

Lines changed: 2 additions & 2 deletions

@@ -155,9 +155,9 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
                   std::function<Expr(const std::vector<Expr> &)> fn,
                   const std::string &name,
                   const std::vector<Expr> &shape) {
-  auto axises = cinn::common::GenDefaultAxis(domain.size());
+  auto axes = cinn::common::GenDefaultAxis(domain.size());
   std::vector<Expr> _axis;
-  for (auto &x : axises) _axis.push_back(x);
+  for (auto &x : axes) _axis.push_back(x);
   Expr fn_body = fn(_axis);
 
   std::vector<Var> reduce_axis;

paddle/fluid/inference/tensorrt/convert/slice_op.cc

Lines changed: 5 additions & 5 deletions

@@ -39,7 +39,7 @@ class SliceOpConverter : public OpConverter {
         PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("starts"));
     std::vector<int> ends =
         PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("ends"));
-    std::vector<int> decrease_axises =
+    std::vector<int> decrease_axes =
        PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("decrease_axis"));
     auto input_dims = input->getDimensions();
     nvinfer1::ILayer* layer = nullptr;
@@ -139,15 +139,15 @@ class SliceOpConverter : public OpConverter {
       layer->setInput(1, *start_tensor);
       layer->setInput(2, *size_tensor);
 
-      if (!decrease_axises.empty()) {
+      if (!decrease_axes.empty()) {
         std::vector<int32_t> gather_indices;
         for (int i = 0; i < trt_size_dims.nbDims; i++) {
-          if (decrease_axises.end() !=
-              std::find(decrease_axises.begin(), decrease_axises.end(), i))
+          if (decrease_axes.end() !=
+              std::find(decrease_axes.begin(), decrease_axes.end(), i))
             continue;
           gather_indices.push_back(i);
         }
-        if (gather_indices.empty()) gather_indices.push_back(decrease_axises[0]);
+        if (gather_indices.empty()) gather_indices.push_back(decrease_axes[0]);
         auto real_size_tensor = Gather(size_tensor, gather_indices);
         layer = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *layer->getOutput(0));
         layer->setInput(1, *real_size_tensor);
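
The decrease_axis handling above amounts to squeezing the decreased dimensions out of the sliced result while always keeping at least one dimension (the gather_indices fallback). A NumPy analogy of those semantics (an illustrative sketch, not the TensorRT code; the helper name is hypothetical):

    import numpy as np

    def apply_decrease_axis(sliced, decrease_axes):
        keep = [i for i in range(sliced.ndim) if i not in decrease_axes]
        if not keep:  # every dim decreased: keep one size-1 dim
            keep = [decrease_axes[0]]
        return sliced.reshape([sliced.shape[i] for i in keep])

    x = np.arange(24).reshape(2, 3, 4)
    y = apply_decrease_axis(x[0:1, :, :], decrease_axes=[0])
    assert y.shape == (3, 4)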

paddle/fluid/inference/tensorrt/convert/strided_slice_op.cc

Lines changed: 5 additions & 5 deletions

@@ -39,7 +39,7 @@ class StridedSliceOpConverter : public OpConverter {
         PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("ends"));
     std::vector<int> strides =
         PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("strides"));
-    std::vector<int> decrease_axises =
+    std::vector<int> decrease_axes =
         PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("decrease_axis"));
 
     nvinfer1::ILayer* layer = nullptr;
@@ -96,15 +96,15 @@ class StridedSliceOpConverter : public OpConverter {
       layer->setInput(2, *size_tensor);
       layer->setInput(3, *step_tensor);
 
-      if (!decrease_axises.empty()) {
+      if (!decrease_axes.empty()) {
         std::vector<int32_t> gather_indices;
         for (int i = 0; i < trt_size_dims.nbDims; i++) {
-          if (decrease_axises.end() !=
-              std::find(decrease_axises.begin(), decrease_axises.end(), i))
+          if (decrease_axes.end() !=
+              std::find(decrease_axes.begin(), decrease_axes.end(), i))
             continue;
           gather_indices.push_back(i);
         }
-        if (gather_indices.empty()) gather_indices.push_back(decrease_axises[0]);
+        if (gather_indices.empty()) gather_indices.push_back(decrease_axes[0]);
         auto real_size_tensor = Gather(size_tensor, gather_indices);
         layer = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *layer->getOutput(0));
         layer->setInput(1, *real_size_tensor);

python/paddle/tensor/stat.py

Lines changed: 3 additions & 3 deletions

@@ -721,7 +721,7 @@ def _compute_quantile(
         axis (int|list, optional): The axis along which to calculate quantile. ``axis`` should be int or list of int.
             ``axis`` should be in range [-D, D), where D is the dimensions of ``x`` .
             If ``axis`` is less than 0, it works the same way as :math:`axis + D`.
-            If ``axis`` is a list, quantile is calculated over all elements of given axises.
+            If ``axis`` is a list, quantile is calculated over all elements of given axes.
             If ``axis`` is None, quantile is calculated over all elements of ``x``. Default is None.
         keepdim (bool, optional): Whether to reserve the reduced dimension(s)
             in the output Tensor. If ``keepdim`` is True, the dimensions of
@@ -905,7 +905,7 @@ def quantile(
         axis (int|list, optional): The axis along which to calculate quantile. ``axis`` should be int or list of int.
             ``axis`` should be in range [-D, D), where D is the dimensions of ``x`` .
             If ``axis`` is less than 0, it works the same way as :math:`axis + D`.
-            If ``axis`` is a list, quantile is calculated over all elements of given axises.
+            If ``axis`` is a list, quantile is calculated over all elements of given axes.
             If ``axis`` is None, quantile is calculated over all elements of ``x``. Default is None.
         keepdim (bool, optional): Whether to reserve the reduced dimension(s)
             in the output Tensor. If ``keepdim`` is True, the dimensions of
@@ -989,7 +989,7 @@ def nanquantile(
         axis (int|list, optional): The axis along which to calculate quantile. ``axis`` should be int or list of int.
             ``axis`` should be in range [-D, D), where D is the dimensions of ``x`` .
             If ``axis`` is less than 0, it works the same way as :math:`axis + D`.
-            If ``axis`` is a list, quantile is calculated over all elements of given axises.
+            If ``axis`` is a list, quantile is calculated over all elements of given axes.
             If ``axis`` is None, quantile is calculated over all elements of ``x``. Default is None.
         keepdim (bool, optional): Whether to reserve the reduced dimension(s)
             in the output Tensor. If ``keepdim`` is True, the dimensions of
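
A short usage sketch of the documented axis-list behavior (output shape follows from the docstring: quantile reduces over every listed axis):

    import paddle

    x = paddle.rand([2, 3, 4])
    # Median over axes 0 and 1 together; only axis 2 survives.
    print(paddle.quantile(x, q=0.5, axis=[0, 1]).shape)  # [4]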

test/custom_op/test_custom_concat.py

Lines changed: 5 additions & 5 deletions

@@ -119,7 +119,7 @@ def setUp(self):
             np.array([[1, 2, 3], [4, 5, 6]]),
             np.array([[11, 12, 13], [14, 15, 16]]),
         ]
-        self.axises = [0, 1]
+        self.axes = [0, 1]
 
     def check_output(self, out, pd_out, name):
         np.testing.assert_array_equal(
@@ -130,7 +130,7 @@ def check_output(self, out, pd_out, name):
 
     def test_dynamic(self):
         for dtype in self.dtypes:
-            for axis in self.axises:
+            for axis in self.axes:
                 out, grad_inputs = concat_dynamic(
                     custom_ops.custom_concat, dtype, self.np_inputs, axis
                 )
@@ -144,7 +144,7 @@ def test_dynamic(self):
 
     def test_static(self):
         for dtype in self.dtypes:
-            for axis in self.axises:
+            for axis in self.axes:
                 out, x1_grad, x2_grad = concat_static(
                     custom_ops.custom_concat, dtype, self.np_inputs, axis
                 )
@@ -158,7 +158,7 @@ def test_static(self):
 
     def test_dynamic_with_attr(self):
         for dtype in self.dtypes:
-            for axis in self.axises:
+            for axis in self.axes:
                 out, grad_inputs = concat_dynamic(
                     custom_ops.custom_concat_with_attr,
                     dtype,
@@ -176,7 +176,7 @@ def test_dynamic_with_attr(self):
 
     def test_static_with_attr(self):
         for dtype in self.dtypes:
-            for axis in self.axises:
+            for axis in self.axes:
                 out, x1_grad, x2_grad = concat_static(
                     custom_ops.custom_concat_with_attr,
                     dtype,

test/legacy_test/test_kthvalue_op.py

Lines changed: 3 additions & 3 deletions

@@ -121,7 +121,7 @@ def init_dtype(self):
 
 class TestKthvalueOpKernels(unittest.TestCase):
     def setUp(self):
-        self.axises = [2, -1]
+        self.axes = [2, -1]
 
     def test_kthvalue_op(self):
         paddle.disable_static()
@@ -132,7 +132,7 @@ def test_cpu_kernel():
             paddle.set_device('cpu')
             inputs = np.random.random(shape)
             tensor = paddle.to_tensor(inputs)
-            for axis in self.axises:
+            for axis in self.axes:
                 value_expect, indice_expect = cal_kthvalue(inputs, k, axis)
                 v, inds = paddle.kthvalue(tensor, k, axis)
                 np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05)
@@ -146,7 +146,7 @@ def test_gpu_kernel():
            paddle.set_device('gpu')
            inputs = np.random.random(shape)
            tensor = paddle.to_tensor(inputs)
-            for axis in self.axises:
+            for axis in self.axes:
                value_expect, indice_expect = cal_kthvalue(inputs, k, axis)
                v, inds = paddle.kthvalue(tensor, k, axis)
                np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05)
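
For context, paddle.kthvalue returns the k-th smallest entries along an axis plus their indices; a small self-contained example (expected outputs assume kthvalue's smallest-first ordering):

    import paddle

    x = paddle.to_tensor([[3.0, 1.0, 2.0], [6.0, 5.0, 4.0]])
    values, indices = paddle.kthvalue(x, k=2, axis=-1)
    print(values.numpy())   # [2. 5.] -- 2nd smallest per row
    print(indices.numpy())  # [2 1]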

test/legacy_test/test_mode_op.py

Lines changed: 3 additions & 3 deletions

@@ -187,15 +187,15 @@ def init_args(self):
 
 class TestModeOpKernels(unittest.TestCase):
     def setUp(self):
-        self.axises = [-1, 1]
+        self.axes = [-1, 1]
         np.random.seed(666)
         self.inputs = np.ceil(np.random.rand(2, 10, 10) * 1000)
 
     def test_mode_op(self):
         def test_cpu_kernel():
             paddle.set_device('cpu')
             tensor = paddle.to_tensor(self.inputs)
-            for axis in self.axises:
+            for axis in self.axes:
                 value_expect, indice_expect = cal_mode(self.inputs, axis)
                 v, inds = paddle.mode(tensor, axis)
                 np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05)
@@ -209,7 +209,7 @@ def test_cpu_kernel():
         def test_gpu_kernel():
             paddle.set_device('gpu')
             tensor = paddle.to_tensor(self.inputs)
-            for axis in self.axises:
+            for axis in self.axes:
                 value_expect, indice_expect = cal_mode(self.inputs, axis)
                 v, inds = paddle.mode(tensor, axis)
                 np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05)

test/legacy_test/test_reduce_op.py

Lines changed: 4 additions & 4 deletions

@@ -1331,7 +1331,7 @@ def test_check_grad(self):
     reason="reduce_max is discontinuous non-derivable function,"
     " its gradient check is not supported by unittest framework."
 )
-class TestReduceMaxOpMultiAxises(OpTest):
+class TestReduceMaxOpMultiAxes(OpTest):
     """Remove Max with subgradient from gradient check to confirm the success of CI."""
 
     def setUp(self):
@@ -1363,7 +1363,7 @@ def test_check_grad(self):
     reason="reduce_min is discontinuous non-derivable function,"
     " its gradient check is not supported by unittest framework."
 )
-class TestReduceMinOpMultiAxises(OpTest):
+class TestReduceMinOpMultiAxes(OpTest):
     """Remove Min with subgradient from gradient check to confirm the success of CI."""
 
     def setUp(self):
@@ -1379,7 +1379,7 @@ def test_check_output(self):
         self.check_output()
 
 
-class TestKeepDimReduceSumMultiAxises(OpTest):
+class TestKeepDimReduceSumMultiAxes(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
         self.python_api = paddle.sum
@@ -1404,7 +1404,7 @@ def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)
 
 
-class TestKeepDimReduceSumMultiAxisesForEager(OpTest):
+class TestKeepDimReduceSumMultiAxesForEager(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
         self.python_api = reduce_sum_wrapper2
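
The renamed classes exercise multi-axis reductions with keepdim; the behavior they test can be seen directly from the public API (a minimal sketch):

    import paddle

    x = paddle.ones([2, 3, 4])
    # Sum over axes 1 and 2, keeping the reduced dims as size 1.
    y = paddle.sum(x, axis=[1, 2], keepdim=True)
    print(y.shape)  # [2, 1, 1]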

tools/gen_pybind11_stub.py

Lines changed: 1 addition & 1 deletion

@@ -84,7 +84,7 @@
     'TensorLike': 'paddle._typing.TensorLike',
     'DTypeLike': 'paddle._typing.DTypeLike',
     'ShapeLike': 'paddle._typing.ShapeLike',
-    'Numberic': 'paddle._typing.Numberic',
+    'Numeric': 'paddle._typing.Numeric',
     'TypeGuard': 'typing_extensions.TypeGuard',
     '_Interpolation': 'paddle.tensor.stat._Interpolation',
     'ParamAttrLike': 'paddle._typing.ParamAttrLike',
