
Commit 142f0f8

cyyever authored and pytorchmergebot committed
Enable modernize-use-default-member-init (pytorch#149046)
``modernize-use-default-member-init`` prefers default member initializers in the class definition, which makes more ``= default`` constructors possible. Some violations of other modernize rules have also been fixed.

Pull Request resolved: pytorch#149046
Approved by: https://github.com/zou3519
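
For illustration only (hypothetical ``Widget`` types, not taken from this diff), the transformation the check encourages looks roughly like this: constants move from the constructor initializer list into default member initializers, after which the constructor can be ``= default``:

    // Before: every member is initialized by a hand-written constructor.
    struct WidgetBefore {
      WidgetBefore() : count_(0), enabled_(false) {}
      int count_;
      bool enabled_;
    };

    // After: default member initializers carry the constants, and the
    // constructor can be defaulted (or dropped entirely).
    struct WidgetAfter {
      WidgetAfter() = default;
      int count_{0};
      bool enabled_{false};
    };
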
1 parent 81f60f3 commit 142f0f8

File tree

20 files changed: +45 −60 lines changed


.clang-tidy

Lines changed: 0 additions & 1 deletion
@@ -52,7 +52,6 @@ modernize-*,
 -modernize-macro-to-enum,
 -modernize-return-braced-init-list,
 -modernize-use-auto,
--modernize-use-default-member-init,
 -modernize-use-using,
 -modernize-use-trailing-return-type,
 -modernize-use-nodiscard,

aten/src/ATen/core/Dict.h

Lines changed: 1 addition & 4 deletions
@@ -116,10 +116,7 @@ class DictIterator final {
 
   DictIterator(const DictIterator& rhs): entryRef_(rhs.entryRef_) {}
   DictIterator(DictIterator&& rhs) noexcept: entryRef_(std::move(rhs.entryRef_)) {}
-  DictIterator& operator=(const DictIterator& rhs) {
-    entryRef_ = rhs.entryRef_;
-    return *this;
-  }
+  DictIterator& operator=(const DictIterator& rhs) = default;
   DictIterator& operator=(DictIterator&& rhs) noexcept {
     entryRef_ = std::move(rhs.entryRef_);
     return *this;
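
The defaulted copy assignment above is behaviorally equivalent to the removed hand-written body, since ``= default`` performs member-wise copy assignment. A minimal sketch with a hypothetical single-member type:

    struct Ref { int value{0}; };

    struct Iter {
      Ref ref;
      // Member-wise copy: the same as { ref = rhs.ref; return *this; }.
      Iter& operator=(const Iter& rhs) = default;
    };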

aten/src/ATen/core/dispatch/DispatchKeyExtractor.h

Lines changed: 2 additions & 3 deletions
@@ -225,8 +225,7 @@ struct TORCH_API DispatchKeyExtractor final {
 
   explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse)
     : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse),
-      nonFallthroughKeys_(DispatchKeySet::FULL),
-      requiresBitsetPerBackend_(false) {
+      nonFallthroughKeys_(DispatchKeySet::FULL) {
     for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) {
       nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL;
     }
@@ -252,7 +251,7 @@ struct TORCH_API DispatchKeyExtractor final {
   // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast
   // path), or if we need to fall back to the slower path and check
   // nonFallthroughKeysPerBackend_
-  bool requiresBitsetPerBackend_;
+  bool requiresBitsetPerBackend_{false};
 };
 
 } // namespace c10

aten/src/ATen/cuda/tunable/Tunable.h

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ enum TORCH_CUDA_CPP_API TuningStatus {
 class TORCH_CUDA_CPP_API ResultEntry {
   public:
     explicit ResultEntry(std::string key, double time) : key_(std::move(key)), time_(time) {}
-    explicit ResultEntry(std::string key, double time, const std::string& blas_sig ) : key_(std::move(key)), time_(time), blas_sig_(blas_sig) {}
+    explicit ResultEntry(std::string key, double time, std::string blas_sig ) : key_(std::move(key)), time_(time), blas_sig_(std::move(blas_sig)) {}
     bool operator==(const ResultEntry& other) const { return key_ == other.key_; }
     bool operator!=(const ResultEntry& other) const { return key_ != other.key_; }
     operator std::string () { return key_; }
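
The updated constructor takes ``blas_sig`` by value and moves it into the member, the usual sink-parameter pattern: callers passing an lvalue pay one copy, callers passing an rvalue pay only moves. A rough, hypothetical sketch of the pattern:

    #include <string>
    #include <utility>

    struct Entry {
      // The parameter is a by-value "sink": copied for lvalue callers,
      // moved for rvalue callers, then moved again into the member.
      explicit Entry(std::string sig) : sig_(std::move(sig)) {}
      std::string sig_;
    };

    // Usage: Entry a{std::string("gemm")};        // moved, no copy
    //        std::string s = "gemm"; Entry b{s};  // one copy, then a move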

aten/src/ATen/native/RangeUtils.h

Lines changed: 3 additions & 3 deletions
@@ -2,9 +2,9 @@
 #include <c10/core/Scalar.h>
 #include <limits>
 
-namespace at {
 
-namespace native {
+
+namespace at::native {
 
 template <typename scalar_t>
 int64_t compute_arange_size(const Scalar& start, const Scalar& end, const Scalar& step) {
@@ -42,4 +42,4 @@ int64_t compute_arange_size(const Scalar& start, const Scalar& end, const Scalar
   return static_cast<int64_t>(size_d);
 }
 
-}} // namespace at::native
+} // namespace at::native
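
Several files in this commit replace nested namespace blocks with the C++17 nested namespace definition; both spellings declare the same namespaces, as in this hypothetical sketch:

    // Pre-C++17 spelling:
    namespace outer { namespace inner {
    void f();
    }} // namespace outer::inner

    // C++17 nested namespace definition (used in the updated headers):
    namespace outer::inner {
    void g();
    } // namespace outer::inner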

aten/src/ATen/native/SpectralOps.cpp

Lines changed: 1 addition & 1 deletion
@@ -756,7 +756,7 @@ static DimVector default_alldims(const Tensor& self, at::OptionalIntArrayRef dim
     IntArrayRef dim_unwrapped = *dim_opt;
     dim.resize(dim_unwrapped.size());
     for (const auto i : c10::irange(dim.size())) {
-      dim[i] = maybe_wrap_dim(dim_unwrapped[i], self.dim(), /*wrap_scalars=*/false);
+      dim[i] = maybe_wrap_dim(dim_unwrapped[i], self.dim(), /*wrap_scalar=*/false);
     }
   } else {
     dim.resize(self.dim());

aten/src/ATen/native/UnaryOps.cpp

Lines changed: 1 addition & 1 deletion
@@ -887,7 +887,7 @@ static inline void mvlgamma_check(const Tensor& self, int64_t p) {
 Tensor mvlgamma(const Tensor& self, int64_t p) {
   mvlgamma_check(self, p);
   auto dtype = c10::scalarTypeToTypeMeta(self.scalar_type());
-  if (at::isIntegralType(self.scalar_type(), /*include_bool=*/true)) {
+  if (at::isIntegralType(self.scalar_type(), /*includeBool=*/true)) {
     // int -> float promotion
     dtype = c10::get_default_dtype();
   }

aten/src/ATen/native/cuda/CuFFTPlanCache.h

Lines changed: 3 additions & 3 deletions
@@ -16,7 +16,7 @@
 #include <string>
 #include <unordered_map>
 
-namespace at { namespace native { namespace detail {
+namespace at::native::detail {
 
 // Enum representing the FFT type
 enum class CuFFTTransformType : int8_t {
@@ -58,7 +58,7 @@ struct CuFFTParams
   }
 };
 
-static_assert(std::is_trivial_v<CuFFTParams>, "");
+static_assert(std::is_trivial_v<CuFFTParams> );
 
 // Returns true if the transform type has complex input
 inline bool cufft_complex_input(CuFFTTransformType type) {
@@ -491,4 +491,4 @@ void cufft_set_plan_cache_max_size_impl(DeviceIndex device_index, int64_t max_si
 int64_t cufft_get_plan_cache_size_impl(DeviceIndex device_index);
 void cufft_clear_plan_cache_impl(DeviceIndex device_index);
 
-}}} // namespace at::native::detail
+} // namespace at::native::detail

aten/src/ATen/native/cuda/MiscUtils.h

Lines changed: 3 additions & 4 deletions
@@ -4,8 +4,8 @@
 #include <ATen/cuda/CUDAConfig.h>
 #include <ATen/cuda/PinnedMemoryAllocator.h>
 
-namespace at {
-namespace native {
+
+namespace at::native {
 
 static inline int cuda_int_cast(int64_t value, const char* varname) {
   auto result = static_cast<int>(value);
@@ -28,5 +28,4 @@ static inline Storage pin_memory(int64_t size) {
       /*resizable=*/false);
 }
 
-} // namespace native
-} // namespace at
+} // namespace at::native

aten/src/ATen/native/cuda/Resize.h

Lines changed: 2 additions & 2 deletions
@@ -5,7 +5,7 @@
 
 #include <c10/cuda/CUDAGuard.h>
 
-namespace at { namespace native {
+namespace at::native {
 
 TORCH_CUDA_CPP_API void resize_bytes_cuda(StorageImpl* storage, size_t size_bytes);
 
@@ -50,4 +50,4 @@ inline TensorImpl* resize_impl_cuda_(
   return self;
 }
 
-}}
+}

aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h

Lines changed: 3 additions & 3 deletions
@@ -36,8 +36,8 @@
 // The current pytorch implementation sets gesvdj tolerance to epsilon of a C++ data type to target the best possible precision.
 constexpr int cusolver_gesvdj_max_sweeps = 400;
 
-namespace at {
-namespace native {
+
+namespace at::native {
 
 void geqrf_batched_cublas(const Tensor& input, const Tensor& tau);
 void triangular_solve_cublas(const Tensor& A, const Tensor& B, bool left, bool upper, TransposeType transpose, bool unitriangular);
@@ -90,4 +90,4 @@ C10_EXPORT void registerLinalgDispatch(const LinalgDispatch&);
 }} // namespace cuda::detail
 #endif
 
-}} // namespace at::native
+} // namespace at::native

aten/src/ATen/native/cudnn/RNNUtils.h

Lines changed: 3 additions & 6 deletions
@@ -6,9 +6,8 @@
 #include <ATen/cudnn/cudnn-wrapper.h>
 
 // Declares utilities used by RNN.cpp and also needed by external consumers
-namespace at {
-namespace native {
-namespace cudnn_rnn {
+
+namespace at::native::cudnn_rnn {
 
 TORCH_CUDA_CPP_API std::tuple<Tensor, std::vector<Tensor>>
 copy_weights_to_flat_buf_views(
@@ -27,6 +26,4 @@ copy_weights_to_flat_buf_views(
     bool allow_type_change = false,
     bool include_bias = true);
 
-} // namespace cudnn_rnn
-} // namespace native
-} // namespace at
+} // namespace at::native::cudnn_rnn

aten/src/ATen/native/mkldnn/MKLDNNCommon.h

Lines changed: 2 additions & 2 deletions
@@ -20,7 +20,7 @@
 #endif
 #endif
 
-namespace at { namespace native {
+namespace at::native {
 
 // Mapping ScalarType to ideep tensor data_type
 TORCH_API ideep::tensor::data_type get_mkldnn_dtype(ScalarType type);
@@ -62,6 +62,6 @@ TORCH_API ideep::tensor itensor_from_tensor(const Tensor& tensor, bool from_cons
 // Set MKLDNN verbose level
 TORCH_API int set_verbose(int level);
 
-}}
+}
 
 #endif // AT_MKLDNN_ENABLED

aten/src/ATen/native/mkldnn/xpu/detail/Attr.h

Lines changed: 1 addition & 1 deletion
@@ -131,7 +131,7 @@ struct PostOpParam {
 
 class Attr {
  public:
-  Attr() : q_scale_(1.f), q_zero_point_(0) {}
+  Attr() : q_scale_(1.f) {}
  Attr(float q_scale, int64_t zp = 0) : q_scale_(q_scale), q_zero_point_(zp) {}
 
  /***** eltwise *****/

aten/src/ATen/quantized/QTensorImpl.h

Lines changed: 6 additions & 6 deletions
@@ -51,8 +51,8 @@ struct TORCH_API QTensorImpl : public c10::TensorImpl {
     auto impl = c10::make_intrusive<QTensorImpl>(
         Storage(storage()), key_set(), data_type_, quantizer_);
     copy_tensor_metadata(
-        /*src_impl=*/this,
-        /*dest_impl=*/impl.get(),
+        /*src_q_impl=*/this,
+        /*dest_q_impl=*/impl.get(),
         /*version_counter=*/version_counter,
         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
     impl->refresh_numel();
@@ -72,8 +72,8 @@ struct TORCH_API QTensorImpl : public c10::TensorImpl {
     auto impl = c10::make_intrusive<QTensorImpl>(
         Storage(storage()), key_set(), data_type_, quantizer_);
     copy_tensor_metadata(
-        /*src_impl=*/this,
-        /*dest_impl=*/impl.get(),
+        /*src_q_impl=*/this,
+        /*dest_q_impl=*/impl.get(),
         /*version_counter=*/std::move(version_counter),
         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
     impl->refresh_numel();
@@ -91,8 +91,8 @@ struct TORCH_API QTensorImpl : public c10::TensorImpl {
     AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
     auto q_impl = static_cast<const QTensorImpl*>(impl.get());
     copy_tensor_metadata(
-        /*src_impl=*/q_impl,
-        /*dest_impl=*/this,
+        /*src_q_impl=*/q_impl,
+        /*dest_q_impl=*/this,
         /*version_counter=*/version_counter(),
         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
     refresh_numel();

torch/_inductor/codegen/cpp_prefix.h

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ struct WelfordHelper {
   std::vector<Welford<T>> welford_stk;
   uint64_t depth; // depth of welford_stk.
   uint64_t num_chunks; // number of chunks stored in welford_stk.
-  WelfordHelper() {}
+  WelfordHelper() = default;
   WelfordHelper(uint64_t N) {
     uint64_t m = (N + kChunkSize - 1) / kChunkSize; //div up
     depth = m > 0 ? ceil(log2(m)) : 0;
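
Replacing ``WelfordHelper() {}`` with ``= default`` is not purely cosmetic: a defaulted default constructor can keep a type trivially default-constructible and lets value-initialization zero the members, whereas an empty user-provided body does not. The general distinction, illustrated with hypothetical types:

    #include <type_traits>

    struct Defaulted {
      int x;
      Defaulted() = default;   // trivial; Defaulted{} zero-initializes x
    };

    struct EmptyBody {
      int x;
      EmptyBody() {}           // user-provided; EmptyBody{} leaves x indeterminate
    };

    static_assert(std::is_trivially_default_constructible_v<Defaulted>);
    static_assert(!std::is_trivially_default_constructible_v<EmptyBody>);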

torch/csrc/autograd/profiler_python.cpp

Lines changed: 6 additions & 9 deletions
@@ -1152,16 +1152,13 @@ std::vector<std::shared_ptr<Result>> PythonTracer::getEvents(
 // Assuming python_tracer::PythonMemoryTracerBase is defined elsewhere
 class PythonMemoryTracer final : public python_tracer::PythonMemoryTracerBase {
  public:
-  explicit PythonMemoryTracer();
-  ~PythonMemoryTracer() override;
+  explicit PythonMemoryTracer() = default;
+  ~PythonMemoryTracer() override = default;
   void start() override;
   void stop() override;
   void export_memory_history(const std::string path) override;
 };
 
-PythonMemoryTracer::PythonMemoryTracer() {}
-PythonMemoryTracer::~PythonMemoryTracer() {}
-
 static void toggle_memory_tracing(bool enable) {
   PyGILState_STATE gil_state = PyGILState_Ensure();
   THPObjectPtr torch_cuda_memory_module(
@@ -1182,9 +1179,9 @@ static void toggle_memory_tracing(bool enable) {
   PyTuple_SetItem(args, 3, THPUtils_packInt64(100000)); // max_entries
   PyTuple_SetItem(args, 4, Py_None); // device (None)
   PyTuple_SetItem(args, 5, PyBool_FromLong(0)); // clear_history (False)
-  PyObject* result = PyObject_Call(snapshot_func.get(), args, NULL);
+  PyObject* result = PyObject_Call(snapshot_func.get(), args, nullptr);
   Py_DECREF(args);
-  if (result == NULL) {
+  if (result == nullptr) {
     return;
   }
   PyGILState_Release(gil_state);
@@ -1209,9 +1206,9 @@ void PythonMemoryTracer::export_memory_history(const std::string path) {
   PyObject* py_filename = PyUnicode_FromString(path.c_str());
   // Call the function with arguments (e.g., a file path)
   PyObject* args = PyTuple_Pack(1, py_filename);
-  PyObject* result = PyObject_Call(snapshot_func.get(), args, NULL);
+  PyObject* result = PyObject_Call(snapshot_func.get(), args, nullptr);
   Py_DECREF(args);
-  if (result == NULL) {
+  if (result == nullptr) {
     return;
   }
   PyGILState_Release(gil_state);

torch/csrc/jit/codegen/onednn/LlgaTensorImpl.cpp

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ dnnl::engine& Engine::getEngine() {
   static dnnl::graph::allocator alloc{
       pytorch_default_allocator, pytorch_default_deallocator};
   static dnnl::engine cpu_engine = dnnl::graph::make_engine_with_allocator(
-      dnnl::engine::kind::cpu, /* device_id = */ 0, alloc);
+      dnnl::engine::kind::cpu, /* index = */ 0, alloc);
   return cpu_engine;
 }

torch/csrc/jit/runtime/interpreter/code_impl.h

Lines changed: 3 additions & 6 deletions
@@ -18,9 +18,7 @@
 TORCH_DECLARE_bool(torch_jit_enable_expanded_stacks);
 TORCH_DECLARE_bool(torch_jit_expanded_stacks_mangled);
 
-namespace torch::jit {
-
-namespace interpreter {
+namespace torch::jit::interpreter {
 
 template <class Ttarget, class Tsource>
 Ttarget safe_narrow_cast(Tsource v) {
@@ -64,7 +62,7 @@ struct NodeSourceInfo {
   const char* func_name_{nullptr};
   const char* file_name_{nullptr};
   size_t line_{0};
-  NodeSourceInfo() {}
+  NodeSourceInfo() = default;
 };
 
 struct CodeImpl {
@@ -1060,5 +1058,4 @@ struct MobileCodeImpl : CodeImpl {
   bool emit_promoted_ops_;
 };
 
-} // namespace interpreter
-} // namespace torch::jit
+} // namespace torch::jit::interpreter

torch/lib/libshm/socket.h

Lines changed: 2 additions & 2 deletions
@@ -17,12 +17,12 @@
 class Socket {
  public:
   int socket_fd;
+  Socket(const Socket& other) = delete;
 
  protected:
   Socket() {
     SYSCHECK_ERR_RETURN_NEG1(socket_fd = socket(AF_UNIX, SOCK_STREAM, 0));
   }
-  Socket(const Socket& other) = delete;
   Socket(Socket&& other) noexcept : socket_fd(other.socket_fd) {
     other.socket_fd = -1;
   };
@@ -122,7 +122,7 @@ class ManagerServerSocket : public Socket {
     SYSCHECK_ERR_RETURN_NEG1(unlink(socket_path.c_str()));
   }
 
-  virtual ~ManagerServerSocket() {
+  ~ManagerServerSocket() override {
     unlink(socket_path.c_str());
   }
 
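
The destructor change in the second hunk swaps ``virtual`` for ``override``, which asks the compiler to verify that the destructor really overrides a virtual one in the base class (it remains virtual either way). A hypothetical sketch:

    struct Base {
      virtual ~Base() = default;
    };

    struct Derived : Base {
      // Would fail to compile if Base's destructor were not virtual.
      ~Derived() override = default;
    };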

0 commit comments
