
Bump clang-format from 18.1.8 to 19.1.6 (#23346)
To replace #23327
snnn authored Jan 14, 2025
1 parent d9cd27a commit 228dd16
Showing 20 changed files with 68 additions and 82 deletions.
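Nearly all of the churn below follows a handful of clang-format 19 behavior changes, the most common being empty inline bodies: `...{};` becomes `...{}`, with a space before the brace and the stray semicolon dropped. A minimal before/after sketch of the pattern (a hypothetical `Profiler` class mirroring the `EpProfiler` hunk below, not a literal copy of the repo code):

```cpp
// clang-format 18.1.8 accepted:      virtual void Start(uint64_t){};
// clang-format 19.1.6 reformats to:  virtual void Start(uint64_t) {}
class Profiler {
 public:
  virtual ~Profiler() = default;
  virtual void Start(uint64_t) {}  // empty body: space before the brace, no trailing semicolon
  virtual void Stop(uint64_t) {}
};
```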
4 changes: 2 additions & 2 deletions include/onnxruntime/core/common/profiler_common.h
@@ -81,8 +81,8 @@ class EpProfiler {
virtual ~EpProfiler() = default;
virtual bool StartProfiling(TimePoint profiling_start_time) = 0; // called when profiling starts
virtual void EndProfiling(TimePoint start_time, Events& events) = 0; // called when profiling ends, save all captures numbers to "events"
- virtual void Start(uint64_t){}; // called before op start, accept an id as argument to identify the op
- virtual void Stop(uint64_t){}; // called after op stop, accept an id as argument to identify the op
+ virtual void Start(uint64_t) {} // called before op start, accept an id as argument to identify the op
+ virtual void Stop(uint64_t) {} // called after op stop, accept an id as argument to identify the op
};

// Demangle C++ symbols
18 changes: 9 additions & 9 deletions include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h
@@ -218,18 +218,18 @@ class ThreadPoolProfiler {
WAIT_REVOKE,
MAX_EVENT
};
- ThreadPoolProfiler(int, const CHAR_TYPE*) {};
+ ThreadPoolProfiler(int, const CHAR_TYPE*) {}
~ThreadPoolProfiler() = default;
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ThreadPoolProfiler);
- void Start() {};
+ void Start() {}
std::string Stop() { return "not available for minimal build"; }
- void LogStart() {};
- void LogEnd(ThreadPoolEvent){};
- void LogEndAndStart(ThreadPoolEvent){};
- void LogStartAndCoreAndBlock(std::ptrdiff_t){};
- void LogCoreAndBlock(std::ptrdiff_t){};
- void LogThreadId(int) {};
- void LogRun(int) {};
+ void LogStart() {}
+ void LogEnd(ThreadPoolEvent) {}
+ void LogEndAndStart(ThreadPoolEvent) {}
+ void LogStartAndCoreAndBlock(std::ptrdiff_t) {}
+ void LogCoreAndBlock(std::ptrdiff_t) {}
+ void LogThreadId(int) {}
+ void LogRun(int) {}
std::string DumpChildThreadStat() { return {}; }
};
#else
10 changes: 2 additions & 8 deletions js/react_native/e2e/ios/MNISTDataHandler.mm
@@ -46,10 +46,7 @@ @implementation MNISTDataHandler

// It gets raw input data, which can be uri or byte array and others,
// returns cooked data formatted as input of a model.
- RCT_EXPORT_METHOD(preprocess
- : (NSString*)uri resolve
- : (RCTPromiseResolveBlock)resolve reject
- : (RCTPromiseRejectBlock)reject) {
+ RCT_EXPORT_METHOD(preprocess : (NSString*)uri resolve : (RCTPromiseResolveBlock)resolve reject : (RCTPromiseRejectBlock)reject) {
@try {
NSDictionary* inputDataMap = [self preprocess:uri];
resolve(inputDataMap);
@@ -60,10 +57,7 @@ @implementation MNISTDataHandler

// It gets a result from onnxruntime and a duration of session time for input data,
// returns output data formatted as React Native map.
- RCT_EXPORT_METHOD(postprocess
- : (NSDictionary*)result resolve
- : (RCTPromiseResolveBlock)resolve reject
- : (RCTPromiseRejectBlock)reject) {
+ RCT_EXPORT_METHOD(postprocess : (NSDictionary*)result resolve : (RCTPromiseResolveBlock)resolve reject : (RCTPromiseRejectBlock)reject) {
@try {
NSDictionary* cookedMap = [self postprocess:result];
resolve(cookedMap);
25 changes: 4 additions & 21 deletions js/react_native/ios/OnnxruntimeModule.mm
@@ -73,11 +73,7 @@ - (void)setBlobManager:(RCTBlobManager*)manager {
* @param reject callback for returning an error back to react native js
* @note when run() is called, the same modelPath must be passed into the first parameter.
*/
- RCT_EXPORT_METHOD(loadModel
- : (NSString*)modelPath options
- : (NSDictionary*)options resolver
- : (RCTPromiseResolveBlock)resolve rejecter
- : (RCTPromiseRejectBlock)reject) {
+ RCT_EXPORT_METHOD(loadModel : (NSString*)modelPath options : (NSDictionary*)options resolver : (RCTPromiseResolveBlock)resolve rejecter : (RCTPromiseRejectBlock)reject) {
@try {
NSDictionary* resultMap = [self loadModel:modelPath options:options];
resolve(resultMap);
@@ -95,11 +91,7 @@ - (void)setBlobManager:(RCTBlobManager*)manager {
* @param reject callback for returning an error back to react native js
* @note when run() is called, the same modelPath must be passed into the first parameter.
*/
- RCT_EXPORT_METHOD(loadModelFromBlob
- : (NSDictionary*)modelDataBlob options
- : (NSDictionary*)options resolver
- : (RCTPromiseResolveBlock)resolve rejecter
- : (RCTPromiseRejectBlock)reject) {
+ RCT_EXPORT_METHOD(loadModelFromBlob : (NSDictionary*)modelDataBlob options : (NSDictionary*)options resolver : (RCTPromiseResolveBlock)resolve rejecter : (RCTPromiseRejectBlock)reject) {
@try {
[self checkBlobManager];
NSString* blobId = [modelDataBlob objectForKey:@"blobId"];
@@ -121,10 +113,7 @@ - (void)setBlobManager:(RCTBlobManager*)manager {
* @param resolve callback for returning output back to react native js
* @param reject callback for returning an error back to react native js
*/
- RCT_EXPORT_METHOD(dispose
- : (NSString*)key resolver
- : (RCTPromiseResolveBlock)resolve rejecter
- : (RCTPromiseRejectBlock)reject) {
+ RCT_EXPORT_METHOD(dispose : (NSString*)key resolver : (RCTPromiseResolveBlock)resolve rejecter : (RCTPromiseRejectBlock)reject) {
@try {
[self dispose:key];
resolve(nil);
@@ -143,13 +132,7 @@ - (void)setBlobManager:(RCTBlobManager*)manager {
* @param resolve callback for returning an inference result back to react native js
* @param reject callback for returning an error back to react native js
*/
- RCT_EXPORT_METHOD(run
- : (NSString*)url input
- : (NSDictionary*)input output
- : (NSArray*)output options
- : (NSDictionary*)options resolver
- : (RCTPromiseResolveBlock)resolve rejecter
- : (RCTPromiseRejectBlock)reject) {
+ RCT_EXPORT_METHOD(run : (NSString*)url input : (NSDictionary*)input output : (NSArray*)output options : (NSDictionary*)options resolver : (RCTPromiseResolveBlock)resolve rejecter : (RCTPromiseRejectBlock)reject) {
@try {
NSDictionary* resultMap = [self run:url input:input output:output options:options];
resolve(resultMap);
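The React Native hunks above show the other highly visible change: with 19.1.6, `RCT_EXPORT_METHOD(...)` declarations are no longer broken after each colon-delimited selector fragment, so each exported method signature collapses onto a single line.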
@@ -144,7 +144,9 @@ - (void)testOnnxruntimeModule_AppendCoreml {
XCTAssertEqualObjects(outputNames[0], @"output");
}

- { [onnxruntimeModule dispose:sessionKey]; }
+ {
+ [onnxruntimeModule dispose:sessionKey];
+ }
}

@end
4 changes: 3 additions & 1 deletion onnxruntime/core/graph/graph.cc
@@ -50,7 +50,9 @@ namespace onnxruntime {
#define NO_CHANGE_ON_SYNC_FLAG(...) \
do { \
const bool sync_needed = GraphProtoSyncNeeded(); \
- { __VA_ARGS__; } \
+ { \
+ __VA_ARGS__; \
+ } \
GraphProtoSyncNeeded(sync_needed); \
} while (0)

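The `graph.cc` hunk shows braced blocks inside backslash-continued macros now being laid out one statement per line instead of as a single `{ __VA_ARGS__; }` run. A reduced, self-contained sketch of the same shape (a hypothetical `WITH_SAVED` macro, not from the repo):

```cpp
#include <cstdio>

// Pre-19 style kept the inner block on one line: { __VA_ARGS__; }
// The 19.1.6 layout gives the inner block its own lines:
#define WITH_SAVED(x, ...) \
  do {                     \
    int saved = (x);       \
    {                      \
      __VA_ARGS__;         \
    }                      \
    (x) = saved;           \
  } while (0)

int main() {
  int counter = 1;
  // __VA_ARGS__ re-joins the comma-split arguments, so the printf call survives intact.
  WITH_SAVED(counter, counter = 42; std::printf("inside: %d\n", counter));
  std::printf("restored: %d\n", counter);  // prints "restored: 1"
  return 0;
}
```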
@@ -56,7 +56,7 @@ ElementWiseRangedTransform<T>::~ElementWiseRangedTransform() {
Status Init(const onnxruntime::NodeAttributes& attributes) { \
return (GetFloatParam(#X, attributes, X)); \
} \
- GSL_SUPPRESS(r.11) \
+ GSL_SUPPRESS(r.11) \
ElementWiseRangedTransform<T>* Copy() const final { \
using T1 = typename std::remove_pointer<decltype(this)>::type; \
using T2 = typename std::remove_const<T1>::type; \
Expand All @@ -71,7 +71,7 @@ ElementWiseRangedTransform<T>::~ElementWiseRangedTransform() {
ORT_RETURN_IF_ERROR(GetFloatParam(#Y, attributes, Y)); \
return Status::OK(); \
} \
- GSL_SUPPRESS(r.11) \
+ GSL_SUPPRESS(r.11) \
ElementWiseRangedTransform<T>* Copy() const final { \
using T1 = typename std::remove_pointer<decltype(this)>::type; \
using T2 = typename std::remove_const<T1>::type; \
6 changes: 3 additions & 3 deletions onnxruntime/core/providers/cuda/cuda_profiler.h
@@ -33,9 +33,9 @@ class CudaProfiler final : public EpProfiler {
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(CudaProfiler);
~CudaProfiler() {}
bool StartProfiling(TimePoint) override { return true; }
- void EndProfiling(TimePoint, Events&) override {};
- void Start(uint64_t) override{};
- void Stop(uint64_t) override{};
+ void EndProfiling(TimePoint, Events&) override {}
+ void Start(uint64_t) override {}
+ void Stop(uint64_t) override {}
};

#endif
9 changes: 5 additions & 4 deletions onnxruntime/core/providers/cuda/nn/pool.cc
@@ -287,13 +287,14 @@ Status Pool<T, MaxPool<8>, Layout>::ComputeInternal(OpKernelContext* context) co
}

Tensor* I = context->Output(1, TensorShape(i_dims));
+ constexpr bool pool_template_arg = Layout == LAYOUT_NHWC;
if (nullptr != I || !this->pool_attrs_.default_dilations) {
auto i_data = nullptr == I ? nullptr : I->MutableData<int64_t>();
- MaxPoolWithIndex<CudaT, Layout == LAYOUT_NHWC>(this->Stream(context), x_shape, TensorShape(y_dims), kernel_shape,
- strides, pads, this->pool_attrs_.dilations,
- this->pool_attrs_.storage_order, x_data, y_data, i_data);
+ MaxPoolWithIndex<CudaT, pool_template_arg>(this->Stream(context), x_shape, TensorShape(y_dims), kernel_shape,
+ strides, pads, this->pool_attrs_.dilations,
+ this->pool_attrs_.storage_order, x_data, y_data, i_data);
} else {
- ORT_RETURN_IF_ERROR((Pool<T, MaxPool<1>, Layout == LAYOUT_NHWC>::ComputeInternal(context)));
+ ORT_RETURN_IF_ERROR((Pool<T, MaxPool<1>, pool_template_arg>::ComputeInternal(context)));
}
return Status::OK();
}
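The `pool.cc` hunk above (and the `space_depth_ops.cc` hunks that follow) is not purely mechanical: the `Layout == LAYOUT_NHWC` comparison is hoisted into a named `constexpr bool`, giving the template argument a short, format-stable name so the new clang-format does not rewrap every call site. A reduced sketch of the pattern (hypothetical kernel names, not the repo's real signatures):

```cpp
#include <cstdint>

enum Layout : int { LAYOUT_NCHW = 0, LAYOUT_NHWC = 1 };

template <bool IsNhwc>
void MaxPoolKernel(const int64_t* dims) {
  // ... dispatch on IsNhwc at compile time ...
}

template <Layout layout>
void ComputePool(const int64_t* dims) {
  // Hoisting the comparison keeps the template argument list short,
  // so the call below keeps a stable shape under reformatting.
  constexpr bool pool_template_arg = layout == LAYOUT_NHWC;
  MaxPoolKernel<pool_template_arg>(dims);
}

template void ComputePool<LAYOUT_NHWC>(const int64_t*);  // example instantiation
```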
24 changes: 12 additions & 12 deletions onnxruntime/core/providers/cuda/tensor/space_depth_ops.cc
@@ -171,13 +171,13 @@ Status SpaceToDepth<Layout>::ComputeInternal(OpKernelContext* context) const {
int64_t output_depth = -1;
int64_t output_height = -1;
int64_t output_width = -1;
-
+ constexpr bool template_arg = Layout == LAYOUT_NHWC;
ORT_RETURN_IF_ERROR(
- InputValidationsAndOutputDimsCalc<Layout == LAYOUT_NHWC>(input,
- batch,
- input_depth, input_height, input_width,
- output_depth, output_height, output_width,
- true));
+ InputValidationsAndOutputDimsCalc<template_arg>(input,
+ batch,
+ input_depth, input_height, input_width,
+ output_depth, output_height, output_width,
+ true));

// We use the "actual" output shape to construct the output tensor
Tensor& output = (Layout == LAYOUT_NCHW)
@@ -223,13 +223,13 @@ Status DepthToSpace<Layout>::ComputeInternal(OpKernelContext* context) const {
int64_t output_depth = -1;
int64_t output_height = -1;
int64_t output_width = -1;
-
+ constexpr bool template_arg = Layout == LAYOUT_NHWC;
ORT_RETURN_IF_ERROR(
- InputValidationsAndOutputDimsCalc<Layout == LAYOUT_NHWC>(input,
- batch,
- input_depth, input_height, input_width,
- output_depth, output_height, output_width,
- false));
+ InputValidationsAndOutputDimsCalc<template_arg>(input,
+ batch,
+ input_depth, input_height, input_width,
+ output_depth, output_height, output_width,
+ false));

// We use the "actual" output shape to construct the output tensor
Tensor& output = (Layout == LAYOUT_NCHW)
6 changes: 3 additions & 3 deletions onnxruntime/core/providers/rocm/rocm_profiler.h
@@ -34,9 +34,9 @@ class RocmProfiler final : public EpProfiler {
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RocmProfiler);
~RocmProfiler() {}
bool StartProfiling(TimePoint) override { return true; }
- void EndProfiling(TimePoint, Events&) override {};
- void Start(uint64_t) override{};
- void Stop(uint64_t) override{};
+ void EndProfiling(TimePoint, Events&) override {}
+ void Start(uint64_t) override {}
+ void Stop(uint64_t) override {}
};

} // namespace profiling
4 changes: 2 additions & 2 deletions onnxruntime/core/providers/vitisai/vitisai_profiler.h
@@ -14,8 +14,8 @@ class VitisaiProfiler final : public EpProfiler {
~VitisaiProfiler() {}
bool StartProfiling(TimePoint) override;
void EndProfiling(TimePoint, Events&) override;
- void Start(uint64_t) override{};
- void Stop(uint64_t) override{};
+ void Start(uint64_t) override {}
+ void Stop(uint64_t) override {}
};
#endif

2 changes: 1 addition & 1 deletion onnxruntime/test/fuzzing/src/test.cpp
@@ -282,7 +282,7 @@ int main(int argc, char* argv[]) {
// Enable telemetry events
//
env.EnableTelemetryEvents();
- struct RunStats run_stats {};
+ struct RunStats run_stats{};
runtimeOpt opt{};
user_options& user_opt{opt.user_opt};
Logger::wcstream& werr_stream_buf{opt.werr_stream_buf};
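The fuzz-test hunk reflects 19.1.6's tighter spacing for a braced initializer after a declarator: `run_stats {}` loses the space before the braces. A minimal sketch (hypothetical `RunStats` fields):

```cpp
struct RunStats {
  int iterations = 0;
};

int main() {
  struct RunStats run_stats{};  // 19.1.6 keeps the empty braces flush against the name
  return run_stats.iterations;
}
```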
@@ -51,7 +51,7 @@ __device__ __forceinline__ T reduce_block_into_lanes(T* x, T val, int lanes = 1,
final = x[tid] + x[tid + 32];
else
final = val;
- // __SYNCWARP();
+ // __SYNCWARP();

#pragma unroll
#if defined(CUDA_VERSION) && CUDA_VERSION >= 9000
4 changes: 3 additions & 1 deletion orttraining/orttraining/test/gradient/gradient_ops_test.cc
@@ -1571,7 +1571,9 @@ TEST(GradientCheckerTest, SigmoidGrad) { UnaryOpGradientTest("Sigmoid"); }

TEST(GradientCheckerTest, QuickGeluGrad) {
// Default alpha = 1.702, relax the tolerance due failure on Win for some seed.
- { UnaryOpGradientTest("QuickGelu", kMSDomain, 1, nullptr, nullptr, {}, 5e-2f); }
+ {
+ UnaryOpGradientTest("QuickGelu", kMSDomain, 1, nullptr, nullptr, {}, 5e-2f);
+ }

// Silu, alpha = 1.0.
{
2 changes: 1 addition & 1 deletion requirements-lintrunner.txt
@@ -8,4 +8,4 @@ ruff==0.5.4
black==24.10.0
isort==5.13.2
# CLANGFORMAT
- clang-format==18.1.8
+ clang-format==19.1.6
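With the pin bumped, local checkouts presumably need the matching binary and a reformat pass; under the repo's standard lintrunner workflow that would be along the lines of `lintrunner init` to reinstall the pinned tools and `lintrunner -a --all-files` to apply fixes (an assumption on the exact flags; check the contributing docs for the precise invocation).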
4 changes: 2 additions & 2 deletions winml/lib/Api.Ort/OnnxruntimeEngine.h
@@ -91,8 +91,8 @@ class OnnxruntimeEngine
STDMETHOD(CreateTensorValueFromExternalD3DResource)
(ID3D12Resource* resource, const int64_t* shape, size_t count, winml::TensorKind kind, _Out_ IValue** out) override;
STDMETHOD(CreateTensorValueFromExternalBuffer)
- (void* data, size_t size_in_bytes, const int64_t* shape, size_t count, winml::TensorKind kind, _Out_ IValue** out
- ) override;
+ (void* data, size_t size_in_bytes, const int64_t* shape, size_t count, winml::TensorKind kind, _Out_ IValue** out)
+ override;
STDMETHOD(CreateStringTensorValueFromDataWithCopy)
(const char* const* data, size_t num_elements, const int64_t* shape, size_t count, _Out_ IValue** out) override;
STDMETHOD(CreateNullValue)
6 changes: 4 additions & 2 deletions winml/lib/Api/LearningModelSession.cpp
@@ -37,13 +37,15 @@ LearningModelSession::LearningModelSession(_winml::IEngine* engine)
}

LearningModelSession::LearningModelSession(winml::LearningModel const& model) try
- : LearningModelSession(model, make<LearningModelDevice>(LearningModelDeviceKind::Default)) {}
+ : LearningModelSession(model, make<LearningModelDevice>(LearningModelDeviceKind::Default)) {
+ }
WINML_CATCH_ALL

LearningModelSession::LearningModelSession(
winml::LearningModel const& model, winml::LearningModelDevice const& deviceToRunOn
) try
- : LearningModelSession(model, deviceToRunOn, nullptr) {}
+ : LearningModelSession(model, deviceToRunOn, nullptr) {
+ }
WINML_CATCH_ALL

LearningModelSession::LearningModelSession(
@@ -116,9 +116,9 @@ struct RandomAccessStream
ABI::Windows::Storage::Streams::IInputStream,
ABI::Windows::Storage::Streams::IOutputStream,
ABI::Windows::Foundation::IClosable> {
- InspectableClass(L"WinMLTest.RandomAccessStream", BaseTrust)
+ InspectableClass(L"WinMLTest.RandomAccessStream", BaseTrust)

- private : Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer> buffer_ = nullptr;
+ private : Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer> buffer_ = nullptr;
UINT64 position_ = 0;

public:
@@ -266,8 +266,8 @@ struct BufferBackedRandomAccessStreamReferenceOpenReadAsync
Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::WinRtClassicComMix | Microsoft::WRL::InhibitRoOriginateError>,
__FIAsyncOperation_1_Windows__CStorage__CStreams__CIRandomAccessStreamWithContentType,
ABI::Windows::Foundation::IAsyncInfo> {
- InspectableClass(L"WinMLTest.BufferBackedRandomAccessStreamReferenceOpenReadAsync", BaseTrust) public
- : Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStreamWithContentType> ras_;
+ InspectableClass(L"WinMLTest.BufferBackedRandomAccessStreamReferenceOpenReadAsync", BaseTrust) public
+ : Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStreamWithContentType> ras_;
Microsoft::WRL::ComPtr<ABI::Windows::Foundation::IAsyncOperationCompletedHandler<
ABI::Windows::Storage::Streams::IRandomAccessStreamWithContentType*>>
completed_handler_;
4 changes: 2 additions & 2 deletions winml/test/api/raw/weak_buffer.h
@@ -18,9 +18,9 @@ struct WeakBuffer
Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::WinRtClassicComMix | Microsoft::WRL::InhibitRoOriginateError>,
ABI::Windows::Storage::Streams::IBuffer,
Windows::Storage::Streams::IBufferByteAccess> {
- InspectableClass(L"WinMLTest.WeakBuffer", BaseTrust)
+ InspectableClass(L"WinMLTest.WeakBuffer", BaseTrust)

- private : const T* m_p_begin;
+ private : const T* m_p_begin;
const T* m_p_end;

public:
