From 228dd168936a943362665a79e86d37500d3d2f0e Mon Sep 17 00:00:00 2001
From: Changming Sun
Date: Tue, 14 Jan 2025 09:02:04 -0800
Subject: [PATCH] Bump clang-format from 18.1.8 to 19.1.6 (#23346)

To replace #23327
---
 .../onnxruntime/core/common/profiler_common.h |  4 +--
 .../platform/EigenNonBlockingThreadPool.h     | 18 ++++++-------
 js/react_native/e2e/ios/MNISTDataHandler.mm   | 10 ++------
 js/react_native/ios/OnnxruntimeModule.mm      | 25 +++----------
 .../OnnxruntimeModuleTest.mm                  |  4 ++-
 onnxruntime/core/graph/graph.cc               |  4 ++-
 .../cpu/element_wise_ranged_transform.h       |  4 +--
 .../core/providers/cuda/cuda_profiler.h       |  6 ++---
 onnxruntime/core/providers/cuda/nn/pool.cc    |  9 ++++---
 .../providers/cuda/tensor/space_depth_ops.cc  | 24 +++++++++---------
 .../core/providers/rocm/rocm_profiler.h       |  6 ++---
 .../core/providers/vitisai/vitisai_profiler.h |  4 +--
 onnxruntime/test/fuzzing/src/test.cpp         |  2 +-
 .../cuda/fused_ops/type_shim.h                |  2 +-
 .../test/gradient/gradient_ops_test.cc        |  4 ++-
 requirements-lintrunner.txt                   |  2 +-
 winml/lib/Api.Ort/OnnxruntimeEngine.h         |  4 +--
 winml/lib/Api/LearningModelSession.cpp        |  6 +++--
 ...er_backed_random_access_stream_reference.h |  8 +++---
 winml/test/api/raw/weak_buffer.h              |  4 +--
 20 files changed, 68 insertions(+), 82 deletions(-)

diff --git a/include/onnxruntime/core/common/profiler_common.h b/include/onnxruntime/core/common/profiler_common.h
index 0074d5e74a461..ab973256fe5f1 100644
--- a/include/onnxruntime/core/common/profiler_common.h
+++ b/include/onnxruntime/core/common/profiler_common.h
@@ -81,8 +81,8 @@ class EpProfiler {
   virtual ~EpProfiler() = default;
   virtual bool StartProfiling(TimePoint profiling_start_time) = 0;      // called when profiling starts
   virtual void EndProfiling(TimePoint start_time, Events& events) = 0;  // called when profiling ends, save all captures numbers to "events"
-  virtual void Start(uint64_t){};   // called before op start, accept an id as argument to identify the op
-  virtual void Stop(uint64_t){};    // called after op stop, accept an id as argument to identify the op
+  virtual void Start(uint64_t) {}  // called before op start, accept an id as argument to identify the op
+  virtual void Stop(uint64_t) {}   // called after op stop, accept an id as argument to identify the op
 };
 
 // Demangle C++ symbols
diff --git a/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h b/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h
index a7c63c507d1ba..26fc440f7bfc5 100644
--- a/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h
+++ b/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h
@@ -218,18 +218,18 @@ class ThreadPoolProfiler {
     WAIT_REVOKE,
     MAX_EVENT
   };
-  ThreadPoolProfiler(int, const CHAR_TYPE*) {};
+  ThreadPoolProfiler(int, const CHAR_TYPE*) {}
   ~ThreadPoolProfiler() = default;
   ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ThreadPoolProfiler);
-  void Start() {};
+  void Start() {}
   std::string Stop() { return "not available for minimal build"; }
-  void LogStart() {};
-  void LogEnd(ThreadPoolEvent){};
-  void LogEndAndStart(ThreadPoolEvent){};
-  void LogStartAndCoreAndBlock(std::ptrdiff_t){};
-  void LogCoreAndBlock(std::ptrdiff_t){};
-  void LogThreadId(int) {};
-  void LogRun(int) {};
+  void LogStart() {}
+  void LogEnd(ThreadPoolEvent) {}
+  void LogEndAndStart(ThreadPoolEvent) {}
+  void LogStartAndCoreAndBlock(std::ptrdiff_t) {}
+  void LogCoreAndBlock(std::ptrdiff_t) {}
+  void LogThreadId(int) {}
+  void LogRun(int) {}
   std::string DumpChildThreadStat() { return {}; }
 };
 #else
diff --git a/js/react_native/e2e/ios/MNISTDataHandler.mm b/js/react_native/e2e/ios/MNISTDataHandler.mm
index 54a4b629865d0..1a79b66ca5d2f 100644
--- a/js/react_native/e2e/ios/MNISTDataHandler.mm
+++ b/js/react_native/e2e/ios/MNISTDataHandler.mm
@@ -46,10 +46,7 @@ @implementation MNISTDataHandler
 
 // It gets raw input data, which can be uri or byte array and others,
 // returns cooked data formatted as input of a model.
-RCT_EXPORT_METHOD(preprocess
-                  : (NSString*)uri resolve
-                  : (RCTPromiseResolveBlock)resolve reject
-                  : (RCTPromiseRejectBlock)reject) {
+RCT_EXPORT_METHOD(preprocess : (NSString*)uri resolve : (RCTPromiseResolveBlock)resolve reject : (RCTPromiseRejectBlock)reject) {
   @try {
     NSDictionary* inputDataMap = [self preprocess:uri];
     resolve(inputDataMap);
@@ -60,10 +57,7 @@ @implementation MNISTDataHandler
 
 // It gets a result from onnxruntime and a duration of session time for input data,
 // returns output data formatted as React Native map.
-RCT_EXPORT_METHOD(postprocess
-                  : (NSDictionary*)result resolve
-                  : (RCTPromiseResolveBlock)resolve reject
-                  : (RCTPromiseRejectBlock)reject) {
+RCT_EXPORT_METHOD(postprocess : (NSDictionary*)result resolve : (RCTPromiseResolveBlock)resolve reject : (RCTPromiseRejectBlock)reject) {
   @try {
     NSDictionary* cookedMap = [self postprocess:result];
     resolve(cookedMap);
diff --git a/js/react_native/ios/OnnxruntimeModule.mm b/js/react_native/ios/OnnxruntimeModule.mm
index 16e64d8ed98b4..d3527aad6ae38 100644
--- a/js/react_native/ios/OnnxruntimeModule.mm
+++ b/js/react_native/ios/OnnxruntimeModule.mm
@@ -73,11 +73,7 @@ - (void)setBlobManager:(RCTBlobManager*)manager {
  * @param reject callback for returning an error back to react native js
  * @note when run() is called, the same modelPath must be passed into the first parameter.
  */
-RCT_EXPORT_METHOD(loadModel
-                  : (NSString*)modelPath options
-                  : (NSDictionary*)options resolver
-                  : (RCTPromiseResolveBlock)resolve rejecter
-                  : (RCTPromiseRejectBlock)reject) {
+RCT_EXPORT_METHOD(loadModel : (NSString*)modelPath options : (NSDictionary*)options resolver : (RCTPromiseResolveBlock)resolve rejecter : (RCTPromiseRejectBlock)reject) {
   @try {
     NSDictionary* resultMap = [self loadModel:modelPath options:options];
     resolve(resultMap);
@@ -95,11 +91,7 @@ - (void)setBlobManager:(RCTBlobManager*)manager {
  * @param reject callback for returning an error back to react native js
  * @note when run() is called, the same modelPath must be passed into the first parameter.
  */
-RCT_EXPORT_METHOD(loadModelFromBlob
-                  : (NSDictionary*)modelDataBlob options
-                  : (NSDictionary*)options resolver
-                  : (RCTPromiseResolveBlock)resolve rejecter
-                  : (RCTPromiseRejectBlock)reject) {
+RCT_EXPORT_METHOD(loadModelFromBlob : (NSDictionary*)modelDataBlob options : (NSDictionary*)options resolver : (RCTPromiseResolveBlock)resolve rejecter : (RCTPromiseRejectBlock)reject) {
   @try {
     [self checkBlobManager];
     NSString* blobId = [modelDataBlob objectForKey:@"blobId"];
@@ -121,10 +113,7 @@ - (void)setBlobManager:(RCTBlobManager*)manager {
  * @param resolve callback for returning output back to react native js
  * @param reject callback for returning an error back to react native js
  */
-RCT_EXPORT_METHOD(dispose
-                  : (NSString*)key resolver
-                  : (RCTPromiseResolveBlock)resolve rejecter
-                  : (RCTPromiseRejectBlock)reject) {
+RCT_EXPORT_METHOD(dispose : (NSString*)key resolver : (RCTPromiseResolveBlock)resolve rejecter : (RCTPromiseRejectBlock)reject) {
   @try {
     [self dispose:key];
     resolve(nil);
@@ -143,13 +132,7 @@ - (void)setBlobManager:(RCTBlobManager*)manager {
  * @param resolve callback for returning an inference result back to react native js
  * @param reject callback for returning an error back to react native js
  */
-RCT_EXPORT_METHOD(run
-                  : (NSString*)url input
-                  : (NSDictionary*)input output
-                  : (NSArray*)output options
-                  : (NSDictionary*)options resolver
-                  : (RCTPromiseResolveBlock)resolve rejecter
-                  : (RCTPromiseRejectBlock)reject) {
+RCT_EXPORT_METHOD(run : (NSString*)url input : (NSDictionary*)input output : (NSArray*)output options : (NSDictionary*)options resolver : (RCTPromiseResolveBlock)resolve rejecter : (RCTPromiseRejectBlock)reject) {
   @try {
     NSDictionary* resultMap = [self run:url input:input output:output options:options];
     resolve(resultMap);
diff --git a/js/react_native/ios/OnnxruntimeModuleTest/OnnxruntimeModuleTest.mm b/js/react_native/ios/OnnxruntimeModuleTest/OnnxruntimeModuleTest.mm
index 7059177400f3c..326990a515510 100644
--- a/js/react_native/ios/OnnxruntimeModuleTest/OnnxruntimeModuleTest.mm
+++ b/js/react_native/ios/OnnxruntimeModuleTest/OnnxruntimeModuleTest.mm
@@ -144,7 +144,9 @@ - (void)testOnnxruntimeModule_AppendCoreml {
     XCTAssertEqualObjects(outputNames[0], @"output");
   }
 
-  { [onnxruntimeModule dispose:sessionKey]; }
+  {
+    [onnxruntimeModule dispose:sessionKey];
+  }
 }
 
 @end
diff --git a/onnxruntime/core/graph/graph.cc b/onnxruntime/core/graph/graph.cc
index 9fee3e49dc0d2..7ee794ccbd2e8 100644
--- a/onnxruntime/core/graph/graph.cc
+++ b/onnxruntime/core/graph/graph.cc
@@ -50,7 +50,9 @@ namespace onnxruntime {
 
 #define NO_CHANGE_ON_SYNC_FLAG(...)                  \
   do {                                               \
     const bool sync_needed = GraphProtoSyncNeeded(); \
-    { __VA_ARGS__; }                                 \
+    {                                                \
+      __VA_ARGS__;                                   \
+    }                                                \
     GraphProtoSyncNeeded(sync_needed);               \
   } while (0)
diff --git a/onnxruntime/core/providers/cpu/element_wise_ranged_transform.h b/onnxruntime/core/providers/cpu/element_wise_ranged_transform.h
index f457a4d7dcaf9..0f8b0df63c074 100644
--- a/onnxruntime/core/providers/cpu/element_wise_ranged_transform.h
+++ b/onnxruntime/core/providers/cpu/element_wise_ranged_transform.h
@@ -56,7 +56,7 @@ ElementWiseRangedTransform<T>::~ElementWiseRangedTransform() {
   Status Init(const onnxruntime::NodeAttributes& attributes) {       \
     return (GetFloatParam(#X, attributes, X));                       \
   }                                                                  \
-  GSL_SUPPRESS(r.11)                                                  \
+  GSL_SUPPRESS(r.11)                                                 \
   ElementWiseRangedTransform<T>* Copy() const final {                \
     using T1 = typename std::remove_pointer<decltype(this)>::type;   \
     using T2 = typename std::remove_const<T1>::type;                 \
@@ -71,7 +71,7 @@ ElementWiseRangedTransform<T>::~ElementWiseRangedTransform() {
     ORT_RETURN_IF_ERROR(GetFloatParam(#Y, attributes, Y));           \
     return Status::OK();                                             \
   }                                                                  \
-  GSL_SUPPRESS(r.11)                                                  \
+  GSL_SUPPRESS(r.11)                                                 \
   ElementWiseRangedTransform<T>* Copy() const final {                \
     using T1 = typename std::remove_pointer<decltype(this)>::type;   \
     using T2 = typename std::remove_const<T1>::type;                 \
diff --git a/onnxruntime/core/providers/cuda/cuda_profiler.h b/onnxruntime/core/providers/cuda/cuda_profiler.h
index 4930e55351615..1d8ecddce4c79 100644
--- a/onnxruntime/core/providers/cuda/cuda_profiler.h
+++ b/onnxruntime/core/providers/cuda/cuda_profiler.h
@@ -33,9 +33,9 @@ class CudaProfiler final : public EpProfiler {
   ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(CudaProfiler);
   ~CudaProfiler() {}
   bool StartProfiling(TimePoint) override { return true; }
-  void EndProfiling(TimePoint, Events&) override {};
-  void Start(uint64_t) override{};
-  void Stop(uint64_t) override{};
+  void EndProfiling(TimePoint, Events&) override {}
+  void Start(uint64_t) override {}
+  void Stop(uint64_t) override {}
 };
 
 #endif
diff --git a/onnxruntime/core/providers/cuda/nn/pool.cc b/onnxruntime/core/providers/cuda/nn/pool.cc
index 4acdcfcf35491..25c8210d4aba3 100644
--- a/onnxruntime/core/providers/cuda/nn/pool.cc
+++ b/onnxruntime/core/providers/cuda/nn/pool.cc
@@ -287,13 +287,14 @@ Status Pool<T, MaxPool<8>, Layout>::ComputeInternal(OpKernelContext* context) const {
   }
 
   Tensor* I = context->Output(1, TensorShape(i_dims));
+  constexpr bool pool_template_arg = Layout == LAYOUT_NHWC;
   if (nullptr != I || !this->pool_attrs_.default_dilations) {
     auto i_data = nullptr == I ? nullptr : I->MutableData<int64_t>();
-    MaxPoolWithIndex<T, Layout == LAYOUT_NHWC>(this->Stream(context), x_shape, TensorShape(y_dims), kernel_shape,
-                                               strides, pads, this->pool_attrs_.dilations,
-                                               this->pool_attrs_.storage_order, x_data, y_data, i_data);
+    MaxPoolWithIndex<T, pool_template_arg>(this->Stream(context), x_shape, TensorShape(y_dims), kernel_shape,
+                                           strides, pads, this->pool_attrs_.dilations,
+                                           this->pool_attrs_.storage_order, x_data, y_data, i_data);
   } else {
-    ORT_RETURN_IF_ERROR((Pool<T, MaxPool<1>, Layout == LAYOUT_NHWC>::ComputeInternal(context)));
+    ORT_RETURN_IF_ERROR((Pool<T, MaxPool<1>, pool_template_arg>::ComputeInternal(context)));
   }
   return Status::OK();
 }
diff --git a/onnxruntime/core/providers/cuda/tensor/space_depth_ops.cc b/onnxruntime/core/providers/cuda/tensor/space_depth_ops.cc
index aaaf3600b676e..cbfc62494fde4 100644
--- a/onnxruntime/core/providers/cuda/tensor/space_depth_ops.cc
+++ b/onnxruntime/core/providers/cuda/tensor/space_depth_ops.cc
@@ -171,13 +171,13 @@ Status SpaceToDepth<Layout>::ComputeInternal(OpKernelContext* context) const {
   int64_t output_depth = -1;
   int64_t output_height = -1;
   int64_t output_width = -1;
-
+  constexpr bool template_arg = Layout == LAYOUT_NHWC;
   ORT_RETURN_IF_ERROR(
-      InputValidationsAndOutputDimsCalc<Layout == LAYOUT_NHWC>(input,
-                                                               batch,
-                                                               input_depth, input_height, input_width,
-                                                               output_depth, output_height, output_width,
-                                                               true));
+      InputValidationsAndOutputDimsCalc<template_arg>(input,
+                                                      batch,
+                                                      input_depth, input_height, input_width,
+                                                      output_depth, output_height, output_width,
+                                                      true));
 
   // We use the "actual" output shape to construct the output tensor
   Tensor& output = (Layout == LAYOUT_NCHW)
@@ -223,13 +223,13 @@ Status DepthToSpace<Layout>::ComputeInternal(OpKernelContext* context) const {
   int64_t output_depth = -1;
   int64_t output_height = -1;
   int64_t output_width = -1;
-
+  constexpr bool template_arg = Layout == LAYOUT_NHWC;
   ORT_RETURN_IF_ERROR(
-      InputValidationsAndOutputDimsCalc<Layout == LAYOUT_NHWC>(input,
-                                                               batch,
-                                                               input_depth, input_height, input_width,
-                                                               output_depth, output_height, output_width,
-                                                               false));
+      InputValidationsAndOutputDimsCalc<template_arg>(input,
+                                                      batch,
+                                                      input_depth, input_height, input_width,
+                                                      output_depth, output_height, output_width,
+                                                      false));
 
   // We use the "actual" output shape to construct the output tensor
   Tensor& output = (Layout == LAYOUT_NCHW)
diff --git a/onnxruntime/core/providers/rocm/rocm_profiler.h b/onnxruntime/core/providers/rocm/rocm_profiler.h
index d5c7e3f273565..52c6d4ea05f99 100644
--- a/onnxruntime/core/providers/rocm/rocm_profiler.h
+++ b/onnxruntime/core/providers/rocm/rocm_profiler.h
@@ -34,9 +34,9 @@ class RocmProfiler final : public EpProfiler {
   ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RocmProfiler);
   ~RocmProfiler() {}
   bool StartProfiling(TimePoint) override { return true; }
-  void EndProfiling(TimePoint, Events&) override {};
-  void Start(uint64_t) override{};
-  void Stop(uint64_t) override{};
+  void EndProfiling(TimePoint, Events&) override {}
+  void Start(uint64_t) override {}
+  void Stop(uint64_t) override {}
 };
 
 }  // namespace profiling
diff --git a/onnxruntime/core/providers/vitisai/vitisai_profiler.h b/onnxruntime/core/providers/vitisai/vitisai_profiler.h
index aedbda31f7b1d..afe4058f7290a 100644
--- a/onnxruntime/core/providers/vitisai/vitisai_profiler.h
+++ b/onnxruntime/core/providers/vitisai/vitisai_profiler.h
@@ -14,8 +14,8 @@ class VitisaiProfiler final : public EpProfiler {
   ~VitisaiProfiler() {}
   bool StartProfiling(TimePoint) override;
   void EndProfiling(TimePoint, Events&) override;
-  void Start(uint64_t) override{};
-  void Stop(uint64_t) override{};
+  void Start(uint64_t) override {}
+  void Stop(uint64_t) override {}
 };
 
 #endif
diff --git a/onnxruntime/test/fuzzing/src/test.cpp b/onnxruntime/test/fuzzing/src/test.cpp
index 0755b8493f2a9..c9786fa48dae3 100644
--- a/onnxruntime/test/fuzzing/src/test.cpp
+++ b/onnxruntime/test/fuzzing/src/test.cpp
@@ -282,7 +282,7 @@ int main(int argc, char* argv[]) {
       // Enable telemetry events
       // env.EnableTelemetryEvents();
 
-      struct RunStats run_stats {};
+      struct RunStats run_stats{};
       runtimeOpt opt{};
       user_options& user_opt{opt.user_opt};
       Logger::wcstream& werr_stream_buf{opt.werr_stream_buf};
diff --git a/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cuda/fused_ops/type_shim.h b/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cuda/fused_ops/type_shim.h
index 3d508b80a0c2c..93f6945a264ee 100644
--- a/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cuda/fused_ops/type_shim.h
+++ b/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cuda/fused_ops/type_shim.h
@@ -51,7 +51,7 @@ __device__ __forceinline__ T reduce_block_into_lanes(T* x, T val, int lanes = 1,
     final = x[tid] + x[tid + 32];
   else
     final = val;
-    // __SYNCWARP();
+  // __SYNCWARP();
 
 #pragma unroll
 #if defined(CUDA_VERSION) && CUDA_VERSION >= 9000
diff --git a/orttraining/orttraining/test/gradient/gradient_ops_test.cc b/orttraining/orttraining/test/gradient/gradient_ops_test.cc
index b81a08e23e3cf..b683010a72218 100644
--- a/orttraining/orttraining/test/gradient/gradient_ops_test.cc
+++ b/orttraining/orttraining/test/gradient/gradient_ops_test.cc
@@ -1571,7 +1571,9 @@ TEST(GradientCheckerTest, SigmoidGrad) { UnaryOpGradientTest("Sigmoid"); }
 
 TEST(GradientCheckerTest, QuickGeluGrad) {
   // Default alpha = 1.702, relax the tolerance due failure on Win for some seed.
-  { UnaryOpGradientTest("QuickGelu", kMSDomain, 1, nullptr, nullptr, {}, 5e-2f); }
+  {
+    UnaryOpGradientTest("QuickGelu", kMSDomain, 1, nullptr, nullptr, {}, 5e-2f);
+  }
 
   // Silu, alpha = 1.0.
   {
diff --git a/requirements-lintrunner.txt b/requirements-lintrunner.txt
index 029a0869bef46..55919f5f81748 100644
--- a/requirements-lintrunner.txt
+++ b/requirements-lintrunner.txt
@@ -8,4 +8,4 @@ ruff==0.5.4
 black==24.10.0
 isort==5.13.2
 # CLANGFORMAT
-clang-format==18.1.8
+clang-format==19.1.6
diff --git a/winml/lib/Api.Ort/OnnxruntimeEngine.h b/winml/lib/Api.Ort/OnnxruntimeEngine.h
index 88945b75c75e4..93c097c7deaca 100644
--- a/winml/lib/Api.Ort/OnnxruntimeEngine.h
+++ b/winml/lib/Api.Ort/OnnxruntimeEngine.h
@@ -91,8 +91,8 @@ class OnnxruntimeEngine
   STDMETHOD(CreateTensorValueFromExternalD3DResource)
   (ID3D12Resource* resource, const int64_t* shape, size_t count, winml::TensorKind kind, _Out_ IValue** out) override;
   STDMETHOD(CreateTensorValueFromExternalBuffer)
-  (void* data, size_t size_in_bytes, const int64_t* shape, size_t count, winml::TensorKind kind, _Out_ IValue** out
-  ) override;
+  (void* data, size_t size_in_bytes, const int64_t* shape, size_t count, winml::TensorKind kind, _Out_ IValue** out)
+  override;
   STDMETHOD(CreateStringTensorValueFromDataWithCopy)
   (const char* const* data, size_t num_elements, const int64_t* shape, size_t count, _Out_ IValue** out) override;
   STDMETHOD(CreateNullValue)
diff --git a/winml/lib/Api/LearningModelSession.cpp b/winml/lib/Api/LearningModelSession.cpp
index 57bafda57fe54..508b77e964b4c 100644
--- a/winml/lib/Api/LearningModelSession.cpp
+++ b/winml/lib/Api/LearningModelSession.cpp
@@ -37,13 +37,15 @@ LearningModelSession::LearningModelSession(_winml::IEngine* engine)
 }
 
 LearningModelSession::LearningModelSession(winml::LearningModel const& model) try
-  : LearningModelSession(model, make<LearningModelDevice>(LearningModelDeviceKind::Default)) {}
+  : LearningModelSession(model, make<LearningModelDevice>(LearningModelDeviceKind::Default)) {
+}
 WINML_CATCH_ALL
 
 LearningModelSession::LearningModelSession(
   winml::LearningModel const& model, winml::LearningModelDevice const& deviceToRunOn
 ) try
-  : LearningModelSession(model, deviceToRunOn, nullptr) {}
+  : LearningModelSession(model, deviceToRunOn, nullptr) {
+}
 WINML_CATCH_ALL
 
 LearningModelSession::LearningModelSession(
diff --git a/winml/test/api/raw/buffer_backed_random_access_stream_reference.h b/winml/test/api/raw/buffer_backed_random_access_stream_reference.h
index 6f492bf8340c9..32ab8de12f650 100644
--- a/winml/test/api/raw/buffer_backed_random_access_stream_reference.h
+++ b/winml/test/api/raw/buffer_backed_random_access_stream_reference.h
@@ -116,9 +116,9 @@ struct RandomAccessStream
       ABI::Windows::Storage::Streams::IInputStream,
       ABI::Windows::Storage::Streams::IOutputStream,
      ABI::Windows::Foundation::IClosable> {
-    InspectableClass(L"WinMLTest.RandomAccessStream", BaseTrust)
+  InspectableClass(L"WinMLTest.RandomAccessStream", BaseTrust)
 
-    private : Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer> buffer_ = nullptr;
+  private : Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer> buffer_ = nullptr;
   UINT64 position_ = 0;
 
   public:
@@ -266,8 +266,8 @@ struct BufferBackedRandomAccessStreamReferenceOpenReadAsync
       Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::WinRtClassicComMix>,
       __FIAsyncOperation_1_Windows__CStorage__CStreams__CIRandomAccessStreamWithContentType,
       ABI::Windows::Foundation::IAsyncInfo> {
-  InspectableClass(L"WinMLTest.BufferBackedRandomAccessStreamReferenceOpenReadAsync", BaseTrust) public
-      : Microsoft::WRL::ComPtr<RandomAccessStream> ras_;
+  InspectableClass(L"WinMLTest.BufferBackedRandomAccessStreamReferenceOpenReadAsync", BaseTrust) public
+    : Microsoft::WRL::ComPtr<RandomAccessStream> ras_;
 
   Microsoft::WRL::ComPtr<ABI::Windows::Foundation::IAsyncOperationCompletedHandler<ABI::Windows::Storage::Streams::IRandomAccessStreamWithContentType*>> completed_handler_;
 
diff --git a/winml/test/api/raw/weak_buffer.h b/winml/test/api/raw/weak_buffer.h
index 488ba0639cc18..74e358f0972f9 100644
--- a/winml/test/api/raw/weak_buffer.h
+++ b/winml/test/api/raw/weak_buffer.h
@@ -18,9 +18,9 @@ struct WeakBuffer
       Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::WinRtClassicComMix>,
       ABI::Windows::Storage::Streams::IBuffer,
       Windows::Storage::Streams::IBufferByteAccess> {
-    InspectableClass(L"WinMLTest.WeakBuffer", BaseTrust)
+  InspectableClass(L"WinMLTest.WeakBuffer", BaseTrust)
 
-    private : const T* m_p_begin;
+  private : const T* m_p_begin;
   const T* m_p_end;
 
   public: