
Remove unused local variables (#23634)
### Description
Remove unused local variables


### Motivation and Context
To make the code compatible with the latest ABSL.
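A note on why this matters, as an assumption for illustration rather than anything stated in the commit: recent Abseil releases annotate containers such as `absl::InlinedVector` (the type behind `TensorShapeVector`) so that compilers flag unused instances even though they have non-trivial destructors, and under `-Werror` that warning fails the build. A minimal sketch of the mechanism with a hypothetical `ShapeVector` stand-in:

```cpp
// Sketch only: GCC normally skips -Wunused-variable for objects with
// non-trivial destructors, but a type marked warn_unused (as recent Abseil
// containers reportedly are) is diagnosed anyway.
struct [[gnu::warn_unused]] ShapeVector {
  ~ShapeVector() {}  // a non-trivial destructor alone would suppress the warning
};

void Example() {
  ShapeVector axes;  // warning: unused variable 'axes' [-Wunused-variable]
}                    // -Werror promotes this warning to a compile error
```

Deleting the dead declarations, as this commit does, silences the warning without changing behavior.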
snnn authored Feb 11, 2025
1 parent 74c778e commit 6cac589
Showing 4 changed files with 1 addition and 5 deletions.
@@ -363,7 +363,6 @@ Status EinsumTypedComputeProcessor<T>::Run() {
{
TensorShapeVector reduced_dims;
TensorShapeVector preserved_dims; // dims which were not reduced
- TensorShapeVector preserved_shape; // shape pertaining to only the dims that were preserved (not reduced)
reduced_dims.reserve(onnxruntime::narrow<size_t>(num_subscript_labels)); // num_subscript_labels is the upper bound. No harm in over-reserving.
preserved_dims.reserve(onnxruntime::narrow<size_t>(num_subscript_labels)); // num_subscript_labels is the upper bound. No harm in over-reserving.

2 changes: 0 additions & 2 deletions onnxruntime/core/providers/cpu/reduction/reduction_ops.cc
@@ -789,7 +789,6 @@ bool CommonFastReduceSwitch(OpKernelContext* ctx,
fast_reduce_fct* case_rk,
fast_reduce_fct* case_krk,
fast_reduce_fct* case_rkr) {
- TensorShapeVector axes;
const Tensor* input = ctx->Input<Tensor>(0);
auto reduced_dims = input->Shape().GetDims();
TensorShapeVector input_axes;
@@ -1055,7 +1054,6 @@ template <typename T>
std::unique_ptr<Tensor> ReduceSum<T>::Impl(const Tensor& input, gsl::span<const int64_t> reduce_axes,
AllocatorPtr allocator, concurrency::ThreadPool* tp, bool keep_dims,
const TensorShape* input_shape_override) {
- TensorShapeVector axes;
TensorShapeVector output_shape, fast_shape, fast_axes;
TensorShape new_input_shape = input_shape_override == nullptr ? input.Shape() : *input_shape_override;
auto reduced_dims = new_input_shape.GetDims();
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/cuda/tensor/gather_elements.cc
@@ -174,8 +174,8 @@ Status GatherElements::ComputeInternal(OpKernelContext* context) const {
TensorShapeVector input_shape_vec = input_shape.AsShapeVector();
TensorShapeVector indices_shape_vec = indices_shape.AsShapeVector();
TensorShapeVector* p_indices_strides_vec = nullptr;
- TensorShapeVector indices_strides_vec;
#ifdef ENABLE_STRIDED_TENSORS
+ TensorShapeVector indices_strides_vec;
if (!indices_tensor->IsContiguous()) {
indices_strides_vec = ToShapeVector(indices_tensor->Strides());
p_indices_strides_vec = &indices_strides_vec;
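The gather_elements change carries the commit's single added line: the declaration moves inside the `#ifdef` so builds without `ENABLE_STRIDED_TENSORS` never see an unused vector. A rough sketch of the pattern with stand-in types (not the actual kernel code):

```cpp
#include <cstdint>
#include <vector>

using TensorShapeVector = std::vector<int64_t>;  // stand-in for the real alias

// Declare the strides vector only inside the guarded block, so that a build
// with ENABLE_STRIDED_TENSORS undefined has no unused local to warn about.
void GatherElementsSketch(bool indices_contiguous) {
  TensorShapeVector* p_indices_strides_vec = nullptr;
#ifdef ENABLE_STRIDED_TENSORS
  TensorShapeVector indices_strides_vec;  // lives only where it can be used
  if (!indices_contiguous) {
    indices_strides_vec = {4, 2, 1};      // placeholder strides
    p_indices_strides_vec = &indices_strides_vec;
  }
#else
  (void)indices_contiguous;               // parameter unused in this configuration
#endif
  (void)p_indices_strides_vec;            // the real kernel forwards this pointer on
}
```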
1 change: 0 additions & 1 deletion onnxruntime/core/providers/xnnpack/tensor/resize.cc
@@ -64,7 +64,6 @@ bool Resize::IsOnnxNodeSupported(const NodeUnit& node_unit,

// check the scale for the second dim is 1 or the size of the second dim matches the input shape.
// if not, it is not the C dim as a Resize will not change the number of channels.
- InlinedVector<float> scale(4, 1.0F);
if (scale_tensor) {
const Initializer scale_val(*scale_tensor, node_unit.ModelPath());
const auto scales = scale_val.DataAsSpan<float>();
