diff --git a/onnx/defs/math/defs.cc b/onnx/defs/math/defs.cc index 4f2970e4f46..8f46fc4ef8c 100644 --- a/onnx/defs/math/defs.cc +++ b/onnx/defs/math/defs.cc @@ -2063,11 +2063,12 @@ ONNX_OPERATOR_SET_SCHEMA( for (int64_t i = 0; i < dim_value; ++i) { second_shape.add_dim(); } + } else { + return; } bidirectionalBroadcastShapeInference( input_shape, second_shape, *getOutputShape(ctx, 0)); } - return; })); static const char* Sinh_ver9_doc = R"DOC( diff --git a/onnx/defs/math/old.cc b/onnx/defs/math/old.cc index f77a2fc5e0c..92ddf3af67c 100644 --- a/onnx/defs/math/old.cc +++ b/onnx/defs/math/old.cc @@ -1020,11 +1020,12 @@ ONNX_OPERATOR_SET_SCHEMA( for (int64_t i = 0; i < dim_value; ++i) { second_shape.add_dim(); } + } else { + return; } bidirectionalBroadcastShapeInference( input_shape, second_shape, *getOutputShape(ctx, 0)); } - return; })); static const char* Sign_ver9_doc = R"DOC( diff --git a/onnx/defs/tensor/defs.cc b/onnx/defs/tensor/defs.cc index 6c2512ea9d5..e4d3d223f93 100755 --- a/onnx/defs/tensor/defs.cc +++ b/onnx/defs/tensor/defs.cc @@ -590,7 +590,7 @@ ONNX_OPERATOR_SET_SCHEMA( for (size_t i = 0; i < numInputs; i++) { const auto& shape = ctx.getInputType(i)->tensor_type().shape(); if (shape.dim_size() != rank) { - fail_shape_inference("All inputs to Concat must have same rank"); + fail_shape_inference("All inputs to Concat must have same rank. Input ", i , " has rank ", shape.dim_size(), " != ", rank); } for (int j = 0; j < rank; j++) { if (j == axis) { diff --git a/onnx/defs/tensor/old.cc b/onnx/defs/tensor/old.cc index b638c6c262a..dac30d9cb62 100644 --- a/onnx/defs/tensor/old.cc +++ b/onnx/defs/tensor/old.cc @@ -559,7 +559,7 @@ ONNX_OPERATOR_SET_SCHEMA( for (size_t i = 0; i < numInputs; i++) { const auto& shape = ctx.getInputType(i)->tensor_type().shape(); if (shape.dim_size() != rank) { - fail_shape_inference("All inputs to Concat must have same rank"); + fail_shape_inference("All inputs to Concat must have same rank. Input ", i , " has rank ", shape.dim_size(), " != ", rank); } for (int j = 0; j < rank; j++) { if (j == axis) { diff --git a/onnx/test/shape_inference_test.py b/onnx/test/shape_inference_test.py index e987d50e398..500026b2d40 100644 --- a/onnx/test/shape_inference_test.py +++ b/onnx/test/shape_inference_test.py @@ -73,14 +73,16 @@ def _compare_value_infos(self, vi_type: TypeProto, inferred_vi_type: TypeProto) assert vi_type.tensor_type.HasField('elem_type') assert inferred_vi_type.tensor_type.HasField('elem_type') assert vi_type.tensor_type.elem_type == inferred_vi_type.tensor_type.elem_type - for dim_i in range(len(vi_type.tensor_type.shape.dim)): - dim = vi_type.tensor_type.shape.dim[dim_i] - inferred_dim = inferred_vi_type.tensor_type.shape.dim[dim_i] - # if it is a symbolic shape, make sure the inferred symbol has generated (dim_param) - if dim.dim_param: - assert dim.dim_param == inferred_dim.dim_param, '\n%s\n%s\n' % (vi_type, inferred_vi_type) - else: - assert dim.dim_value == inferred_dim.dim_value, '\n%s\n%s\n' % (vi_type, inferred_vi_type) + assert vi_type.tensor_type.HasField('shape') == inferred_vi_type.tensor_type.HasField('shape') + if vi_type.tensor_type.HasField('shape'): + for dim_i in range(len(vi_type.tensor_type.shape.dim)): + dim = vi_type.tensor_type.shape.dim[dim_i] + inferred_dim = inferred_vi_type.tensor_type.shape.dim[dim_i] + # if it is a symbolic shape, make sure the inferred symbol has generated (dim_param) + if dim.dim_param: + assert dim.dim_param == inferred_dim.dim_param, '\n%s\n%s\n' % (vi_type, inferred_vi_type) + else: + assert dim.dim_value == inferred_dim.dim_value, '\n%s\n%s\n' % (vi_type, inferred_vi_type) elif vi_type.HasField('sequence_type'): assert inferred_vi_type.HasField('sequence_type') vi = vi_type.sequence_type.elem_type @@ -383,6 +385,18 @@ def test_expand_dynamic_shape(self) -> None: graph, [make_tensor_value_info('y', TensorProto.INT32, (None, 2, None))]) + def test_expand_symbolic_shape(self) -> None: + graph = self._make_graph( + [('x', TensorProto.INT32, (1, 2, None)), + ('shape', TensorProto.INT64, ('unk__0',))], + [make_node("Expand", ['x', 'shape'], ['y'])], + [], + initializer=[]) + # if giving a symbolic shape, Expand should not infer any shape or rank inference + self._assert_inferred( + graph, + [make_tensor_value_info('y', TensorProto.INT32, None)]) + def test_resize_size(self) -> None: graph = self._make_graph( [('x', TensorProto.INT32, (2, 4, 3, 5)), @@ -2111,7 +2125,7 @@ def test_if_with_different_optional_shapes_in_then_else_branches(self) -> None: [] ) - output_tensor_proto = helper.make_tensor_type_proto(elem_type=TensorProto.FLOAT, shape=None) + output_tensor_proto = helper.make_tensor_type_proto(elem_type=TensorProto.FLOAT, shape=(None, )) output_optional_type_proto = helper.make_optional_type_proto(output_tensor_proto) output_optional_vi = helper.make_value_info('if_output', output_optional_type_proto) self._assert_inferred(graph, [output_optional_vi]) # type: ignore @@ -3862,7 +3876,7 @@ def test_optional_tensor_has_element(self) -> None: make_node('OptionalHasElement', ['sequence'], ['output'])], []) self._assert_inferred(graph, [optional_val_info, - make_tensor_value_info('output', TensorProto.BOOL, None)]) # type: ignore + make_tensor_value_info('output', TensorProto.BOOL, ())]) # type: ignore def test_optional_sequence_has_element(self) -> None: tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.FLOAT, shape=[0, 3, 4]) @@ -3881,7 +3895,7 @@ def test_optional_sequence_has_element(self) -> None: make_node('OptionalHasElement', ['optional'], ['output'])], []) self._assert_inferred(graph, [sequence_val_info, optional_val_info, - make_tensor_value_info('output', TensorProto.BOOL, None)]) # type: ignore + make_tensor_value_info('output', TensorProto.BOOL, ())]) # type: ignore def test_optional_tensor_get_element(self) -> None: tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.DOUBLE, shape=[2, 1, 4])