From 9497736cc59a429dbcf6350709c483479f67f9f5 Mon Sep 17 00:00:00 2001 From: 11happy Date: Fri, 18 Apr 2025 23:24:46 +0530 Subject: [PATCH 1/6] feat: add support for numpy.nan_to_num Signed-off-by: 11happy --- keras/src/backend/openvino/core.py | 28 +++++++++++++++++++ .../openvino/excluded_concrete_tests.txt | 1 - keras/src/backend/openvino/numpy.py | 28 +++++++++++++++++-- 3 files changed, 53 insertions(+), 4 deletions(-) diff --git a/keras/src/backend/openvino/core.py b/keras/src/backend/openvino/core.py index 252b2b0dae14..50241f84136d 100644 --- a/keras/src/backend/openvino/core.py +++ b/keras/src/backend/openvino/core.py @@ -41,6 +41,34 @@ "string": ov.Type.string, } +DTYPES_MAX = { + ov.Type.f16: np.finfo(np.float16).max, + ov.Type.f32: np.finfo(np.float32).max, + ov.Type.f64: np.finfo(np.float64).max, + ov.Type.u8: np.iinfo(np.uint8).max, + ov.Type.u16: np.iinfo(np.uint16).max, + ov.Type.u32: np.iinfo(np.uint32).max, + ov.Type.u64: np.iinfo(np.uint64).max, + ov.Type.i8: np.iinfo(np.int8).max, + ov.Type.i16: np.iinfo(np.int16).max, + ov.Type.i32: np.iinfo(np.int32).max, + ov.Type.i64: np.iinfo(np.int64).max, +} + +DTYPES_MIN = { + ov.Type.f16: np.finfo(np.float16).min, + ov.Type.f32: np.finfo(np.float32).min, + ov.Type.f64: np.finfo(np.float64).min, + ov.Type.u8: np.iinfo(np.uint8).min, + ov.Type.u16: np.iinfo(np.uint16).min, + ov.Type.u32: np.iinfo(np.uint32).min, + ov.Type.u64: np.iinfo(np.uint64).min, + ov.Type.i8: np.iinfo(np.int8).min, + ov.Type.i16: np.iinfo(np.int16).min, + ov.Type.i32: np.iinfo(np.int32).min, + ov.Type.i64: np.iinfo(np.int64).min, +} + def align_operand_types(x1, x2, op_name): x1_type = x1.element_type diff --git a/keras/src/backend/openvino/excluded_concrete_tests.txt b/keras/src/backend/openvino/excluded_concrete_tests.txt index f1ae053e623b..023403ce6d46 100644 --- a/keras/src/backend/openvino/excluded_concrete_tests.txt +++ b/keras/src/backend/openvino/excluded_concrete_tests.txt @@ -96,7 +96,6 @@ NumpyOneInputOpsCorrectnessTest::test_median NumpyOneInputOpsCorrectnessTest::test_meshgrid NumpyOneInputOpsCorrectnessTest::test_min NumpyOneInputOpsCorrectnessTest::test_moveaxis -NumpyOneInputOpsCorrectnessTest::test_nan_to_num NumpyOneInputOpsCorrectnessTest::test_pad_float16_constant_2 NumpyOneInputOpsCorrectnessTest::test_pad_float32_constant_2 NumpyOneInputOpsCorrectnessTest::test_pad_float64_constant_2 diff --git a/keras/src/backend/openvino/numpy.py b/keras/src/backend/openvino/numpy.py index a242a3c8d569..36ccf93d5b7b 100644 --- a/keras/src/backend/openvino/numpy.py +++ b/keras/src/backend/openvino/numpy.py @@ -5,6 +5,8 @@ from keras.src.backend import config from keras.src.backend.common import dtypes from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.openvino.core import DTYPES_MAX +from keras.src.backend.openvino.core import DTYPES_MIN from keras.src.backend.openvino.core import OPENVINO_DTYPES from keras.src.backend.openvino.core import OpenVINOKerasTensor from keras.src.backend.openvino.core import ( @@ -1030,9 +1032,29 @@ def moveaxis(x, source, destination): def nan_to_num(x, nan=0.0, posinf=None, neginf=None): - raise NotImplementedError( - "`nan_to_num` is not supported with openvino backend" - ) + x = get_ov_output(x) + dtype = x.get_element_type() + shape_x = ov_opset.shape_of(x).output(0) + nan_vector = ov_opset.broadcast( + ov_opset.constant(nan, dtype), shape_x + ).output(0) + posinf_val = posinf if posinf is not None else DTYPES_MAX[dtype] + neginf_val = neginf if neginf is not None else 
DTYPES_MIN[dtype] + posinf_vector = ov_opset.broadcast( + ov_opset.constant(posinf_val, dtype), shape_x + ).output(0) + neginf_vector = ov_opset.broadcast( + ov_opset.constant(neginf_val, dtype), shape_x + ).output(0) + nan_mask = ov_opset.is_nan(x).output(0) + x = ov_opset.select(nan_mask, nan_vector, x).output(0) + inf_const = ov_opset.constant(np.PINF, dtype) + posinf_mask = ov_opset.equal(x, inf_const).output(0) + x = ov_opset.select(posinf_mask, posinf_vector, x).output(0) + ninf_const = ov_opset.constant(np.NINF, dtype) + neginf_mask = ov_opset.equal(x, ninf_const).output(0) + x = ov_opset.select(neginf_mask, neginf_vector, x).output(0) + return OpenVINOKerasTensor(x) def ndim(x): From eb041286125322dbe59315c4f8e3e697d78327dd Mon Sep 17 00:00:00 2001 From: 11happy Date: Fri, 18 Apr 2025 23:35:53 +0530 Subject: [PATCH 2/6] use np.inf Signed-off-by: 11happy --- keras/src/backend/openvino/numpy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keras/src/backend/openvino/numpy.py b/keras/src/backend/openvino/numpy.py index 36ccf93d5b7b..a31f603af2ba 100644 --- a/keras/src/backend/openvino/numpy.py +++ b/keras/src/backend/openvino/numpy.py @@ -1048,10 +1048,10 @@ def nan_to_num(x, nan=0.0, posinf=None, neginf=None): ).output(0) nan_mask = ov_opset.is_nan(x).output(0) x = ov_opset.select(nan_mask, nan_vector, x).output(0) - inf_const = ov_opset.constant(np.PINF, dtype) + inf_const = ov_opset.constant(np.inf, dtype) posinf_mask = ov_opset.equal(x, inf_const).output(0) x = ov_opset.select(posinf_mask, posinf_vector, x).output(0) - ninf_const = ov_opset.constant(np.NINF, dtype) + ninf_const = ov_opset.constant(-np.inf, dtype) neginf_mask = ov_opset.equal(x, ninf_const).output(0) x = ov_opset.select(neginf_mask, neginf_vector, x).output(0) return OpenVINOKerasTensor(x) From 4d72bbfd6910bdef8f36b18c497275fc5fee7760 Mon Sep 17 00:00:00 2001 From: 11happy Date: Sat, 19 Apr 2025 18:12:47 +0530 Subject: [PATCH 3/6] correct implementation based on new tests Signed-off-by: 11happy --- keras/src/backend/openvino/core.py | 49 ++++++++++--------- .../openvino/excluded_concrete_tests.txt | 1 - keras/src/backend/openvino/numpy.py | 27 +++++----- 3 files changed, 38 insertions(+), 39 deletions(-) diff --git a/keras/src/backend/openvino/core.py b/keras/src/backend/openvino/core.py index 50241f84136d..a1b5142554da 100644 --- a/keras/src/backend/openvino/core.py +++ b/keras/src/backend/openvino/core.py @@ -4,6 +4,7 @@ import numpy as np import openvino as ov import openvino.runtime.opset14 as ov_opset +import torch from openvino import Model from openvino import Tensor from openvino import compile_model @@ -42,31 +43,35 @@ } DTYPES_MAX = { - ov.Type.f16: np.finfo(np.float16).max, - ov.Type.f32: np.finfo(np.float32).max, - ov.Type.f64: np.finfo(np.float64).max, - ov.Type.u8: np.iinfo(np.uint8).max, - ov.Type.u16: np.iinfo(np.uint16).max, - ov.Type.u32: np.iinfo(np.uint32).max, - ov.Type.u64: np.iinfo(np.uint64).max, - ov.Type.i8: np.iinfo(np.int8).max, - ov.Type.i16: np.iinfo(np.int16).max, - ov.Type.i32: np.iinfo(np.int32).max, - ov.Type.i64: np.iinfo(np.int64).max, + ov.Type.bf16: torch.finfo(torch.bfloat16).max, + ov.Type.f16: torch.finfo(torch.float16).max, + ov.Type.f32: torch.finfo(torch.float32).max, + ov.Type.f64: torch.finfo(torch.float64).max, + ov.Type.u8: torch.iinfo(torch.uint8).max, + ov.Type.u16: torch.iinfo(torch.uint16).max, + ov.Type.u32: torch.iinfo(torch.uint32).max, + ov.Type.u64: torch.iinfo(torch.uint64).max, + ov.Type.i8: torch.iinfo(torch.int8).max, + 
ov.Type.i16: torch.iinfo(torch.int16).max, + ov.Type.i32: torch.iinfo(torch.int32).max, + ov.Type.i64: torch.iinfo(torch.int64).max, + ov.Type.boolean: 1, } DTYPES_MIN = { - ov.Type.f16: np.finfo(np.float16).min, - ov.Type.f32: np.finfo(np.float32).min, - ov.Type.f64: np.finfo(np.float64).min, - ov.Type.u8: np.iinfo(np.uint8).min, - ov.Type.u16: np.iinfo(np.uint16).min, - ov.Type.u32: np.iinfo(np.uint32).min, - ov.Type.u64: np.iinfo(np.uint64).min, - ov.Type.i8: np.iinfo(np.int8).min, - ov.Type.i16: np.iinfo(np.int16).min, - ov.Type.i32: np.iinfo(np.int32).min, - ov.Type.i64: np.iinfo(np.int64).min, + ov.Type.bf16: torch.finfo(torch.bfloat16).min, + ov.Type.f16: torch.finfo(torch.float16).min, + ov.Type.f32: torch.finfo(torch.float32).min, + ov.Type.f64: torch.finfo(torch.float64).min, + ov.Type.u8: torch.iinfo(torch.uint8).min, + ov.Type.u16: torch.iinfo(torch.uint16).min, + ov.Type.u32: torch.iinfo(torch.uint32).min, + ov.Type.u64: torch.iinfo(torch.uint64).min, + ov.Type.i8: torch.iinfo(torch.int8).min, + ov.Type.i16: torch.iinfo(torch.int16).min, + ov.Type.i32: torch.iinfo(torch.int32).min, + ov.Type.i64: torch.iinfo(torch.int64).min, + ov.Type.boolean: 0, } diff --git a/keras/src/backend/openvino/excluded_concrete_tests.txt b/keras/src/backend/openvino/excluded_concrete_tests.txt index 023403ce6d46..7d53416b586b 100644 --- a/keras/src/backend/openvino/excluded_concrete_tests.txt +++ b/keras/src/backend/openvino/excluded_concrete_tests.txt @@ -38,7 +38,6 @@ NumpyDtypeTest::test_meshgrid NumpyDtypeTest::test_min NumpyDtypeTest::test_moveaxis NumpyDtypeTest::test_multiply -NumpyDtypeTest::test_nan NumpyDtypeTest::test_outer_ NumpyDtypeTest::test_power NumpyDtypeTest::test_prod diff --git a/keras/src/backend/openvino/numpy.py b/keras/src/backend/openvino/numpy.py index a31f603af2ba..1b5d4d38b9a2 100644 --- a/keras/src/backend/openvino/numpy.py +++ b/keras/src/backend/openvino/numpy.py @@ -1034,26 +1034,21 @@ def moveaxis(x, source, destination): def nan_to_num(x, nan=0.0, posinf=None, neginf=None): x = get_ov_output(x) dtype = x.get_element_type() - shape_x = ov_opset.shape_of(x).output(0) - nan_vector = ov_opset.broadcast( - ov_opset.constant(nan, dtype), shape_x + nan_val = ov_opset.constant(nan, dtype).output(0) + posinf_val = ov_opset.constant( + posinf if posinf is not None else DTYPES_MAX[dtype], dtype ).output(0) - posinf_val = posinf if posinf is not None else DTYPES_MAX[dtype] - neginf_val = neginf if neginf is not None else DTYPES_MIN[dtype] - posinf_vector = ov_opset.broadcast( - ov_opset.constant(posinf_val, dtype), shape_x + neginf_val = ov_opset.constant( + neginf if neginf is not None else DTYPES_MIN[dtype], dtype ).output(0) - neginf_vector = ov_opset.broadcast( - ov_opset.constant(neginf_val, dtype), shape_x - ).output(0) - nan_mask = ov_opset.is_nan(x).output(0) - x = ov_opset.select(nan_mask, nan_vector, x).output(0) - inf_const = ov_opset.constant(np.inf, dtype) + nan_mask = ov_opset.is_nan(ov_opset.convert(x, Type.f32)).output(0) + x = ov_opset.select(nan_mask, nan_val, x).output(0) + inf_const = ov_opset.constant(DTYPES_MAX[dtype], dtype) posinf_mask = ov_opset.equal(x, inf_const).output(0) - x = ov_opset.select(posinf_mask, posinf_vector, x).output(0) - ninf_const = ov_opset.constant(-np.inf, dtype) + x = ov_opset.select(posinf_mask, posinf_val, x).output(0) + ninf_const = ov_opset.constant(DTYPES_MIN[dtype], dtype) neginf_mask = ov_opset.equal(x, ninf_const).output(0) - x = ov_opset.select(neginf_mask, neginf_vector, x).output(0) + x = ov_opset.select(neginf_mask, 
neginf_val, x).output(0) return OpenVINOKerasTensor(x) From e8ad5431e9be46d9d1378c315c9074a6eaf71f25 Mon Sep 17 00:00:00 2001 From: 11happy Date: Sat, 19 Apr 2025 18:45:09 +0530 Subject: [PATCH 4/6] use np only torch having import errors Signed-off-by: 11happy --- keras/src/backend/openvino/core.py | 49 +++++++++++++++--------------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/keras/src/backend/openvino/core.py b/keras/src/backend/openvino/core.py index a1b5142554da..89a24b281d75 100644 --- a/keras/src/backend/openvino/core.py +++ b/keras/src/backend/openvino/core.py @@ -4,7 +4,6 @@ import numpy as np import openvino as ov import openvino.runtime.opset14 as ov_opset -import torch from openvino import Model from openvino import Tensor from openvino import compile_model @@ -43,34 +42,34 @@ } DTYPES_MAX = { - ov.Type.bf16: torch.finfo(torch.bfloat16).max, - ov.Type.f16: torch.finfo(torch.float16).max, - ov.Type.f32: torch.finfo(torch.float32).max, - ov.Type.f64: torch.finfo(torch.float64).max, - ov.Type.u8: torch.iinfo(torch.uint8).max, - ov.Type.u16: torch.iinfo(torch.uint16).max, - ov.Type.u32: torch.iinfo(torch.uint32).max, - ov.Type.u64: torch.iinfo(torch.uint64).max, - ov.Type.i8: torch.iinfo(torch.int8).max, - ov.Type.i16: torch.iinfo(torch.int16).max, - ov.Type.i32: torch.iinfo(torch.int32).max, - ov.Type.i64: torch.iinfo(torch.int64).max, + ov.Type.bf16: 3.38953139e38, + ov.Type.f16: np.finfo(np.float16).max, + ov.Type.f32: np.finfo(np.float32).max, + ov.Type.f64: np.finfo(np.float64).max, + ov.Type.u8: np.iinfo(np.uint8).max, + ov.Type.u16: np.iinfo(np.uint16).max, + ov.Type.u32: np.iinfo(np.uint32).max, + ov.Type.u64: np.iinfo(np.uint64).max, + ov.Type.i8: np.iinfo(np.int8).max, + ov.Type.i16: np.iinfo(np.int16).max, + ov.Type.i32: np.iinfo(np.int32).max, + ov.Type.i64: np.iinfo(np.int64).max, ov.Type.boolean: 1, } DTYPES_MIN = { - ov.Type.bf16: torch.finfo(torch.bfloat16).min, - ov.Type.f16: torch.finfo(torch.float16).min, - ov.Type.f32: torch.finfo(torch.float32).min, - ov.Type.f64: torch.finfo(torch.float64).min, - ov.Type.u8: torch.iinfo(torch.uint8).min, - ov.Type.u16: torch.iinfo(torch.uint16).min, - ov.Type.u32: torch.iinfo(torch.uint32).min, - ov.Type.u64: torch.iinfo(torch.uint64).min, - ov.Type.i8: torch.iinfo(torch.int8).min, - ov.Type.i16: torch.iinfo(torch.int16).min, - ov.Type.i32: torch.iinfo(torch.int32).min, - ov.Type.i64: torch.iinfo(torch.int64).min, + ov.Type.bf16: -3.38953139e38, + ov.Type.f16: np.finfo(np.float16).min, + ov.Type.f32: np.finfo(np.float32).min, + ov.Type.f64: np.finfo(np.float64).min, + ov.Type.u8: np.iinfo(np.uint8).min, + ov.Type.u16: np.iinfo(np.uint16).min, + ov.Type.u32: np.iinfo(np.uint32).min, + ov.Type.u64: np.iinfo(np.uint64).min, + ov.Type.i8: np.iinfo(np.int8).min, + ov.Type.i16: np.iinfo(np.int16).min, + ov.Type.i32: np.iinfo(np.int32).min, + ov.Type.i64: np.iinfo(np.int64).min, ov.Type.boolean: 0, } From cff09beab477e62ad1a15517d7bba1b4a4a1f73b Mon Sep 17 00:00:00 2001 From: 11happy Date: Sat, 19 Apr 2025 23:59:32 +0530 Subject: [PATCH 5/6] use inf approach Signed-off-by: 11happy --- keras/src/backend/openvino/numpy.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/keras/src/backend/openvino/numpy.py b/keras/src/backend/openvino/numpy.py index 1b5d4d38b9a2..62c8ca72002a 100644 --- a/keras/src/backend/openvino/numpy.py +++ b/keras/src/backend/openvino/numpy.py @@ -1041,13 +1041,17 @@ def nan_to_num(x, nan=0.0, posinf=None, neginf=None): neginf_val = ov_opset.constant( neginf if neginf 
is not None else DTYPES_MIN[dtype], dtype ).output(0) + posinf_mask = ov_opset.is_inf( + ov_opset.convert(x, Type.f32), + {"detect_positive": True, "detect_negative": False}, + ).output(0) + neginf_mask = ov_opset.is_inf( + ov_opset.convert(x, Type.f32), + {"detect_positive": False, "detect_negative": True}, + ).output(0) nan_mask = ov_opset.is_nan(ov_opset.convert(x, Type.f32)).output(0) x = ov_opset.select(nan_mask, nan_val, x).output(0) - inf_const = ov_opset.constant(DTYPES_MAX[dtype], dtype) - posinf_mask = ov_opset.equal(x, inf_const).output(0) x = ov_opset.select(posinf_mask, posinf_val, x).output(0) - ninf_const = ov_opset.constant(DTYPES_MIN[dtype], dtype) - neginf_mask = ov_opset.equal(x, ninf_const).output(0) x = ov_opset.select(neginf_mask, neginf_val, x).output(0) return OpenVINOKerasTensor(x) From 1ed1b379a5de61f7b805e53a740dd019278c530c Mon Sep 17 00:00:00 2001 From: 11happy Date: Sun, 27 Apr 2025 19:40:18 +0530 Subject: [PATCH 6/6] refactor code Signed-off-by: 11happy --- keras/src/backend/openvino/numpy.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/keras/src/backend/openvino/numpy.py b/keras/src/backend/openvino/numpy.py index 62c8ca72002a..90aa45fb77dc 100644 --- a/keras/src/backend/openvino/numpy.py +++ b/keras/src/backend/openvino/numpy.py @@ -1034,6 +1034,12 @@ def moveaxis(x, source, destination): def nan_to_num(x, nan=0.0, posinf=None, neginf=None): x = get_ov_output(x) dtype = x.get_element_type() + if dtype.is_integral(): + return OpenVINOKerasTensor(x) + isfloat64 = True if dtype == Type.f64 else False + if isfloat64: # conversion to f32 due to https://github.com/openvinotoolkit/openvino/issues/30264 + x = ov_opset.convert(x, Type.f32).output(0) + dtype = Type.f32 nan_val = ov_opset.constant(nan, dtype).output(0) posinf_val = ov_opset.constant( posinf if posinf is not None else DTYPES_MAX[dtype], dtype @@ -1042,17 +1048,19 @@ def nan_to_num(x, nan=0.0, posinf=None, neginf=None): neginf if neginf is not None else DTYPES_MIN[dtype], dtype ).output(0) posinf_mask = ov_opset.is_inf( - ov_opset.convert(x, Type.f32), + x, {"detect_positive": True, "detect_negative": False}, ).output(0) neginf_mask = ov_opset.is_inf( - ov_opset.convert(x, Type.f32), + x, {"detect_positive": False, "detect_negative": True}, ).output(0) - nan_mask = ov_opset.is_nan(ov_opset.convert(x, Type.f32)).output(0) + nan_mask = ov_opset.is_nan(x).output(0) x = ov_opset.select(nan_mask, nan_val, x).output(0) x = ov_opset.select(posinf_mask, posinf_val, x).output(0) x = ov_opset.select(neginf_mask, neginf_val, x).output(0) + if isfloat64: + x = ov_opset.convert(x, Type.f64).output(0) return OpenVINOKerasTensor(x)
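
Usage sketch (illustrative, not part of the patches above). The series converges on an implementation that mirrors `numpy.nan_to_num`: NaN maps to `nan`, +inf to `posinf` (defaulting to the dtype's maximum), -inf to `neginf` (defaulting to the dtype's minimum), integral inputs pass through unchanged, and float64 is routed through float32 to work around https://github.com/openvinotoolkit/openvino/issues/30264. The snippet below shows the reference NumPy behaviour the backend is expected to reproduce; the commented Keras call assumes `KERAS_BACKEND=openvino` is selected before `keras` is imported, which the patches themselves do not configure.

    import numpy as np

    x = np.array([np.nan, np.inf, -np.inf, 1.5], dtype=np.float32)

    # Defaults: nan -> 0.0, +inf -> float32 max, -inf -> float32 min.
    print(np.nan_to_num(x))
    # ~[0.0, 3.4028235e+38, -3.4028235e+38, 1.5]

    # Explicit replacement values for all three special cases.
    print(np.nan_to_num(x, nan=-1.0, posinf=1e6, neginf=-1e6))
    # ~[-1.0, 1e+06, -1e+06, 1.5]

    # The same operation through the Keras frontend, which dispatches to the
    # backend implementation added above when the OpenVINO backend is active:
    #   from keras import ops
    #   y = ops.nan_to_num(x, nan=-1.0, posinf=1e6, neginf=-1e6)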