From 640d8a2d2875327ad899406983714565b80697b9 Mon Sep 17 00:00:00 2001
From: Bosco Yung <15840328+bhky@users.noreply.github.com>
Date: Fri, 24 Jan 2025 09:02:45 +0900
Subject: [PATCH 1/2] Update setup.cfg

---
 setup.cfg | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index 13c267b..78f00c4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -2,7 +2,7 @@
 name = targetran
 version = attr: targetran.__version__
 author = attr: targetran.__author__
-author_email = xtorch501@gmail.com
+author_email = bhky.dev@gmail.com
 description = Target transformation for data augmentation in objection detection
 long_description = file: README.md
 long_description_content_type = text/markdown
@@ -20,4 +20,4 @@ install_requires =
     numpy>=1.22.0

 [options.package_data]
-targetran = py.typed
\ No newline at end of file
+targetran = py.typed

From d13d07f12d403f783f5f78ba83c8e023a8c3d7d5 Mon Sep 17 00:00:00 2001
From: Bosco Yung <15840328+bhky@users.noreply.github.com>
Date: Fri, 24 Jan 2025 18:36:07 +0900
Subject: [PATCH 2/2] Fix mypy issues (#12)

* For now use mypy 1.13.0
* Adjustment for mypy, no change in code logic
* Add back type ignore
* Replace np.float_ by np.float32, as the former has been removed since NumPy 2.0
* Fix union type
* Type ignore
* Revert "Type ignore"

  This reverts commit 52d5152a220ca1ab548194e05b3a02c671d5c048.

* Try float type
* Try np.generic
* Type ignore
* Type ignore
* Type ignore
* Apply pylint differently
* Skip type ignore
* Use floatarray instead of anyarray
* Use mypy 1.12.1
* No type ignore
* NDFloatArray = NDAnyArray
---
 .github/workflows/ci.yml                         | 2 +-
 README.md                                        | 3 +--
 examples/kaggle/run_tf_dataset_kaggle_example.py | 2 +-
 examples/local/run_pt_dataset_local_example.py   | 2 +-
 examples/local/run_tf_dataset_local_example.py   | 2 +-
 targetran/_np_functional.py                      | 4 +++-
 targetran/_typing.py                             | 2 +-
 targetran/np/_np.py                              | 5 ++---
 tests/run_pt_dataset_test.py                     | 2 +-
 tests/run_tf_dataset_test.py                     | 2 +-
 10 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 872a115..82463db 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -19,7 +19,7 @@ jobs:
       - name: Install tools
         run: |
           python3 -m pip install pylint
-          python3 -m pip install mypy
+          python3 -m pip install mypy==1.12.1
       - name: Test
         run: |
           ./tests/install_from_local_and_test.sh
\ No newline at end of file
diff --git a/README.md b/README.md
index f41fc98..1ca8d8d 100644
--- a/README.md
+++ b/README.md
@@ -70,8 +70,7 @@ python3 -m pip install .

 ## Notations

-- `NDFloatArray`: NumPy float array type, which is an alias to `np.typing.NDArray[np.float_]`.
-  The values are converted to `np.float32` internally.
+- `NDFloatArray`: NumPy float array type. The values are converted to `np.float32` internally.
 - `tf.Tensor`: General TensorFlow Tensor type. The values are converted to `tf.float32` internally.

 ## Data format
diff --git a/examples/kaggle/run_tf_dataset_kaggle_example.py b/examples/kaggle/run_tf_dataset_kaggle_example.py
index 18423e8..73ff7f4 100644
--- a/examples/kaggle/run_tf_dataset_kaggle_example.py
+++ b/examples/kaggle/run_tf_dataset_kaggle_example.py
@@ -19,7 +19,7 @@
 import targetran.tf as tt
 import tensorflow as tf

-NDAnyArray = np.typing.NDArray[np.float_]
+NDAnyArray = np.typing.NDArray[np.float32]

 # This will be the data path when you use "Add Data" on the right panel
 # of a Kaggle Notebook.
diff --git a/examples/local/run_pt_dataset_local_example.py b/examples/local/run_pt_dataset_local_example.py
index 1f507de..6b88f2c 100644
--- a/examples/local/run_pt_dataset_local_example.py
+++ b/examples/local/run_pt_dataset_local_example.py
@@ -24,7 +24,7 @@
 )
 from targetran.utils import Compose, collate_fn

-NDAnyArray = np.typing.NDArray[np.float_]
+NDAnyArray = np.typing.NDArray[np.float32]


 def load_images() -> Dict[str, NDAnyArray]:
diff --git a/examples/local/run_tf_dataset_local_example.py b/examples/local/run_tf_dataset_local_example.py
index 05cb2f1..adfdb7c 100644
--- a/examples/local/run_tf_dataset_local_example.py
+++ b/examples/local/run_tf_dataset_local_example.py
@@ -24,7 +24,7 @@
     TFResize,
 )

-NDAnyArray = np.typing.NDArray[np.float_]
+NDAnyArray = np.typing.NDArray[np.float32]


 def load_images() -> Dict[str, NDAnyArray]:
diff --git a/targetran/_np_functional.py b/targetran/_np_functional.py
index 8284eb0..98da2b4 100644
--- a/targetran/_np_functional.py
+++ b/targetran/_np_functional.py
@@ -68,11 +68,13 @@ def _np_resize_image(
     """
     dest_size: (image_height, image_width)
     """
-    resized_image: NDAnyArray = cv2.resize(  # pylint: disable=no-member
+    # pylint: disable=no-member
+    resized_image: NDFloatArray = cv2.resize(
         image,
         dsize=(dest_size[1], dest_size[0]),
         interpolation=_INTERPOLATION_DICT[interpolation]
     )
+    # pylint: enable=no-member
     return resized_image
diff --git a/targetran/_typing.py b/targetran/_typing.py
index 5d6c2d9..170bce1 100644
--- a/targetran/_typing.py
+++ b/targetran/_typing.py
@@ -9,7 +9,7 @@
 ArrayLike = np.typing.ArrayLike
 NDAnyArray = np.typing.NDArray[Any]
 NDBoolArray = np.typing.NDArray[np.bool_]
-NDFloatArray = np.typing.NDArray[np.float_]
+NDFloatArray = NDAnyArray  # Not nice, but since NumPy 2.0 there's no better way yet.
 NDIntArray = np.typing.NDArray[np.int_]

 # T is treated semantically as "NDAnyArray or tf.Tensor" in this library.
diff --git a/targetran/np/_np.py b/targetran/np/_np.py
index da79927..77baf3b 100644
--- a/targetran/np/_np.py
+++ b/targetran/np/_np.py
@@ -294,9 +294,8 @@ def _get_mats(
         else:
             self._rng.shuffle(indices)

-        indices = indices.tolist()
-        image_dest_tran_mats = np.take(image_dest_tran_mats, indices, 0)
-        bboxes_tran_mats = np.take(bboxes_tran_mats, indices, 0)
+        image_dest_tran_mats = np.take(image_dest_tran_mats, indices.tolist(), 0)
+        bboxes_tran_mats = np.take(bboxes_tran_mats, indices.tolist(), 0)

         image_dest_tran_mat = np.linalg.multi_dot(image_dest_tran_mats)
         # Note the reversed order for the bboxes tran matrices.
diff --git a/tests/run_pt_dataset_test.py b/tests/run_pt_dataset_test.py
index ad2aca4..321528a 100644
--- a/tests/run_pt_dataset_test.py
+++ b/tests/run_pt_dataset_test.py
@@ -11,7 +11,7 @@
 import targetran.np
 from targetran.utils import Compose, collate_fn

-NDAnyArray = np.typing.NDArray[np.float_]
+NDAnyArray = np.typing.NDArray[np.float32]


 def make_np_data() -> Tuple[Sequence[NDAnyArray],
diff --git a/tests/run_tf_dataset_test.py b/tests/run_tf_dataset_test.py
index c55bef5..f4af974 100644
--- a/tests/run_tf_dataset_test.py
+++ b/tests/run_tf_dataset_test.py
@@ -9,7 +9,7 @@
 import targetran.tf

-NDAnyArray = np.typing.NDArray[np.float_]
+NDAnyArray = np.typing.NDArray[np.float32]


 def make_np_data() -> Tuple[Sequence[NDAnyArray],
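
A minimal sketch of the NumPy 2.0 issue the second patch works around: `np.float_` was removed in NumPy 2.0, so the old `np.typing.NDArray[np.float_]` alias fails with AttributeError at import time. The scripts therefore switch to `np.float32`, and `targetran/_typing.py` falls back to an any-dtype alias. The snippet below mirrors only that alias pattern; the `scale_boxes` helper and its sample values are hypothetical and not part of the patch.

from typing import Any

import numpy as np

# NumPy >= 2.0 removed the legacy scalar alias `np.float_`, so the old
# `np.typing.NDArray[np.float_]` annotation fails at import time.
# Fall back to an any-dtype array alias that works on both NumPy 1.x and 2.x.
NDAnyArray = np.typing.NDArray[Any]
NDFloatArray = NDAnyArray  # Looser than NDArray[np.float64], but portable.


def scale_boxes(bboxes: NDFloatArray, factor: float) -> NDFloatArray:
    """Hypothetical helper: scale bounding boxes, keeping values as float32."""
    return np.asarray(bboxes, dtype=np.float32) * factor


if __name__ == "__main__":
    boxes = np.array([[10.0, 20.0, 30.0, 40.0]], dtype=np.float32)
    print(scale_boxes(boxes, 2.0))  # [[20. 40. 60. 80.]]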