Commit b5fa0ea

quality check
Signed-off-by: Raphael Glon <[email protected]>
1 parent: 3daa1ad

14 files changed (+17 -26 lines)

src/huggingface_inference_toolkit/heavy_utils.py

Lines changed: 5 additions & 6 deletions
@@ -7,7 +7,6 @@
 from typing import Optional, Union
 
 from huggingface_hub import HfApi, login, snapshot_download
-
 from transformers import WhisperForConditionalGeneration, pipeline
 from transformers.file_utils import is_tf_available, is_torch_available
 from transformers.pipelines import Pipeline
@@ -17,15 +16,15 @@
     is_diffusers_available,
 )
 from huggingface_inference_toolkit.logging import logger
+from huggingface_inference_toolkit.optimum_utils import (
+    get_optimum_neuron_pipeline,
+    is_optimum_neuron_available,
+)
 from huggingface_inference_toolkit.sentence_transformers_utils import (
     get_sentence_transformers_pipeline,
     is_sentence_transformers_available,
 )
 from huggingface_inference_toolkit.utils import create_artifact_filter
-from huggingface_inference_toolkit.optimum_utils import (
-    get_optimum_neuron_pipeline,
-    is_optimum_neuron_available,
-)
 
 
 def load_repository_from_hf(
@@ -185,4 +184,4 @@ def get_pipeline(
         hf_pipeline.model.config.forced_decoder_ids = hf_pipeline.tokenizer.get_decoder_prompt_ids(
             language="english", task="transcribe"
         )
-    return hf_pipeline # type: ignore
+    return hf_pipeline # type: ignore
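Every hunk in this commit repeats the same two fixes: doubled blank lines are collapsed, and the first-party huggingface_inference_toolkit imports are merged into the same alphabetized block as the other third-party imports rather than trailing them in a separate group. That ordering is consistent with an import sorter such as isort or Ruff's "I" rules run without a known-first-party section for the package, but the commit message only says "quality check", so the exact tool and configuration are assumptions. A minimal sketch of the layout the hunks converge on, using a hypothetical module:

import logging  # standard-library imports first, alphabetized
import tempfile

import pytest  # third-party and project imports share one alphabetized block
from huggingface_inference_toolkit.utils import _load_repository_from_hf
from transformers.testing_utils import require_torch

logging.basicConfig(level="DEBUG")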

src/huggingface_inference_toolkit/idle.py

Lines changed: 0 additions & 1 deletion
@@ -5,7 +5,6 @@
 import signal
 import time
 
-
 LOG = logging.getLogger(__name__)
 
 LAST_START = None

src/huggingface_inference_toolkit/utils.py

Lines changed: 0 additions & 1 deletion
@@ -5,7 +5,6 @@
 from huggingface_inference_toolkit.const import HF_DEFAULT_PIPELINE_NAME, HF_MODULE_NAME
 from huggingface_inference_toolkit.logging import logger
 
-
 _optimum_available = importlib.util.find_spec("optimum") is not None
 
 
src/huggingface_inference_toolkit/webservice_starlette.py

Lines changed: 1 addition & 0 deletions
@@ -27,6 +27,7 @@
 from huggingface_inference_toolkit.serialization.base import ContentType
 from huggingface_inference_toolkit.serialization.json_utils import Jsoner
 from huggingface_inference_toolkit.utils import convert_params_to_int_or_bool
+
 # _load_repository_from_hf,
 # convert_params_to_int_or_bool,
 # )

tests/integ/conftest.py

Lines changed: 1 addition & 1 deletion
@@ -7,9 +7,9 @@
 import docker
 import pytest
 import tenacity
+from huggingface_inference_toolkit.utils import _load_repository_from_hf
 from transformers.testing_utils import _run_slow_tests
 
-from huggingface_inference_toolkit.utils import _load_repository_from_hf
 from tests.integ.config import task2model
 
 HF_HUB_CACHE = os.environ.get("HF_HUB_CACHE", "/home/ubuntu/.cache/huggingface/hub")

tests/integ/helpers.py

Lines changed: 1 addition & 1 deletion
@@ -8,9 +8,9 @@
 import pytest
 import requests
 from docker import DockerClient
+from huggingface_inference_toolkit.utils import _load_repository_from_hf
 from transformers.testing_utils import _run_slow_tests, require_tf, require_torch
 
-from huggingface_inference_toolkit.utils import _load_repository_from_hf
 from tests.integ.config import task2input, task2model, task2output, task2validation
 
 IS_GPU = _run_slow_tests

tests/integ/test_pytorch_local_inf2.py

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 import pytest
+from huggingface_inference_toolkit.optimum_utils import is_optimum_neuron_available
 from transformers.testing_utils import require_torch
 
-from huggingface_inference_toolkit.optimum_utils import is_optimum_neuron_available
 from tests.integ.helpers import verify_task
 
 require_inferentia = pytest.mark.skipif(

tests/unit/test_diffusers.py

Lines changed: 2 additions & 3 deletions
@@ -1,11 +1,10 @@
 import logging
 import tempfile
 
-from PIL import Image
-from transformers.testing_utils import require_torch, slow
-
 from huggingface_inference_toolkit.diffusers_utils import IEAutoPipelineForText2Image
 from huggingface_inference_toolkit.utils import _load_repository_from_hf, get_pipeline
+from PIL import Image
+from transformers.testing_utils import require_torch, slow
 
 logging.basicConfig(level="DEBUG")
 
tests/unit/test_handler.py

Lines changed: 1 addition & 2 deletions
@@ -2,8 +2,6 @@
 from typing import Dict
 
 import pytest
-from transformers.testing_utils import require_tf, require_torch
-
 from huggingface_inference_toolkit.handler import (
     HuggingFaceHandler,
     get_inference_handler_either_custom_or_default_handler,
@@ -12,6 +10,7 @@
     _is_gpu_available,
     _load_repository_from_hf,
 )
+from transformers.testing_utils import require_tf, require_torch
 
 TASK = "text-classification"
 MODEL = "hf-internal-testing/tiny-random-distilbert"

tests/unit/test_optimum_utils.py

Lines changed: 1 addition & 2 deletions
@@ -2,14 +2,13 @@
 import tempfile
 
 import pytest
-from transformers.testing_utils import require_torch
-
 from huggingface_inference_toolkit.optimum_utils import (
     get_input_shapes,
     get_optimum_neuron_pipeline,
     is_optimum_neuron_available,
 )
 from huggingface_inference_toolkit.utils import _load_repository_from_hf
+from transformers.testing_utils import require_torch
 
 require_inferentia = pytest.mark.skipif(
     not is_optimum_neuron_available(),
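The hunk above stops inside the require_inferentia definition. For context, this is the usual pytest skipif-marker pattern: a reusable marker defined once at module level and applied to individual tests with a decorator. A minimal, self-contained sketch follows; the reason string and the decorated test are illustrative, not copied from the repository.

import pytest

from huggingface_inference_toolkit.optimum_utils import is_optimum_neuron_available

# Marked tests are skipped when optimum-neuron support is unavailable.
# The reason text is illustrative, not the repository's actual string.
require_inferentia = pytest.mark.skipif(
    not is_optimum_neuron_available(),
    reason="requires optimum-neuron (AWS Inferentia) support",
)


@require_inferentia
def test_runs_only_on_neuron_hosts():
    # Hypothetical test body: executes only where the skip condition is False.
    assert is_optimum_neuron_available()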
