Commit 0ed51c1

pass
1 parent c80d1aa commit 0ed51c1

File tree: 5 files changed, +79 -51 lines changed

requirements-test.txt (+5 -3)

@@ -1,5 +1,3 @@
-tox
-pytest
 pytest-xdist
 parameterized
 psutil
@@ -8,4 +6,8 @@ pytest-sugar
 mock==2.0.0
 docker
 requests
-tenacity
+tenacity
+termcolor
+execnet
+pluggy
+py

setup.py (+4 -8)

@@ -5,7 +5,7 @@
 # We don't declare our dependency on transformers here because we build with
 # different packages for different variants
 
-VERSION = "0.2.0"
+VERSION = "0.3.0"
 
 
 # Ubuntu packages
@@ -15,14 +15,10 @@
 
 install_requires = [
     # transformers
-    "transformers[sklearn,sentencepiece]==4.27.0",
+    "transformers[sklearn,sentencepiece]==4.37.2",
     "huggingface_hub>=0.20.3",
-    # api stuff
     "orjson",
-    # "robyn",
-    # vision
     "Pillow",
-    # speech + torchaudio
     "librosa",
     "pyctcdecode>=0.3.0",
     "phonemizer",
@@ -34,9 +30,9 @@
 extras["st"] = ["sentence_transformers==2.2.1"]
 extras["diffusers"] = ["diffusers==0.26.3", "accelerate==0.27.2"]
 extras["torch"] = ["torch==2.2.0", "torchaudio"]
-extras["tensorflow"] = ["tensorflow==2.9.3"]
+extras["tensorflow"] = ["tensorflow"]
 extras["test"] = [
-    "pytest",
+    "pytest==7.2.1",
     "pytest-xdist",
     "parameterized",
     "psutil",

src/huggingface_inference_toolkit/utils.py (+10 -14)

@@ -74,19 +74,14 @@ def wrap_conversation_pipeline(pipeline):
     """
 
     def wrapped_pipeline(inputs, *args, **kwargs):
-        converted_input = Conversation(
-            inputs["text"],
-            past_user_inputs=inputs.get("past_user_inputs", []),
-            generated_responses=inputs.get("generated_responses", []),
-        )
+        logging.info(f"Inputs: {inputs}")
+        logging.info(f"Args: {args}")
+        logging.info(f"KWArgs: {kwargs}")
+        converted_input = Conversation(messages=inputs)
         prediction = pipeline(converted_input, *args, **kwargs)
-        return {
-            "generated_text": prediction.generated_responses[-1],
-            "conversation": {
-                "past_user_inputs": prediction.past_user_inputs,
-                "generated_responses": prediction.generated_responses,
-            },
-        }
+        logging.info(f"Prediction: {prediction}")
+        return prediction
+
 
     return wrapped_pipeline

@@ -295,11 +290,12 @@ def get_pipeline(
     ):
         # set chunk length to 30s for whisper to enable long audio files
         hf_pipeline._preprocess_params["chunk_length_s"] = 30
-        hf_pipeline._preprocess_params["ignore_warning"] = True
+        # hf_pipeline._preprocess_params["ignore_warning"] = True
         # set decoder to english by default
         # TODO: replace when transformers 4.26.0 is release with
         hf_pipeline.model.config.forced_decoder_ids = hf_pipeline.tokenizer.get_decoder_prompt_ids(
-            language="english", task="transcribe"
+            language="english",
+            task="transcribe"
         )
         """"
         hf_pipeline.tokenizer.language = "english"
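
For context, a minimal sketch of the messages-based Conversation API the rewritten wrapper relies on. This is an assumption based on transformers 4.37.x (the version pinned in setup.py above), where Conversation wraps a chat-style list of role/content dicts instead of the old text/past_user_inputs fields; the class was deprecated and removed in later releases.

    # Sketch, assuming transformers 4.37.x.
    from transformers import Conversation

    conv = Conversation(
        messages=[
            {"role": "user", "content": "Which movie is the best ?"},
            {"role": "assistant", "content": "It's Die Hard for sure."},
            {"role": "user", "content": "Can you explain why?"},
        ]
    )

    # The wrapper now returns the Conversation itself, so callers read the
    # history (and, after generation, the model reply) from .messages.
    print(conv.messages[-1]["content"])  # -> "Can you explain why?"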

tests/unit/test_utils.py (+35 -15)

@@ -160,29 +160,49 @@ def test_wrap_conversation_pipeline():
         framework="pt",
     )
     conv_pipe = wrap_conversation_pipeline(init_pipeline)
-    data = {
-        "past_user_inputs": ["Which movie is the best ?"],
-        "generated_responses": ["It's Die Hard for sure."],
-        "text": "Can you explain why?",
-    }
+    data = [
+        {
+            "role": "user",
+            "content": "Which movie is the best ?"
+        },
+        {
+            "role": "assistant",
+            "content": "It's Die Hard for sure."
+        },
+        {
+            "role": "user",
+            "content": "Can you explain why?"
+        }
+    ]
     res = conv_pipe(data)
-    assert "conversation" in res
-    assert "generated_text" in res
+    assert "content" in res.messages[-1]
 
 
 @require_torch
 def test_wrapped_pipeline():
     with tempfile.TemporaryDirectory() as tmpdirname:
-        storage_dir = _load_repository_from_hf("hf-internal-testing/tiny-random-blenderbot", tmpdirname, framework="pytorch")
+        storage_dir = _load_repository_from_hf(
+            repository_id="microsoft/DialoGPT-small",
+            target_dir=tmpdirname,
+            framework="pytorch"
+        )
         conv_pipe = get_pipeline("conversational", storage_dir.as_posix())
-        data = {
-            "past_user_inputs": ["Which movie is the best ?"],
-            "generated_responses": ["It's Die Hard for sure."],
-            "text": "Can you explain why?",
-        }
+        data = [
+            {
+                "role": "user",
+                "content": "Which movie is the best ?"
+            },
+            {
+                "role": "assistant",
+                "content": "It's Die Hard for sure."
+            },
+            {
+                "role": "user",
+                "content": "Can you explain why?"
+            }
+        ]
         res = conv_pipe(data)
-        assert "conversation" in res
-        assert "generated_text" in res
+        assert "content" in res.messages[-1]
 
 
 def test_local_custom_pipeline():
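
A hypothetical standalone driver mirroring what these updated tests exercise. The model id follows the test code above, and it assumes transformers 4.37.x behavior, where the conversational pipeline appends the generated reply to the Conversation it returns; it is a sketch, not the toolkit's own entry point.

    # Sketch, assuming transformers 4.37.x; downloads microsoft/DialoGPT-small.
    from transformers import Conversation, pipeline

    chat = pipeline("conversational", model="microsoft/DialoGPT-small")
    conv = Conversation(
        messages=[{"role": "user", "content": "Which movie is the best ?"}]
    )
    result = chat(conv)

    # The model's reply is appended as the final message, which is exactly
    # what the tests assert on.
    assert "content" in result.messages[-1]
    print(result.messages[-1]["content"])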

tox.ini (+25 -11)

@@ -1,28 +1,38 @@
 [tox]
-envlist = py39
+envlist = 311
 skipsdist = true
+allowlist_externals =
+    pytest
 
 [testenv]
-deps = -r requirements.txt
-install_command =
-    pip install -U pip
-    pip install -e .
-setenv =
-    PYTHONPATH=.
+deps =
+    uv
+    pytest
+allowlist_externals =
+    pytest
+    uv
+commands_pre =
+    uv pip install -e ".[test]"
+commands = pytest --version
+setenv =
+    PYTHONPATH = .
 
 [testenv:lint]
-basepython = python
+basepython = python
 commands = ruff src
 
 [testenv:fix]
 basepython = python
 commands = ruff src --fix
 
 [testenv:unit-torch]
-install_command =
-    pip install -e ".[test,torch,st]"
+install_command =
+    uv pip install -e ".[torch,st]"
 allowlist_externals =
     pytest
+    uv
+    source
+    rm
 commands =
     pytest -s -v \
         {tty:--color=yes} \
@@ -32,8 +42,12 @@ commands =
         tests/unit/test_serializer.py \
         tests/unit/test_utils.py \
         {posargs} \
-        --log-cli-level=DEBUG \
+        --log-cli-level=INFO \
         --log-format='%(asctime)s %(levelname)s %(module)s:%(lineno)d %(message)s'
+setenv =
+    PYTHONPATH=.
+    TORCH_USE_CUDA_DSA=true
+
 
 [testenv:unit-torch-slow]
 install_command = pip install -e ".[torch, st, diffusers]"
