@@ -34,7 +34,6 @@ def make_sure_other_containers_are_stopped(client: DockerClient, container_name:
 #     reraise=True
 # )
 def wait_for_container_to_be_ready(base_url, time_between_retries=3, max_retries=30):
-
     retries = 0
     error = None
 
@@ -46,9 +45,7 @@ def wait_for_container_to_be_ready(base_url, time_between_retries=3, max_retries
                 logging.info("Container ready!")
                 return True
             else:
-                raise ConnectionError(
-                    f"Couldn'start container, Error: {response.status_code}"
-                )
+                raise ConnectionError(f"Couldn'start container, Error: {response.status_code}")
         except Exception as exception:
             error = exception
             logging.warning(f"Container at {base_url} not ready, trying again...")
@@ -62,7 +59,6 @@ def verify_task(
     # container: DockerClient,
     task: str,
     port: int = 5000,
-    framework: str = "pytorch",
 ):
     BASE_URL = f"http://localhost:{port}"
     logging.info(f"Base URL: {BASE_URL}")
@@ -90,10 +86,7 @@ def verify_task(
             headers={"content-type": "audio/x-audio"},
         ).json()
     elif task == "text-to-image":
-        prediction = requests.post(
-            f"{BASE_URL}", json=input, headers={"accept": "image/png"}
-        ).content
-
+        prediction = requests.post(f"{BASE_URL}", json=input, headers={"accept": "image/png"}).content
     else:
         prediction = requests.post(f"{BASE_URL}", json=input).json()
 
@@ -119,6 +112,8 @@ def verify_task(
 @pytest.mark.parametrize(
     "task",
     [
+        # transformers
+        # TODO: "visual-question-answering" and "zero-shot-image-classification" not supported yet due to multimodality input
         "text-classification",
         "zero-shot-classification",
         "token-classification",
@@ -136,25 +131,22 @@ def verify_task(
         "image-segmentation",
         "table-question-answering",
         "conversational",
-        # TODO currently not supported due to multimodality input
-        # "visual-question-answering",
-        # "zero-shot-image-classification",
+        "image-text-to-text",
+        # sentence-transformers
         "sentence-similarity",
         "sentence-embeddings",
         "sentence-ranking",
         # diffusers
         "text-to-image",
     ],
 )
-def test_pt_container_remote_model(task) -> None:
+def test_pt_container_remote_model(task: str) -> None:
     container_name = f"integration-test-{task}"
     container_image = f"starlette-transformers:{DEVICE}"
     framework = "pytorch"
     model = task2model[task][framework]
     port = random.randint(5000, 6000)
-    device_request = (
-        [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
-    )
+    device_request = [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
 
     make_sure_other_containers_are_stopped(client, container_name)
     container = client.containers.run(
@@ -177,6 +169,8 @@ def test_pt_container_remote_model(task) -> None:
 @pytest.mark.parametrize(
     "task",
     [
+        # transformers
+        # TODO: "visual-question-answering" and "zero-shot-image-classification" not supported yet due to multimodality input
         "text-classification",
         "zero-shot-classification",
         "token-classification",
@@ -194,29 +188,26 @@ def test_pt_container_remote_model(task) -> None:
         "image-segmentation",
         "table-question-answering",
         "conversational",
-        # TODO currently not supported due to multimodality input
-        # "visual-question-answering",
-        # "zero-shot-image-classification",
+        "image-text-to-text",
+        # sentence-transformers
         "sentence-similarity",
         "sentence-embeddings",
         "sentence-ranking",
         # diffusers
         "text-to-image",
     ],
 )
-def test_pt_container_local_model(task) -> None:
+def test_pt_container_local_model(task: str) -> None:
     container_name = f"integration-test-{task}"
     container_image = f"starlette-transformers:{DEVICE}"
     framework = "pytorch"
     model = task2model[task][framework]
     port = random.randint(5000, 6000)
-    device_request = (
-        [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
-    )
+    device_request = [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
     make_sure_other_containers_are_stopped(client, container_name)
     with tempfile.TemporaryDirectory() as tmpdirname:
         # https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
-        _storage_dir = _load_repository_from_hf(model, tmpdirname, framework="pytorch")
+        _load_repository_from_hf(model, tmpdirname, framework="pytorch")
         container = client.containers.run(
             container_image,
             name=container_name,
@@ -241,9 +232,7 @@ def test_pt_container_local_model(task) -> None:
 def test_pt_container_custom_handler(repository_id) -> None:
     container_name = "integration-test-custom"
     container_image = f"starlette-transformers:{DEVICE}"
-    device_request = (
-        [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
-    )
+    device_request = [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
     port = random.randint(5000, 6000)
 
     make_sure_other_containers_are_stopped(client, container_name)
@@ -277,12 +266,10 @@ def test_pt_container_custom_handler(repository_id) -> None:
     "repository_id",
     ["philschmid/custom-pipeline-text-classification"],
 )
-def test_pt_container_legacy_custom_pipeline(repository_id) -> None:
+def test_pt_container_legacy_custom_pipeline(repository_id: str) -> None:
     container_name = "integration-test-custom"
     container_image = f"starlette-transformers:{DEVICE}"
-    device_request = (
-        [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
-    )
+    device_request = [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
     port = random.randint(5000, 6000)
 
     make_sure_other_containers_are_stopped(client, container_name)
@@ -345,9 +332,7 @@ def test_tf_container_remote_model(task) -> None:
     container_image = f"starlette-transformers:{DEVICE}"
     framework = "tensorflow"
     model = task2model[task][framework]
-    device_request = (
-        [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
-    )
+    device_request = [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
     if model is None:
         pytest.skip("no supported TF model")
     port = random.randint(5000, 6000)
@@ -401,9 +386,7 @@ def test_tf_container_local_model(task) -> None:
     container_image = f"starlette-transformers:{DEVICE}"
     framework = "tensorflow"
     model = task2model[task][framework]
-    device_request = (
-        [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
-    )
+    device_request = [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])] if IS_GPU else []
     if model is None:
         pytest.skip("no supported TF model")
     port = random.randint(5000, 6000)