Commit 4e3877f

Add google extra with google-cloud-storage (#77)

* Fix formatting in `setup.py`
* Add missing trove classifiers
* Add missing description
* Add missing `google-cloud-storage` as `google` extra
* Update `Dockerfile` indent size and `google-cloud-storage` installation
  - Align the indent size with the `apt-get install` arguments, even though those are just args
  - Use the full flag name `--upgrade` instead of the shortened `-U` for readability
  - Install `google-cloud-storage` via the `google` extra instead (the `google` extra was added recently because otherwise installations from the wheel that don't rely on this Dockerfile would fail with an `ImportError` on `google-cloud-storage` when deploying from Vertex AI or when the `AIP_STORAGE_URI` env var is set)
* Add newline at the end of `setup.cfg`
* Update `.github/workflows/unit-test.yaml` to install the `google` extra too
  Ideally the `pip install` command shouldn't be required, since everything is already installed within the `integration-test-pytorch:gpu` image; but as long as it is there, for consistency it should also include the recently added `google` extra that brings in `google-cloud-storage`, otherwise the tests in #76 will fail.
1 parent 58b760f commit 4e3877f
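
The failure mode the commit message describes can be illustrated with a minimal sketch. This is not the toolkit's actual code, just an assumed pattern: `google-cloud-storage` is only needed when Vertex AI sets the `AIP_STORAGE_URI` environment variable, so a wheel installed without the `google` extra fails exactly there.

```python
# Minimal sketch (not the toolkit's actual code) of the ImportError the
# commit message describes: google-cloud-storage is only needed when the
# model artifacts live on GCS, i.e. when Vertex AI sets AIP_STORAGE_URI.
import os

storage_uri = os.environ.get("AIP_STORAGE_URI")  # e.g. "gs://my-bucket/model"

if storage_uri is not None:
    try:
        from google.cloud import storage  # provided by the `google` extra
    except ImportError as exc:
        # Without the `google` extra, a plain wheel install fails here.
        raise ImportError(
            "AIP_STORAGE_URI is set but google-cloud-storage is not installed; "
            "install the `google` extra, e.g. pip install '.[google]'"
        ) from exc
```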

4 files changed (+38 −34 lines)

.github/workflows/unit-test.yaml

+1 −2

@@ -44,5 +44,4 @@ jobs:
         -v ./tests:${{ env.CACHE_TEST_DIR }} \
         --entrypoint /bin/bash \
         integration-test-pytorch:gpu \
-        -c "pip install '.[test, st, diffusers]' && pytest ${{ env.CACHE_TEST_DIR }}/unit"
-
+        -c "pip install '.[test,st,diffusers,google]' && pytest ${{ env.CACHE_TEST_DIR }}/unit"

dockerfiles/pytorch/Dockerfile

+21 −21

@@ -14,29 +14,30 @@ RUN apt-get update && \
     add-apt-repository ppa:deadsnakes/ppa && \
     apt-get -y upgrade --only-upgrade systemd openssl cryptsetup && \
     apt-get install -y \
-    build-essential \
-    bzip2 \
-    curl \
-    git \
-    git-lfs \
-    tar \
-    gcc \
-    g++ \
-    cmake \
-    libprotobuf-dev \
-    protobuf-compiler \
-    python3-dev \
-    python3-pip \
-    python3.11 \
-    libsndfile1-dev \
-    ffmpeg \
+        build-essential \
+        bzip2 \
+        curl \
+        git \
+        git-lfs \
+        tar \
+        gcc \
+        g++ \
+        cmake \
+        libprotobuf-dev \
+        protobuf-compiler \
+        python3-dev \
+        python3-pip \
+        python3.11 \
+        libsndfile1-dev \
+        ffmpeg \
     && apt-get clean autoremove --yes \
     && rm -rf /var/lib/{apt,dpkg,cache,log}
+
 # Copying only necessary files as filtered by .dockerignore
 COPY . .
 
 # install wheel and setuptools
-RUN pip install --no-cache-dir -U pip ".[torch, st, diffusers]"
+RUN pip install --no-cache-dir --upgrade pip ".[torch,st,diffusers]"
 
 # copy application
 COPY src/huggingface_inference_toolkit huggingface_inference_toolkit

@@ -47,8 +48,7 @@ COPY --chmod=0755 scripts/entrypoint.sh entrypoint.sh
 
 ENTRYPOINT ["bash", "-c", "./entrypoint.sh"]
 
+FROM base AS vertex
 
-from base as vertex
-
-# Install Vertex AI requiremented packages
-RUN pip install --no-cache-dir google-cloud-storage
+# Install `google` extra for Vertex AI compatibility
+RUN pip install --no-cache-dir --upgrade ".[google]"
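
With the `google` extra baked into the `vertex` stage, the image can resolve artifacts from the bucket that `AIP_STORAGE_URI` points at. Below is a hedged sketch of that download step using the public `google-cloud-storage` client API; the `download_artifacts` helper and the `/tmp/model` path are illustrative assumptions, not the toolkit's actual code.

```python
# Illustrative sketch: fetch model artifacts from the GCS location in
# AIP_STORAGE_URI with google-cloud-storage (public client API; the helper
# name and local paths are hypothetical, not the toolkit's own code).
import os
from pathlib import Path

from google.cloud import storage


def download_artifacts(storage_uri: str, target_dir: str = "/tmp/model") -> Path:
    # AIP_STORAGE_URI looks like "gs://<bucket>/<prefix>"
    assert storage_uri.startswith("gs://")
    bucket_name, _, prefix = storage_uri[len("gs://"):].partition("/")
    client = storage.Client()
    target = Path(target_dir)
    for blob in client.list_blobs(bucket_name, prefix=prefix):
        if blob.name.endswith("/"):
            continue  # skip "directory" placeholder objects
        local_path = target / os.path.relpath(blob.name, prefix)
        local_path.parent.mkdir(parents=True, exist_ok=True)
        blob.download_to_filename(str(local_path))
    return target


if __name__ == "__main__":
    download_artifacts(os.environ["AIP_STORAGE_URI"])
```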

setup.cfg

+1 −1

@@ -19,4 +19,4 @@ use_parentheses = True
 
 [flake8]
 ignore = E203, E501, E741, W503, W605
-max-line-length = 119
\ No newline at end of file
+max-line-length = 119

setup.py

+15 −10

@@ -1,4 +1,5 @@
 from __future__ import absolute_import
+
 from setuptools import find_packages, setup
 
 # We don't declare our dependency on transformers here because we build with

@@ -12,7 +13,7 @@
 # libavcodec-extra : libavcodec-extra inculdes additional codecs for ffmpeg
 
 install_requires = [
-    "transformers[sklearn,sentencepiece, audio,vision]==4.41.1",
+    "transformers[sklearn,sentencepiece,audio,vision]==4.41.1",
     "orjson",
     # vision
     "Pillow",

@@ -25,7 +26,7 @@
     "starlette",
     "uvicorn",
     "pandas",
-    "peft==0.11.1"
+    "peft==0.11.1",
 ]
 
 extras = {}

@@ -43,26 +44,26 @@
     "mock==2.0.0",
     "docker",
     "requests",
-    "tenacity"
-]
-extras["quality"] = [
-    "isort",
-    "ruff"
+    "tenacity",
 ]
+extras["quality"] = ["isort", "ruff"]
 extras["inf2"] = ["optimum-neuron"]
+extras["google"] = ["google-cloud-storage"]
 
 setup(
     name="huggingface-inference-toolkit",
     version=VERSION,
     author="HuggingFace",
-    description=".",
+    description="Hugging Face Inference Toolkit is for serving 🤗 Transformers models in containers.",
     url="",
     package_dir={"": "src"},
     packages=find_packages(where="src"),
     install_requires=install_requires,
     extras_require=extras,
-    entry_points={"console_scripts": "serve=sagemaker_huggingface_inference_toolkit.serving:main"},
-    python_requires=">=3.8.0",
+    entry_points={
+        "console_scripts": "serve=sagemaker_huggingface_inference_toolkit.serving:main"
+    },
+    python_requires=">=3.8",
     license="Apache License 2.0",
     classifiers=[
         "Development Status :: 5 - Production/Stable",

@@ -72,7 +73,11 @@
         "License :: OSI Approved :: Apache Software License",
         "Operating System :: OS Independent",
         "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
         "Programming Language :: Python :: 3.11",
+        "Programming Language :: Python :: 3.12",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
     ],
 )
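
As a side note on the reformatted `entry_points` block: at install time, pip materializes the `console_scripts` declaration as a `serve` executable that is roughly equivalent to the wrapper below. This is illustrative only; the shim pip actually generates differs slightly in its boilerplate.

```python
# Rough equivalent of the `serve` executable generated from the
# console_scripts entry point above (illustrative, not the exact
# shim that pip writes).
import sys

from sagemaker_huggingface_inference_toolkit.serving import main

if __name__ == "__main__":
    sys.exit(main())
```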
