diff --git a/docker/Dockerfile b/docker/Dockerfile
index e449aaf61..37d63746e 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -23,7 +23,7 @@ RUN git clone $VLLM_REPO /workspace/vllm
 RUN if [ -n "$VLLM_COMMIT_HASH" ]; then \
     git checkout $VLLM_COMMIT_HASH; \
     fi
-RUN --mount=type=cache,target=/root/.cache/pip pip install -r requirements/tpu.txt --retries 3
+RUN --mount=type=cache,target=/root/.cache/pip pip install --extra-index-url https://download.pytorch.org/whl/cpu -r requirements/tpu.txt --retries 3
 RUN --mount=type=cache,target=/root/.cache/pip VLLM_TARGET_DEVICE="tpu" pip install -e .
 
 # Install test dependencies
@@ -39,7 +39,7 @@ RUN --mount=type=cache,target=/root/.cache/pip python3 -m pip install \
 WORKDIR /workspace/tpu_inference
 # Install requirements first and cache so we don't need to re-install on code change.
 COPY requirements.txt .
-RUN --mount=type=cache,target=/root/.cache/pip pip install -r requirements.txt --retries 3
+RUN --mount=type=cache,target=/root/.cache/pip pip install --extra-index-url https://download.pytorch.org/whl/cpu -r requirements.txt --retries 3
 COPY requirements_benchmarking.txt .
 # These are needed for the E2E benchmarking tests (i.e. tests/e2e/benchmarking/mlperf.sh)
 RUN --mount=type=cache,target=/root/.cache/pip pip install -r requirements_benchmarking.txt --retries 3