diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index e23d9e5fb..a0e64b9d0 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -12,7 +12,7 @@ concurrency:
 jobs:
   cmake-build:
     name: Build FlexFlow Serve
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     defaults:
       run:
         shell: bash -l {0} # required to use an activated conda environment
diff --git a/.github/workflows/clang-format-check.yml b/.github/workflows/clang-format-check.yml
index 3af6ba664..3b2343079 100644
--- a/.github/workflows/clang-format-check.yml
+++ b/.github/workflows/clang-format-check.yml
@@ -3,7 +3,7 @@ on: [push, pull_request, workflow_dispatch]
 jobs:
   formatting-check:
     name: Formatting Check
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     strategy:
       matrix:
         path:
diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
index 757689d22..ab50784b3 100644
--- a/.github/workflows/docker-build.yml
+++ b/.github/workflows/docker-build.yml
@@ -14,7 +14,7 @@ concurrency:
 jobs:
   docker-build-rocm:
     name: Build and Install FlexFlow in a Docker Container (ROCm backend)
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     if: ${{ ( github.event_name != 'push' && github.event_name != 'schedule' && github.event_name != 'workflow_dispatch' ) || github.ref_name != 'inference' }}
     env:
       FF_GPU_BACKEND: "hip_rocm"
@@ -69,7 +69,7 @@ jobs:
 
   docker-build-cuda:
     name: Build and Install FlexFlow in a Docker Container (CUDA backend)
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     strategy:
       matrix:
         cuda_version: ["11.8", "12.0", "12.1", "12.2"]
@@ -119,7 +119,7 @@ jobs:
 
   notify-slack:
     name: Notify Slack in case of failure
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     needs: [docker-build-cuda, docker-build-and-publish-rocm]
     if: ${{ failure() && github.repository_owner == 'flexflow' && ( github.event_name == 'push' || github.event_name == 'workflow_dispatch' ) && github.ref_name == 'inference' }}
     steps:
diff --git a/.github/workflows/helpers/install_cudnn.sh b/.github/workflows/helpers/install_cudnn.sh
index 73b8e8841..1e8038573 100755
--- a/.github/workflows/helpers/install_cudnn.sh
+++ b/.github/workflows/helpers/install_cudnn.sh
@@ -8,72 +8,11 @@ cd "${BASH_SOURCE[0]%/*}"
 ubuntu_version=$(lsb_release -rs)
 ubuntu_version=${ubuntu_version//./}
 
-# Install CUDNN
-cuda_version=${1:-12.1.1}
-cuda_version=$(echo "${cuda_version}" | cut -f1,2 -d'.')
-echo "Installing CUDNN for CUDA version: ${cuda_version} ..."
-CUDNN_LINK=http://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-11.1-linux-x64-v8.0.5.39.tgz
-CUDNN_TARBALL_NAME=cudnn-11.1-linux-x64-v8.0.5.39.tgz
-if [[ "$cuda_version" == "10.1" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-10.1-linux-x64-v8.0.5.39.tgz
-  CUDNN_TARBALL_NAME=cudnn-10.1-linux-x64-v8.0.5.39.tgz
-elif [[ "$cuda_version" == "10.2" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-10.2-linux-x64-v8.0.5.39.tgz
-  CUDNN_TARBALL_NAME=cudnn-10.2-linux-x64-v8.0.5.39.tgz
-elif [[ "$cuda_version" == "11.0" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-11.0-linux-x64-v8.0.5.39.tgz
-  CUDNN_TARBALL_NAME=cudnn-11.0-linux-x64-v8.0.5.39.tgz
-elif [[ "$cuda_version" == "11.1" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.5/cudnn-11.1-linux-x64-v8.0.5.39.tgz
-  CUDNN_TARBALL_NAME=cudnn-11.1-linux-x64-v8.0.5.39.tgz
-elif [[ "$cuda_version" == "11.2" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.1.1/cudnn-11.2-linux-x64-v8.1.1.33.tgz
-  CUDNN_TARBALL_NAME=cudnn-11.2-linux-x64-v8.1.1.33.tgz
-elif [[ "$cuda_version" == "11.3" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.2.1/cudnn-11.3-linux-x64-v8.2.1.32.tgz
-  CUDNN_TARBALL_NAME=cudnn-11.3-linux-x64-v8.2.1.32.tgz
-elif [[ "$cuda_version" == "11.4" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.2.4/cudnn-11.4-linux-x64-v8.2.4.15.tgz
-  CUDNN_TARBALL_NAME=cudnn-11.4-linux-x64-v8.2.4.15.tgz
-elif [[ "$cuda_version" == "11.5" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.0/cudnn-11.5-linux-x64-v8.3.0.98.tgz
-  CUDNN_TARBALL_NAME=cudnn-11.5-linux-x64-v8.3.0.98.tgz
-elif [[ "$cuda_version" == "11.6" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.4.0/local_installers/11.6/cudnn-linux-x86_64-8.4.0.27_cuda11.6-archive.tar.xz
-  CUDNN_TARBALL_NAME=cudnn-linux-x86_64-8.4.0.27_cuda11.6-archive.tar.xz
-elif [[ "$cuda_version" == "11.7" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.5.0/local_installers/11.7/cudnn-linux-x86_64-8.5.0.96_cuda11-archive.tar.xz
-  CUDNN_TARBALL_NAME=cudnn-linux-x86_64-8.5.0.96_cuda11-archive.tar.xz
-elif [[ "$cuda_version" == "11.8" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.7.0/local_installers/11.8/cudnn-linux-x86_64-8.7.0.84_cuda11-archive.tar.xz
-  CUDNN_TARBALL_NAME=cudnn-linux-x86_64-8.7.0.84_cuda11-archive.tar.xz
-elif [[ "$cuda_version" == "12.0" || "$cuda_version" == "12.1" || "$cuda_version" == "12.2" || "$cuda_version" == "12.3" || "$cuda_version" == "12.4" || "$cuda_version" == "12.5" ]]; then
-  CUDNN_LINK=https://developer.download.nvidia.com/compute/redist/cudnn/v8.8.0/local_installers/12.0/cudnn-local-repo-ubuntu2004-8.8.0.121_1.0-1_amd64.deb
-  CUDNN_TARBALL_NAME=cudnn-local-repo-ubuntu2004-8.8.0.121_1.0-1_amd64.deb
-else
-  echo "CUDNN support for CUDA version above 12.5 not yet added"
-  exit 1
-fi
-wget -c -q $CUDNN_LINK
-if [[ "$cuda_version" == "11.6" || "$cuda_version" == "11.7" || "$cuda_version" == "11.8" ]]; then
-  tar -xf $CUDNN_TARBALL_NAME -C ./
-  CUDNN_EXTRACTED_TARBALL_NAME="${CUDNN_TARBALL_NAME::-7}"
-  sudo cp -r "$CUDNN_EXTRACTED_TARBALL_NAME"/include/* /usr/local/include
-  sudo cp -r "$CUDNN_EXTRACTED_TARBALL_NAME"/lib/* /usr/local/lib
-  rm -rf "$CUDNN_EXTRACTED_TARBALL_NAME"
-elif [[ "$CUDNN_TARBALL_NAME" == *.deb ]]; then
-  wget -c -q "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${ubuntu_version}/x86_64/cuda-keyring_1.1-1_all.deb"
-  sudo dpkg -i cuda-keyring_1.1-1_all.deb
-  sudo apt update -y
-  rm -f cuda-keyring_1.1-1_all.deb
-  sudo dpkg -i $CUDNN_TARBALL_NAME
-  sudo cp /var/cudnn-local-repo-ubuntu2004-8.8.0.121/cudnn-local-A9E17745-keyring.gpg /usr/share/keyrings/
-  sudo apt update -y
-  sudo apt install -y libcudnn8
-  sudo apt install -y libcudnn8-dev
-  sudo apt install -y libcudnn8-samples
-else
-  sudo tar -xzf $CUDNN_TARBALL_NAME -C /usr/local
-fi
-rm $CUDNN_TARBALL_NAME
+wget -c -q "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${ubuntu_version}/x86_64/cuda-keyring_1.1-1_all.deb"
+sudo dpkg -i cuda-keyring_1.1-1_all.deb
+sudo apt update -y
+rm -f cuda-keyring_1.1-1_all.deb
+sudo apt-get -y install libcudnn9-cuda-12
+sudo apt-get -y install libcudnn9-dev-cuda-12
+sudo apt-get -y install libcudnn9-samples
 sudo ldconfig
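With this change, install_cudnn.sh pulls cuDNN 9 for CUDA 12 straight from NVIDIA's apt repository instead of selecting a per-CUDA-version tarball. A quick post-install sanity check might look like the sketch below; it only uses standard dpkg/ldconfig tooling and is not part of the patch.

```bash
# Confirm the cuDNN 9 packages from the apt repo are installed and that the
# shared library is resolvable by the dynamic loader after `sudo ldconfig`.
dpkg -s libcudnn9-cuda-12 libcudnn9-dev-cuda-12 | grep -E '^(Package|Version)'
ldconfig -p | grep libcudnn   # expect an entry for libcudnn.so.9
```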
"$CUDNN_EXTRACTED_TARBALL_NAME" -elif [[ "$CUDNN_TARBALL_NAME" == *.deb ]]; then - wget -c -q "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${ubuntu_version}/x86_64/cuda-keyring_1.1-1_all.deb" - sudo dpkg -i cuda-keyring_1.1-1_all.deb - sudo apt update -y - rm -f cuda-keyring_1.1-1_all.deb - sudo dpkg -i $CUDNN_TARBALL_NAME - sudo cp /var/cudnn-local-repo-ubuntu2004-8.8.0.121/cudnn-local-A9E17745-keyring.gpg /usr/share/keyrings/ - sudo apt update -y - sudo apt install -y libcudnn8 - sudo apt install -y libcudnn8-dev - sudo apt install -y libcudnn8-samples -else - sudo tar -xzf $CUDNN_TARBALL_NAME -C /usr/local -fi -rm $CUDNN_TARBALL_NAME +wget -c -q "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${ubuntu_version}/x86_64/cuda-keyring_1.1-1_all.deb" +sudo dpkg -i cuda-keyring_1.1-1_all.deb +sudo apt update -y +rm -f cuda-keyring_1.1-1_all.deb +sudo apt-get -y install libcudnn9-cuda-12 +sudo apt-get -y install libcudnn9-dev-cuda-12 +sudo apt-get -y install libcudnn9-samples sudo ldconfig diff --git a/.github/workflows/helpers/install_dependencies.sh b/.github/workflows/helpers/install_dependencies.sh index 9d07ea71f..ac52a80e1 100755 --- a/.github/workflows/helpers/install_dependencies.sh +++ b/.github/workflows/helpers/install_dependencies.sh @@ -40,7 +40,20 @@ if [[ "$FF_GPU_BACKEND" == "hip_cuda" || "$FF_GPU_BACKEND" = "hip_rocm" ]]; then elif [ "$hip_version" = "5.5" ]; then AMD_GPU_SCRIPT_NAME=amdgpu-install_5.5.50500-1_all.deb fi - AMD_GPU_SCRIPT_URL="https://repo.radeon.com/amdgpu-install/${hip_version}/ubuntu/focal/${AMD_GPU_SCRIPT_NAME}" + # Detect Ubuntu version + UBUNTU_VERSION=$(lsb_release -rs) + if [[ "$UBUNTU_VERSION" == "20.04" ]]; then + UBUNTU_CODENAME="focal" + elif [[ "$UBUNTU_VERSION" == "22.04" ]]; then + UBUNTU_CODENAME="jammy" + elif [[ "$UBUNTU_VERSION" == "24.04" ]]; then + UBUNTU_CODENAME="jammy" + else + echo "Unsupported Ubuntu version: $UBUNTU_VERSION" + exit 1 + fi + + AMD_GPU_SCRIPT_URL="https://repo.radeon.com/amdgpu-install/${hip_version}/ubuntu/${UBUNTU_CODENAME}/${AMD_GPU_SCRIPT_NAME}" # Download and install AMD GPU software with ROCM and HIP support wget "$AMD_GPU_SCRIPT_URL" sudo apt-get install -y ./${AMD_GPU_SCRIPT_NAME} @@ -48,20 +61,20 @@ if [[ "$FF_GPU_BACKEND" == "hip_cuda" || "$FF_GPU_BACKEND" = "hip_rocm" ]]; then sudo amdgpu-install -y --usecase=hip,rocm --no-dkms sudo apt-get install -y hip-dev hipblas miopen-hip rocm-hip-sdk rocm-device-libs - # Install protobuf v3.20.x manually - sudo apt-get update -y && sudo apt-get install -y pkg-config zip g++ zlib1g-dev unzip python autoconf automake libtool curl make - git clone -b 3.20.x https://github.com/protocolbuffers/protobuf.git - cd protobuf/ - git submodule update --init --recursive - ./autogen.sh - ./configure - cores_available=$(nproc --all) - n_build_cores=$(( cores_available -1 )) - if (( n_build_cores < 1 )) ; then n_build_cores=1 ; fi - make -j $n_build_cores - sudo make install - sudo ldconfig - cd .. + # # Install protobuf v3.20.x manually + # sudo apt-get update -y && sudo apt-get install -y pkg-config zip g++ zlib1g-dev unzip python autoconf automake libtool curl make + # git clone -b 3.20.x https://github.com/protocolbuffers/protobuf.git + # cd protobuf/ + # git submodule update --init --recursive + # ./autogen.sh + # ./configure + # cores_available=$(nproc --all) + # n_build_cores=$(( cores_available -1 )) + # if (( n_build_cores < 1 )) ; then n_build_cores=1 ; fi + # make -j $n_build_cores + # sudo make install + # sudo ldconfig + # cd .. 
diff --git a/.github/workflows/helpers/install_nccl.sh b/.github/workflows/helpers/install_nccl.sh
index ae6793ea2..306e5d699 100755
--- a/.github/workflows/helpers/install_nccl.sh
+++ b/.github/workflows/helpers/install_nccl.sh
@@ -5,47 +5,10 @@ set -x
 # Cd into directory holding this script
 cd "${BASH_SOURCE[0]%/*}"
 
-# Add NCCL key ring
 ubuntu_version=$(lsb_release -rs)
 ubuntu_version=${ubuntu_version//./}
-wget "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${ubuntu_version}/x86_64/cuda-keyring_1.1-1_all.deb"
+wget -c -q "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${ubuntu_version}/x86_64/cuda-keyring_1.1-1_all.deb"
 sudo dpkg -i cuda-keyring_1.1-1_all.deb
-sudo apt update -y
+sudo apt-get update -y --allow-change-held-packages
 rm -f cuda-keyring_1.1-1_all.deb
-
-# Install NCCL
-cuda_version=${1:-12.1.1}
-cuda_version=$(echo "${cuda_version}" | cut -f1,2 -d'.')
-echo "Installing NCCL for CUDA version: ${cuda_version} ..."
-
-# We need to run a different install command based on the CUDA version, otherwise running `sudo apt install libnccl2 libnccl-dev`
-# will automatically upgrade CUDA to the latest version.
-
-if [[ "$cuda_version" == "11.0" ]]; then
-  sudo apt install libnccl2=2.15.5-1+cuda11.0 libnccl-dev=2.15.5-1+cuda11.0
-elif [[ "$cuda_version" == "11.1" ]]; then
-  sudo apt install libnccl2=2.8.4-1+cuda11.1 libnccl-dev=2.8.4-1+cuda11.1
-elif [[ "$cuda_version" == "11.2" ]]; then
-  sudo apt install libnccl2=2.8.4-1+cuda11.2 libnccl-dev=2.8.4-1+cuda11.2
-elif [[ "$cuda_version" == "11.3" ]]; then
-  sudo apt install libnccl2=2.9.9-1+cuda11.3 libnccl-dev=2.9.9-1+cuda11.3
-elif [[ "$cuda_version" == "11.4" ]]; then
-  sudo apt install libnccl2=2.11.4-1+cuda11.4 libnccl-dev=2.11.4-1+cuda11.4
-elif [[ "$cuda_version" == "11.5" ]]; then
-  sudo apt install libnccl2=2.11.4-1+cuda11.5 libnccl-dev=2.11.4-1+cuda11.5
-elif [[ "$cuda_version" == "11.6" ]]; then
-  sudo apt install libnccl2=2.12.12-1+cuda11.6 libnccl-dev=2.12.12-1+cuda11.6
-elif [[ "$cuda_version" == "11.7" ]]; then
-  sudo apt install libnccl2=2.14.3-1+cuda11.7 libnccl-dev=2.14.3-1+cuda11.7
-elif [[ "$cuda_version" == "11.8" ]]; then
-  sudo apt install libnccl2=2.16.5-1+cuda11.8 libnccl-dev=2.16.5-1+cuda11.8
-elif [[ "$cuda_version" == "12.0" ]]; then
-  sudo apt install libnccl2=2.18.3-1+cuda12.0 libnccl-dev=2.18.3-1+cuda12.0
-elif [[ "$cuda_version" == "12.1" ]]; then
-  sudo apt install libnccl2=2.18.3-1+cuda12.1 libnccl-dev=2.18.3-1+cuda12.1
-elif [[ "$cuda_version" == "12.2" ]]; then
-  sudo apt install libnccl2=2.18.3-1+cuda12.2 libnccl-dev=2.18.3-1+cuda12.2
-else
-  echo "Installing NCCL for CUDA version ${cuda_version} is not supported"
-  exit 1
-fi
+sudo apt install -y --allow-change-held-packages libnccl2 libnccl-dev
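The removed comment explains why the old script pinned NCCL per CUDA version: an unpinned `apt install libnccl2 libnccl-dev` can pull in a newer CUDA toolkit. The new one-liner relies on `--allow-change-held-packages` instead. If the runner's CUDA installation must stay fixed, one option is an apt preferences pin along the lines of the sketch below; `12.1*` is only a placeholder for whatever CUDA version is already installed, and this is not part of the patch.

```bash
# Optional sketch: pin the CUDA repo packages to the installed major/minor so that
# installing libnccl2/libnccl-dev cannot drag in a newer toolkit.
sudo tee /etc/apt/preferences.d/cuda-pin > /dev/null <<'EOF'
Package: cuda-*
Pin: version 12.1*
Pin-Priority: 1001
EOF
sudo apt-get update -y
sudo apt-get install -y libnccl2 libnccl-dev
```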
diff --git a/.github/workflows/pip-deploy.yml b/.github/workflows/pip-deploy.yml
index 5558e51e3..37ea63705 100644
--- a/.github/workflows/pip-deploy.yml
+++ b/.github/workflows/pip-deploy.yml
@@ -9,7 +9,7 @@ concurrency:
 jobs:
   build-n-publish:
     name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     permissions:
       # IMPORTANT: this permission is mandatory for trusted publishing
       id-token: write
diff --git a/.github/workflows/pip-install.yml b/.github/workflows/pip-install.yml
index f348bfe5e..26a74f8db 100644
--- a/.github/workflows/pip-install.yml
+++ b/.github/workflows/pip-install.yml
@@ -13,7 +13,7 @@ concurrency:
 jobs:
   pip-install-flexflow:
     name: Install FlexFlow with pip
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     defaults:
       run:
         shell: bash -l {0} # required to use an activated conda environment
diff --git a/.github/workflows/shell-check.yml b/.github/workflows/shell-check.yml
index a825d63d9..f2e31429c 100644
--- a/.github/workflows/shell-check.yml
+++ b/.github/workflows/shell-check.yml
@@ -3,7 +3,7 @@ on: [push, pull_request, workflow_dispatch]
 jobs:
   shellcheck:
     name: Shellcheck
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v3
       - name: Run ShellCheck
diff --git a/conda/flexflow.yml b/conda/flexflow.yml
index d457684d0..7568ed648 100644
--- a/conda/flexflow.yml
+++ b/conda/flexflow.yml
@@ -3,23 +3,19 @@ channels:
   - defaults
   - conda-forge
 dependencies:
-  - python>=3.6,<3.12
-  - cffi>=1.11.0
-  - Pillow
+  - python
+  - cffi
   - rust
   - cmake-build-extension
   - jq
   - pytest
   - pip
   - pip:
-      - qualname>=0.1.0
-      - keras_preprocessing>=1.1.2
-      - numpy>=1.16.0
-      - torch>=1.13.1
-      - torchaudio>=0.13.1
-      - torchvision>=0.14.1
+      - numpy
+      - torch
+      - torchaudio
+      - torchvision
       - regex
-      - onnx
       - transformers>=4.47.1
       - sentencepiece
       - einops
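With the version pins dropped, conda now resolves the environment to current package releases. Recreating the environment is unchanged; a minimal usage sketch follows. The environment name comes from the `name:` field of conda/flexflow.yml, which is not shown in this hunk, so `flexflow` is an assumption here.

```bash
# Recreate and smoke-test the environment from the updated spec.
conda env create -f conda/flexflow.yml
conda activate flexflow   # "flexflow" assumed; use the name: field from the yml
python -c "import torch, transformers; print(torch.__version__, transformers.__version__)"
```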
diff --git a/docker/flexflow-environment/Dockerfile b/docker/flexflow-environment/Dockerfile
index 25c45646f..338563710 100644
--- a/docker/flexflow-environment/Dockerfile
+++ b/docker/flexflow-environment/Dockerfile
@@ -19,9 +19,9 @@ RUN apt-get update && apt-get install -y --no-install-recommends wget sudo binut
 
 # Install Python3 with Miniconda
 ARG python_version "latest"
-#RUN MINICONDA_SCRIPT_NAME=Miniconda3-latest-Linux-x86_64.sh; \
-RUN MINICONDA_SCRIPT_NAME=Miniconda3-py311_23.5.2-0-Linux-x86_64.sh; \
-    if [ "$python_version" != "3.8" ] && [ "$python_version" != "3.9" ] && [ "$python_version" != "3.10" ] && [ "$python_version" != "3.11" ] && [ "$python_version" != "latest" ]; then \
+RUN MINICONDA_SCRIPT_NAME=Miniconda3-latest-Linux-x86_64.sh; \
+# RUN MINICONDA_SCRIPT_NAME=Miniconda3-py311_23.5.2-0-Linux-x86_64.sh; \
+    if [ "$python_version" != "3.8" ] && [ "$python_version" != "3.9" ] && [ "$python_version" != "3.10" ] && [ "$python_version" != "3.11" ] && [ "$python_version" != "3.12" ] && [ "$python_version" != "latest" ]; then \
         echo "python_version '${python_version}' is not supported, please choose among {3.8, 3.9, 3.10, 3.11 or latest (default)}"; \
         exit 1; \
     fi; \
@@ -33,6 +33,10 @@ RUN MINICONDA_SCRIPT_NAME=Miniconda3-py311_23.5.2-0-Linux-x86_64.sh; \
         MINICONDA_SCRIPT_NAME=Miniconda3-py310_23.5.2-0-Linux-x86_64.sh; \
     elif [ "${python_version}" = "3.11" ]; then \
         MINICONDA_SCRIPT_NAME=Miniconda3-py311_23.5.2-0-Linux-x86_64.sh; \
+    elif [ "${python_version}" = "3.12" ]; then \
+        MINICONDA_SCRIPT_NAME=Miniconda3-py312_25.1.1-2-Linux-x86_64.sh; \
+    elif [ "${python_version}" = "latest" ]; then \
+        MINICONDA_SCRIPT_NAME=Miniconda3-latest-Linux-x86_64.sh; \
     fi; \
     wget -c -q https://repo.continuum.io/miniconda/${MINICONDA_SCRIPT_NAME} && \
     mv ./${MINICONDA_SCRIPT_NAME} ~/${MINICONDA_SCRIPT_NAME} && \
@@ -93,7 +97,8 @@ RUN if [ "$FF_GPU_BACKEND" = "hip_cuda" ] || [ "$FF_GPU_BACKEND" = "hip_rocm" ]
     elif [ "$hip_version" = "5.5" ]; then \
         AMD_GPU_SCRIPT_NAME=amdgpu-install_5.5.50500-1_all.deb; \
     fi; \
-    AMD_GPU_SCRIPT_URL="https://repo.radeon.com/amdgpu-install/${hip_version}/ubuntu/focal/${AMD_GPU_SCRIPT_NAME}"; \
+    ubuntu_codename=$(lsb_release -cs); \
+    AMD_GPU_SCRIPT_URL="https://repo.radeon.com/amdgpu-install/${hip_version}/ubuntu/${ubuntu_codename}/${AMD_GPU_SCRIPT_NAME}"; \
     # Download and install AMD GPU software with ROCM and HIP support
     wget $AMD_GPU_SCRIPT_URL; \
     apt-get install -y ./${AMD_GPU_SCRIPT_NAME}; \
@@ -101,14 +106,14 @@ RUN if [ "$FF_GPU_BACKEND" = "hip_cuda" ] || [ "$FF_GPU_BACKEND" = "hip_rocm" ]
     amdgpu-install -y --usecase=hip,rocm --no-dkms; \
     apt-get install -y hip-dev hipblas miopen-hip rocm-hip-sdk rocm-device-libs; \
     # Install protobuf dependencies
-    apt-get update -y && sudo apt-get install -y pkg-config zip g++ zlib1g-dev autoconf automake libtool make; \
+    # apt-get update -y && sudo apt-get install -y pkg-config zip g++ zlib1g-dev autoconf automake libtool make; \
 else \
     echo "FF_GPU_BACKEND: ${FF_GPU_BACKEND}. Skipping installing HIP dependencies"; \
 fi
 RUN rm -rf /var/lib/apt/lists/*
 
 # Install python packages and other dependencies
-RUN conda install -c conda-forge cmake make pillow cmake-build-extension numpy pandas keras-preprocessing
+RUN conda install -c conda-forge cmake make cmake-build-extension numpy pandas
 # Install Pytorch
 COPY docker/flexflow-environment/install_pytorch.sh /usr/local/bin/install_pytorch.sh
 RUN if [ "$FF_GPU_BACKEND" == "cuda" ] ; then \
@@ -130,5 +135,31 @@ RUN pip3 install streamlit
 # Install Rust
 RUN curl https://sh.rustup.rs -sSf | sh -s -- -y
 ENV PATH /root/.cargo/bin:$PATH
+# RUN ln -s /opt/conda /opt/saturncloud
+# RUN sudo apt update -y && sudo apt install -y openssh-server openssh-client
+# # Make a user called jovyan with uid=1000 and their home directory created
+# RUN useradd -m -u 1000 -s /bin/bash jovyan
+# # Enable passwordless sudo for all users
+# RUN echo "ALL ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+# RUN sudo useradd --uid 1000 --create-home --home-dir /home/jovyan --shell /bin/bash jovyan
+# RUN sudo passwd -d jovyan
+
+# Install sshd
+RUN apt-get update && apt-get install -y openssh-server
+
+# Symlink conda folder to /opt/saturncloud
+RUN ln -s /opt/conda /opt/saturncloud
+
+# Create the jovyan user with UID 1000
+RUN useradd -m -u 1000 -s /bin/bash jovyan
+# Add jovyan to the sudo group
+RUN usermod -aG sudo jovyan
+# Configure passwordless sudo for jovyan
+RUN echo "jovyan ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/jovyan && \
+    chmod 440 /etc/sudoers.d/jovyan
+# Set user to jovyan for subsequent commands (optional)
+USER jovyan
+# Set working directory (optional)
+WORKDIR /home/jovyan
 
 ENTRYPOINT ["/bin/bash"]
diff --git a/requirements.txt b/requirements.txt
index 34d0a39bf..87e9b9695 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,16 +1,12 @@
-cffi>=1.11.0
-numpy>=1.16.0
-qualname>=0.1.0
-keras_preprocessing>=1.1.2
-Pillow
+cffi
+numpy
 cmake-build-extension
 ninja
 requests
 regex
-torch>=1.13.1
-torchaudio>=0.13.1
-torchvision>=0.14.1
-onnx
+torch
+torchaudio
+torchvision
 transformers>=4.47.1
 sentencepiece
 einops
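With Python 3.12 and the latest Miniconda now accepted by the environment Dockerfile above, a local build of the image might look like the sketch below. `python_version` is the ARG shown in the diff; `FF_GPU_BACKEND` is referenced there and is assumed to also be a build ARG; the image tag is arbitrary.

```bash
# Build the updated flexflow-environment image from the repository root.
docker build \
  --build-arg python_version=3.12 \
  --build-arg FF_GPU_BACKEND=cuda \
  -t flexflow-environment:latest \
  -f docker/flexflow-environment/Dockerfile .
```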