diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index 9e621329049..ad236040cb8 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -632,32 +632,33 @@ jobs:
         # run eval_llama wikitext task
         PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_wikitext.sh
 
-  test-eval_llama-mmlu-linux:
-    name: test-eval_llama-mmlu-linux
-    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
-    permissions:
-      id-token: write
-      contents: read
-    strategy:
-      fail-fast: false
-    with:
-      runner: linux.24xlarge
-      docker-image: ci-image:executorch-ubuntu-22.04-clang12
-      submodules: 'recursive'
-      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-      timeout: 90
-      script: |
-        # The generic Linux job chooses to use base env, not the one setup by the image
-        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
-        conda activate "${CONDA_ENV}"
-
-        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake"
-
-        # install llama requirements
-        bash examples/models/llama/install_requirements.sh
-
-        # run eval_llama mmlu task
-        PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_mmlu.sh
+  # TODO(larryliu0820): Fix this issue before reenabling it: https://gist.github.com/larryliu0820/7377ecd0d79dbc06076cec8d9f2b85d2
+  # test-eval_llama-mmlu-linux:
+  #   name: test-eval_llama-mmlu-linux
+  #   uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+  #   permissions:
+  #     id-token: write
+  #     contents: read
+  #   strategy:
+  #     fail-fast: false
+  #   with:
+  #     runner: linux.24xlarge
+  #     docker-image: ci-image:executorch-ubuntu-22.04-clang12
+  #     submodules: 'recursive'
+  #     ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+  #     timeout: 90
+  #     script: |
+  #       # The generic Linux job chooses to use base env, not the one setup by the image
+  #       CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+  #       conda activate "${CONDA_ENV}"
+
+  #       PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake"
+
+  #       # install llama requirements
+  #       bash examples/models/llama/install_requirements.sh
+
+  #       # run eval_llama mmlu task
+  #       PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_mmlu.sh
 
   test-llama_runner_eager-linux:
     name: test-llama_runner_eager-linux