# Nightly benchmark workflow: builds Graph Compiler's LLVM+IMEX toolchain,
# builds a Graph-Compiler-enabled OpenVINO fork, then benchmarks a set of
# linear-layer models on GPU with benchmark_app, comparing the MLIR (GC_GPU)
# path against the stock OpenVINO baseline. Results are uploaded as CSVs.
name: OpenVino GPU Nightly Test

on:
  workflow_dispatch:

permissions: read-all

jobs:
  nightly:
    name: OpenVino GPU Nightly
    runs-on: [self-hosted, l0]

    env:
      GH_TOKEN: ${{ github.token }}

    steps:
      - name: "Checkout Graph Compiler"
        uses: actions/checkout@v4
        with:
          repository: 'intel/graph-compiler'
          path: 'graph-compiler'

      - name: Setup MLIR Python bindings
        id: setup-mlir-python-bindings
        uses: ./graph-compiler/.github/actions/setup-mlir-python-bindings

      - name: Build LLVM with IMEX
        working-directory: graph-compiler
        run: |
          scripts/compile.sh --dev --llvm --imex
          # Export the LLVM build dir so the OpenVINO build step can locate
          # the LLVM/MLIR CMake packages.
          echo LLVM_INST_PATH=$(pwd)/externals/llvm-project/build >>$GITHUB_ENV

      - name: "Checkout OpenVino"
        uses: actions/checkout@v4
        with:
          repository: 'dchigarev/openvino'
          ref: 'gc-gpu'
          path: 'openvino'
          # FIX: 'recursive: true' is not a valid actions/checkout@v4 input and
          # was silently ignored. 'submodules: recursive' is the documented way
          # to also initialize nested submodules (replaces 'submodules: true').
          submodules: recursive
          fetch-depth: 0

      - name: Build OpenVino
        working-directory: openvino
        run: |
          cmake -B build -G Ninja -DLLVM_DIR=${LLVM_INST_PATH}/lib/cmake/llvm -DMLIR_DIR=${LLVM_INST_PATH}/lib/cmake/mlir -DENABLE_GRAPH_COMPILER=ON -DENABLE_INTEL_GPU=ON -DENABLE_TESTS=ON
          cmake --build build --target all

      - name: Benchmark
        working-directory: openvino
        run: |
          pip install openvino torch
          # FIX: removed a stray comma after 'linear[2048,2048,2048]' — in a
          # shell word list the comma is literal, so that iteration previously
          # passed the malformed parameter 'linear[2048,2048,2048],' to the
          # model generator.
          for param in 'linear[512,512,512]' 'linear[1024,1024,1024]' 'linear[2048,2048,2048]' 'linear[4096,4096,4096]' 'linear[8192,8192,8192]' 'linear[4096,512,4096]'; do
            python3 ./tools/mlir_bench/ov_model_gen.py -l=$param -t f16 -n test.xml
            # Graph-Compiler (MLIR) run.
            OV_MLIR_MODE=GC_GPU ./bin/intel64/Release/benchmark_app -m ./test.xml -d GPU -use_device_mem -ip f16 -infer_precision f16 -niter 100 -hint none -nstreams 1 -nthreads 1 >tmp
            echo 'gc-gpu: ' $param
            cat tmp
            COMPILE_TIME=$(grep "Compile model took" tmp|tr -cd '0-9.')
            FIRST_INFERENCE_TIME=$(grep "First inference took" tmp|tr -cd '0-9.')
            THROUGHPUT=$(grep "Throughput" tmp|tr -cd '0-9.')
            echo "openvino,$param,$COMPILE_TIME,$FIRST_INFERENCE_TIME,$THROUGHPUT" >> perf.csv

            # Baseline run with the MLIR path disabled.
            OV_MLIR_MODE=0 ./bin/intel64/Release/benchmark_app -m ./test.xml -d GPU -use_device_mem -ip f16 -infer_precision f16 -niter 100 -hint none -nstreams 1 -nthreads 1 >tmp
            echo 'baseline: ' $param
            cat tmp
            COMPILE_TIME=$(grep "Compile model took" tmp|tr -cd '0-9.')
            FIRST_INFERENCE_TIME=$(grep "First inference took" tmp|tr -cd '0-9.')
            THROUGHPUT=$(grep "Throughput" tmp|tr -cd '0-9.')
            # NOTE(review): this row label duplicates the 'openvino' label of the
            # perf.csv row; consider 'baseline' — verify downstream consumers of
            # perf-baseline.csv before changing it.
            echo "openvino,$param,$COMPILE_TIME,$FIRST_INFERENCE_TIME,$THROUGHPUT" >> perf-baseline.csv
          done

      - name: Upload Performance Artifact
        uses: actions/upload-artifact@v4
        with:
          name: perf.csv
          path: openvino/perf.csv

      - name: Upload Baseline Artifact
        uses: actions/upload-artifact@v4
        with:
          name: perf-baseline.csv
          path: openvino/perf-baseline.csv