WIP fix rocm runners #431

name: CLI ROCm Pytorch Single-GPU Tests

on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - .github/workflows/test_cli_rocm_pytorch_single_gpu.yaml
      - "optimum_benchmark/**"
      - "docker/**"
      - "tests/**"
      - "setup.py"
  pull_request:
    branches:
      - main
    paths:
      - .github/workflows/test_cli_rocm_pytorch_single_gpu.yaml
      - "optimum_benchmark/**"
      - "docker/**"
      - "tests/**"
      - "setup.py"
concurrency:
  cancel-in-progress: true
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
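
# The job targets a self-hosted AMD MI250 machine, selected through the runner labels below.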
jobs:
  run_cli_rocm_pytorch_single_gpu_tests:
    runs-on: [self-hosted, amd-gpu, single-gpu, mi250]
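    # Expose the ROCm kernel driver (/dev/kfd) and the DRI render nodes (/dev/dri) to the
    # container so the GPU is reachable inside it; ROCR_VISIBLE_DEVICES is forwarded from
    # the runner environment to control which device the job sees.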
    container:
      image: ghcr.io/huggingface/optimum-benchmark:latest-rocm
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install dependencies
        run: |
          pip install -e .[testing,diffusers,timm,peft,autoawq,auto-gptq]
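
      # PyTorch addresses ROCm GPUs through its CUDA device API, so the relevant tests carry
      # the "cuda" marker. The -k expression also skips distributed setups
      # (dp/ddp/device_map/deepspeed) and bnb/awq quantization tests.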
      - name: Run tests
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          PUSH_REPO_ID: optimum-benchmark/rocm
        run: |
          pytest -x -s -k "cli and cuda and pytorch and not (dp or ddp or device_map or deepspeed) and not (bnb or awq)"