
Commit

attribute workflows to correct runners
IlyasMoutawwakil committed Mar 12, 2024
1 parent d2580dd commit a72524b
Showing 6 changed files with 16 additions and 36 deletions.
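
Apart from a CUDA version bump in the first file, the changes revolve around the runs-on key: a bare label such as nvidia-gpu lets the job land on any self-hosted runner carrying that label, while a label array only matches runners that carry every label in the list, so multi-GPU suites are pinned to 4x A10 machines and single-GPU suites to single-A10 machines. A minimal sketch of the pattern (the labels are the ones used in this commit; the job name and surrounding structure are illustrative, not a file from the repository):

    jobs:
      cuda_tests:
        # Before: any self-hosted runner labeled nvidia-gpu could pick up the job.
        # runs-on: nvidia-gpu

        # After: only a runner carrying ALL of these labels matches,
        # i.e. a CI machine with four A10 GPUs for multi-GPU test suites.
        runs-on: [multi-gpu, nvidia-gpu, 4-a10, ci]
        steps:
          - name: Checkout
            uses: actions/checkout@v3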
2 changes: 1 addition & 1 deletion .github/workflows/test_api_cuda.yaml
@@ -19,7 +19,7 @@ jobs:
image:
[
{ torch_cuda: cu118, torch_pre_release: 0, cuda_version: 11.8.0 },
- { torch_cuda: cu121, torch_pre_release: 1, cuda_version: 12.1.0 },
+ { torch_cuda: cu121, torch_pre_release: 1, cuda_version: 12.1.1 },
]

runs-on: [multi-gpu, nvidia-gpu, 4-a10, ci]
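
For context, the image list above is a build-matrix definition: each entry spawns one job instance and is exposed as matrix.image. A reduced sketch of how it is consumed (the strategy/matrix wrapper is inferred from the fragment shown here, not copied verbatim from the file):

    strategy:
      matrix:
        image:
          [
            { torch_cuda: cu118, torch_pre_release: 0, cuda_version: 11.8.0 },
            { torch_cuda: cu121, torch_pre_release: 1, cuda_version: 12.1.1 },
          ]

    # Later in the job the fields are dereferenced, for example when tagging
    # the image in the docker build step:
    #   --tag opt-bench-cuda:${{ matrix.image.cuda_version }}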
10 changes: 3 additions & 7 deletions .github/workflows/test_cli_cuda_onnxruntime.yaml
@@ -13,7 +13,8 @@ concurrency:

jobs:
build_image_and_run_cli_cuda_onnxruntime_tests:
- runs-on: nvidia-gpu
+ runs-on: [single-gpu, nvidia-gpu, a10, ci]

steps:
- name: Checkout
uses: actions/checkout@v3
@@ -28,19 +29,14 @@ jobs:
--tag opt-bench-cuda:11.8.0
.

- - name: Get GPUs with most free memory
-   id: get_devices
-   run: |
-     echo "::set-output name=devices::$(nvidia-smi --query-gpu=memory.free,index --format=csv,noheader,nounits | sort -n -k1 | tail -n 2 | awk -F', ' '{print $2}' | xargs echo -n | sed 's/ /,/g' | awk '{print $0}')"
- name: Run tests
run: docker run
--rm
+ --gpus all
--pid host
--shm-size 64G
--env USE_CUDA="1"
--entrypoint /bin/bash
- --gpus '"device=${{ steps.get_devices.outputs.devices }}"'
--volume $(pwd):/workspace/optimum-benchmark
--workdir /workspace/optimum-benchmark
opt-bench-cuda:11.8.0
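
The four-line step removed here picked, at job time, the two GPUs with the most free memory and exposed them as a step output so that docker run could pin the container to them; now that jobs are routed to dedicated runners, the containers simply take --gpus all. For reference, a commented sketch of that selection step (illustrative: the pipeline is the one removed above minus a final pass-through awk, and writing to $GITHUB_OUTPUT replaces the deprecated ::set-output command, which is not part of this commit):

    - name: Get GPUs with most free memory
      id: get_devices
      run: |
        # nvidia-smi prints one "free_memory, index" line per GPU; sort the lines
        # numerically by free memory, keep the last two (the two freest GPUs),
        # print the index column, and join the indices with commas, e.g. "2,3".
        devices=$(nvidia-smi --query-gpu=memory.free,index --format=csv,noheader,nounits | sort -n -k1 | tail -n 2 | awk -F', ' '{print $2}' | xargs echo -n | sed 's/ /,/g')
        echo "devices=$devices" >> "$GITHUB_OUTPUT"

The Run tests step then consumed the output as --gpus '"device=${{ steps.get_devices.outputs.devices }}"', the flag that every workflow now drops in favor of --gpus all.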
10 changes: 3 additions & 7 deletions .github/workflows/test_cli_cuda_pytorch.yaml
@@ -22,7 +22,8 @@ jobs:
{ torch_cuda: cu121, torch_pre_release: 1, cuda_version: 12.1.1 },
]

- runs-on: nvidia-gpu
+ runs-on: [multi-gpu, nvidia-gpu, 4-a10, ci]

steps:
- name: Checkout
uses: actions/checkout@v3
@@ -37,18 +38,13 @@ jobs:
--tag opt-bench-cuda:${{ matrix.image.cuda_version }}
.

- - name: Get GPUs with most free memory
-   id: get_devices
-   run: |
-     echo "::set-output name=devices::$(nvidia-smi --query-gpu=memory.free,index --format=csv,noheader,nounits | sort -n -k1 | tail -n 2 | awk -F', ' '{print $2}' | xargs echo -n | sed 's/ /,/g' | awk '{print $0}')"
- name: Run tests
run: docker run
--rm
+ --gpus all
--pid host
--shm-size 64G
--env USE_CUDA="1"
- --gpus '"device=${{ steps.get_devices.outputs.devices }}"'
--volume $(pwd):/workspace/optimum-benchmark
--workdir /workspace/optimum-benchmark
--entrypoint /bin/bash
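
The docker-side change is the same in every CLI workflow: the device-pinning flag disappears and --gpus all stays. For illustration, the two forms side by side (not taken from the repository; it assumes the host has the NVIDIA container toolkit installed, which is what mounts nvidia-smi into the container):

    # Old pattern: expose only GPUs 0 and 1 to the container; the nested double
    # quotes keep the comma in the device list from being mangled by the shell.
    docker run --rm --gpus '"device=0,1"' ubuntu:22.04 nvidia-smi -L

    # New pattern: expose every GPU on the runner, which the new labels already
    # guarantee is a machine reserved for this kind of job.
    docker run --rm --gpus all ubuntu:22.04 nvidia-smi -L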
10 changes: 3 additions & 7 deletions .github/workflows/test_cli_cuda_torch_ort.yaml
@@ -13,7 +13,8 @@ concurrency:

jobs:
build_image_and_run_cli_cuda_torch_ort_tests:
- runs-on: nvidia-gpu
+ runs-on: [multi-gpu, nvidia-gpu, 4-a10, ci]

steps:
- name: Checkout
uses: actions/checkout@v3
@@ -28,19 +29,14 @@ jobs:
--tag opt-bench-cuda:11.8.0
.

- - name: Get GPUs with most free memory
-   id: get_devices
-   run: |
-     echo "::set-output name=devices::$(nvidia-smi --query-gpu=memory.free,index --format=csv,noheader,nounits | sort -n -k1 | tail -n 2 | awk -F', ' '{print $2}' | xargs echo -n | sed 's/ /,/g' | awk '{print $0}')"
- name: Run tests
run: docker run
--rm
+ --gpus all
--pid host
--shm-size 64G
--env USE_CUDA="1"
--entrypoint /bin/bash
- --gpus '"device=${{ steps.get_devices.outputs.devices }}"'
--volume $(pwd):/workspace/optimum-benchmark
--workdir /workspace/optimum-benchmark
opt-bench-cuda:11.8.0
10 changes: 3 additions & 7 deletions .github/workflows/test_cli_tensorrt_llm.yaml
@@ -13,7 +13,8 @@ concurrency:

jobs:
pull_image_and_run_cli_tensorrt_llm_tests:
- runs-on: nvidia-gpu
+ runs-on: [single-gpu, nvidia-gpu, a10, ci]

steps:
- name: Checkout
uses: actions/checkout@v3
@@ -26,18 +27,13 @@ jobs:
--tag opt-bench-tensorrt-llm:latest
.

- - name: Get GPUs with most free memory
-   id: get_devices
-   run: |
-     echo "::set-output name=devices::$(nvidia-smi --query-gpu=memory.free,index --format=csv,noheader,nounits | sort -n -k1 | tail -n 2 | awk -F', ' '{print $2}' | xargs echo -n | sed 's/ /,/g' | awk '{print $0}')"
- name: Run tests
run: docker run
--rm
+ --gpus all
--pid host
--shm-size 64G
--env USE_CUDA="1"
- --gpus '"device=${{ steps.get_devices.outputs.devices }}"'
--volume $(pwd):/workspace/optimum-benchmark
--workdir /workspace/optimum-benchmark
--entrypoint /bin/bash
10 changes: 3 additions & 7 deletions .github/workflows/test_cli_tensorrt_onnxruntime.yaml
@@ -13,7 +13,8 @@ concurrency:

jobs:
build_image_and_run_cli_tensorrt_onnxruntime_tests:
- runs-on: nvidia-gpu
+ runs-on: [single-gpu, nvidia-gpu, a10, ci]

steps:
- name: Checkout
uses: actions/checkout@v3
@@ -26,18 +27,13 @@ jobs:
--tag opt-bench-tensorrt:latest
.

- - name: Get GPUs with most free memory
-   id: get_devices
-   run: |
-     echo "::set-output name=devices::$(nvidia-smi --query-gpu=memory.free,index --format=csv,noheader,nounits | sort -n -k1 | tail -n 2 | awk -F', ' '{print $2}' | xargs echo -n | sed 's/ /,/g' | awk '{print $0}')"
- name: Run tests
run: docker run
--rm
+ --gpus all
--pid host
--shm-size 64G
--env USE_CUDA="1"
- --gpus '"device=${{ steps.get_devices.outputs.devices }}"'
--volume $(pwd):/workspace/optimum-benchmark
--workdir /workspace/optimum-benchmark
--entrypoint /bin/bash
