This repository has been archived by the owner on Oct 11, 2024. It is now read-only.

Commit

alright ... we shall see

andy-neuma committed Jun 7, 2024
1 parent 86b5833 commit 435d0ad
Showing 4 changed files with 15 additions and 149 deletions.
14 changes: 0 additions & 14 deletions .github/workflows/nm-build-test.yml
@@ -134,20 +134,6 @@ jobs:
       push_benchmark_results_to_gh_pages: "${{ github.event_name == 'schedule' || inputs.push_benchmark_results_to_gh_pages }}"
     secrets: inherit
 
-  TEST-ACCURACY-SMOKE:
-    needs: [BUILD]
-    if: inputs.wf_category == 'NIGHTLY'
-    uses: ./.github/workflows/nm-test-accuracy-smoke.yml
-    with:
-      label: ${{ inputs.test_label_solo }}
-      timeout: ${{ inputs.benchmark_timeout }}
-      gitref: ${{ inputs.gitref }}
-      Gi_per_thread: ${{ inputs.Gi_per_thread }}
-      nvcc_threads: ${{ inputs.nvcc_threads }}
-      python: ${{ inputs.python }}
-      whl: ${{ needs.BUILD.outputs.whl }}
-    secrets: inherit
-
   TEST-ACCURACY-FULL:
     needs: [BUILD]
     if: ${{ inputs.wf_category == 'WEEKLY' || inputs.wf_category == 'RELEASE' }}
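The removed block is a standard reusable-workflow call: `uses:` names the called workflow file, `with:` passes its inputs, and `secrets: inherit` forwards the caller's secrets. Below is a minimal sketch of the `workflow_call` interface that a file like nm-test-accuracy-smoke.yml would have to declare to accept this call; the input names are taken from the caller's `with:` block above, while the types, descriptions, and everything else are assumptions rather than the deleted file's actual contents:

# Hypothetical sketch of the receiving side of the call removed above.
# Input names mirror the caller's `with:` block; types are assumptions.
name: nm test accuracy smoke
on:
  workflow_call:
    inputs:
      label:            # runner label, fed from inputs.test_label_solo
        type: string
        required: true
      timeout:
        type: string
        required: true
      gitref:
        type: string
        required: true
      Gi_per_thread:
        type: string
        required: true
      nvcc_threads:
        type: string
        required: true
      python:
        type: string
        required: true
      whl:              # wheel artifact produced by the BUILD job
        type: string
        required: true

With an interface like this, the caller's `whl: ${{ needs.BUILD.outputs.whl }}` line is what hands the freshly built wheel from BUILD to the accuracy job.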
14 changes: 7 additions & 7 deletions .github/workflows/nm-nightly.yml
@@ -24,12 +24,12 @@ jobs:
       python: 3.8.17
       gitref: ${{ github.ref }}
 
-      test_label_solo: aws-test-a10g-24G
+      test_label_solo: gcp-k8s-l4-solo
       test_label_multi: ignore
       test_timeout: 480
       test_skip_list: neuralmagic/tests/skip-for-remote-push-tmp.txt
 
-      benchmark_label: aws-test-a10g-24G
+      benchmark_label: gcp-k8s-l4-solo
       benchmark_config_list_file: ./.github/data/nm_benchmark_remote_push_configs_list.txt
       benchmark_timeout: 720
       push_benchmark_results_to_gh_pages: "${{ github.event_name == 'schedule' || inputs.push_benchmark_results_to_gh_pages }}"
@@ -42,12 +42,12 @@ jobs:
       python: 3.9.17
       gitref: ${{ github.ref }}
 
-      test_label_solo: aws-test-a10g-24G
+      test_label_solo: gcp-k8s-l4-solo
       test_label_multi: ignore
       test_timeout: 480
       test_skip_list: neuralmagic/tests/skip-for-remote-push-tmp.txt
 
-      benchmark_label: aws-test-a10g-24G
+      benchmark_label: gcp-k8s-l4-solo
       benchmark_config_list_file: ./.github/data/nm_benchmark_remote_push_configs_list.txt
       benchmark_timeout: 720
       push_benchmark_results_to_gh_pages: "${{ github.event_name == 'schedule' || inputs.push_benchmark_results_to_gh_pages }}"
@@ -65,7 +65,7 @@ jobs:
       test_timeout: 480
       test_skip_list: neuralmagic/tests/skip-for-remote-push-tmp.txt
 
-      benchmark_label: aws-test-a10g-24G
+      benchmark_label: gcp-k8s-l4-solo
       benchmark_config_list_file: ./.github/data/nm_benchmark_remote_push_configs_list.txt
       benchmark_timeout: 720
       push_benchmark_results_to_gh_pages: "${{ github.event_name == 'schedule' || inputs.push_benchmark_results_to_gh_pages }}"
@@ -78,12 +78,12 @@ jobs:
       python: 3.11.4
       gitref: ${{ github.ref }}
 
-      test_label_solo: aws-test-a10g-24G
+      test_label_solo: gcp-k8s-l4-solo
       test_label_multi: ignore
       test_timeout: 480
       test_skip_list: neuralmagic/tests/skip-for-remote-push-tmp.txt
 
-      benchmark_label: aws-test-a10g-24G
+      benchmark_label: gcp-k8s-l4-solo
       benchmark_config_list_file: ./.github/data/nm_benchmark_remote_push_configs_list.txt
       benchmark_timeout: 720
       push_benchmark_results_to_gh_pages: "${{ github.event_name == 'schedule' || inputs.push_benchmark_results_to_gh_pages }}"
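All four nightly jobs make the same two swaps, moving solo test and benchmark work from the aws-test-a10g-24G runner pool to gcp-k8s-l4-solo. As a minimal sketch of how a label input like this is typically consumed in the called workflow, assuming the usual self-hosted-runner pattern (the job name and steps are illustrative, not from this repository):

# Hypothetical sketch: the label input selects which self-hosted
# runner pool the job lands on; nothing else about the job changes.
jobs:
  TEST-SOLO:
    runs-on: ${{ inputs.test_label_solo }}   # e.g. gcp-k8s-l4-solo
    timeout-minutes: 480                     # mirrors test_timeout above
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.gitref }}

Since the label is only a runner selector, the change moves these jobs from an AWS A10G machine to a GCP L4 machine without altering what they execute.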
16 changes: 8 additions & 8 deletions .github/workflows/nm-release.yml
@@ -20,12 +20,12 @@ jobs:
       python: 3.8.17
       gitref: ${{ github.ref }}
 
-      test_label_solo: aws-test-a10g-24G
+      test_label_solo: gcp-k8s-l4-solo
       test_label_multi: ignore
       test_timeout: 720
       test_skip_list: neuralmagic/tests/skip-for-release.txt
 
-      benchmark_label: aws-test-a10g-24G
+      benchmark_label: gcp-k8s-l4-solo
       benchmark_config_list_file: ./.github/data/nm_benchmark_nightly_configs_list.txt
       benchmark_timeout: 720
       push_benchmark_results_to_gh_pages: ${{ inputs.push_benchmark_results_to_gh_pages }}
@@ -38,12 +38,12 @@ jobs:
       python: 3.9.17
       gitref: ${{ github.ref }}
 
-      test_label_solo: aws-test-a10g-24G
+      test_label_solo: gcp-k8s-l4-solo
       test_label_multi: ignore
       test_timeout: 720
       test_skip_list: neuralmagic/tests/skip-for-release.txt
 
-      benchmark_label: aws-test-a10g-24G
+      benchmark_label: gcp-k8s-l4-solo
       benchmark_config_list_file: ./.github/data/nm_benchmark_nightly_configs_list.txt
       benchmark_timeout: 720
       push_benchmark_results_to_gh_pages: ${{ inputs.push_benchmark_results_to_gh_pages }}
@@ -56,12 +56,12 @@ jobs:
       python: 3.10.12
       gitref: ${{ github.ref }}
 
-      test_label_solo: aws-test-a10g-24G
+      test_label_solo: gcp-k8s-l4-solo
       test_label_multi: ignore
       test_timeout: 720
       test_skip_list: neuralmagic/tests/skip-for-release.txt
 
-      benchmark_label: aws-test-a10g-24G
+      benchmark_label: gcp-k8s-l4-solo
       benchmark_config_list_file: ./.github/data/nm_benchmark_nightly_configs_list.txt
       benchmark_timeout: 720
       push_benchmark_results_to_gh_pages: ${{ inputs.push_benchmark_results_to_gh_pages }}
@@ -74,12 +74,12 @@ jobs:
       python: 3.11.4
       gitref: ${{ github.ref }}
 
-      test_label_solo: aws-test-a10g-24G
+      test_label_solo: gcp-k8s-l4-solo
       test_label_multi: ignore
       test_timeout: 720
       test_skip_list: neuralmagic/tests/skip-for-release.txt
 
-      benchmark_label: aws-test-a10g-24G
+      benchmark_label: gcp-k8s-l4-solo
       benchmark_config_list_file: ./.github/data/nm_benchmark_nightly_configs_list.txt
       benchmark_timeout: 720
       push_benchmark_results_to_gh_pages: ${{ inputs.push_benchmark_results_to_gh_pages }}
120 changes: 0 additions & 120 deletions .github/workflows/nm-test-accuracy-smoke.yml

This file was deleted.

4 comments on commit 435d0ad

@github-actions

bigger_is_better

Benchmark suite: VLLM Engine throughput - synthetic
model: NousResearch/Llama-2-7b-chat-hf; max_model_len: 4096
benchmark_throughput: use-all-available-gpus_ "", input-len 256, output-len 128, num-prompts 1000
environment: NVIDIA L4 x 1; vllm 0.5.0; python 3.11.4 (main, Jun 7 2023, 11:01:02) [GCC 11.3.0]; torch 2.3.0+cu121

Current (435d0ad): request_throughput 2.382740539054952 prompts/s; token_throughput 914.9723669971015 tokens/s
Previous (87571b8): n/a; Ratio: n/a

This comment was automatically generated by a workflow using github-action-benchmark.
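A sanity check inferred from the reported configuration (the comment itself does not state this relationship): each prompt moves input-len + output-len = 256 + 128 = 384 tokens through the engine, and 2.382740539054952 prompts/s × 384 tokens/prompt ≈ 914.97 tokens/s, which matches the reported token_throughput. The same arithmetic holds for the three comments below.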

@github-actions

bigger_is_better

Benchmark suite: VLLM Engine throughput - synthetic
model: NousResearch/Llama-2-7b-chat-hf; max_model_len: 4096
benchmark_throughput: use-all-available-gpus_ "", input-len 256, output-len 128, num-prompts 1000
environment: NVIDIA L4 x 1; vllm 0.5.0; python 3.10.12 (main, Jun 7 2023, 13:43:11) [GCC 11.3.0]; torch 2.3.0+cu121

Current (435d0ad): request_throughput 2.387614648913205 prompts/s; token_throughput 916.8440251826706 tokens/s
Previous (87571b8): n/a; Ratio: n/a

This comment was automatically generated by a workflow using github-action-benchmark.

@github-actions

bigger_is_better

Benchmark suite: VLLM Engine throughput - synthetic
model: NousResearch/Llama-2-7b-chat-hf; max_model_len: 4096
benchmark_throughput: use-all-available-gpus_ "", input-len 256, output-len 128, num-prompts 1000
environment: NVIDIA L4 x 1; vllm 0.5.0; python 3.9.17 (main, Jun 7 2023, 12:34:12) [GCC 11.3.0]; torch 2.3.0+cu121

Current (435d0ad): request_throughput 2.382038783911934 prompts/s; token_throughput 914.7028930221828 tokens/s
Previous (87571b8): n/a; Ratio: n/a

This comment was automatically generated by a workflow using github-action-benchmark.

@github-actions

bigger_is_better

Benchmark suite: VLLM Engine throughput - synthetic
model: NousResearch/Llama-2-7b-chat-hf; max_model_len: 4096
benchmark_throughput: use-all-available-gpus_ "", input-len 256, output-len 128, num-prompts 1000
environment: NVIDIA L4 x 1; vllm 0.5.0; python 3.8.17 (default, Jun 7 2023, 12:29:56) [GCC 11.3.0]; torch 2.3.0+cu121

Current (435d0ad): request_throughput 2.368913394656634 prompts/s; token_throughput 909.6627435481475 tokens/s
Previous (87571b8): n/a; Ratio: n/a

This comment was automatically generated by a workflow using github-action-benchmark.
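Collecting the four runs above (all on 1 x NVIDIA L4 with vLLM 0.5.0 and torch 2.3.0+cu121), throughput is essentially flat across Python versions:

python     request_throughput (prompts/s)   token_throughput (tokens/s)
3.11.4     2.383                            914.972
3.10.12    2.388                            916.844
3.9.17     2.382                            914.703
3.8.17     2.369                            909.663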
