Commit: generalised code for skipping test query count
anandhu-eng committed Sep 24, 2024
1 parent 21d16ed commit 5d4a302
Showing 2 changed files with 10 additions and 10 deletions.
4 changes: 2 additions & 2 deletions docs/benchmarks/text_to_image/reproducibility/scc24.md
@@ -48,11 +48,11 @@ or supporting multi-node execution) useful for the community and [MLCommons](htt
=== "MLCommons-Python"
## MLPerf Reference Implementation in Python

-{{ mlperf_inference_implementation_readme (4, "sdxl", "reference", extra_variation_tags=",_short,_scc24-base", devices=["ROCm", "CUDA"],scenarios=["Offline"],categories=["Datacenter"], setup_tips=False) }}
+{{ mlperf_inference_implementation_readme (4, "sdxl", "reference", extra_variation_tags=",_short,_scc24-base", devices=["ROCm", "CUDA"],scenarios=["Offline"],categories=["Datacenter"], setup_tips=False, skip_test_query_count=True) }}

=== "Nvidia"
## Nvidia MLPerf Implementation
-{{ mlperf_inference_implementation_readme (4, "sdxl", "nvidia", extra_variation_tags=",_short", scenarios=["Offline"],categories=["Datacenter"], setup_tips=False, implementation_tips=False) }}
+{{ mlperf_inference_implementation_readme (4, "sdxl", "nvidia", extra_variation_tags=",_short", scenarios=["Offline"],categories=["Datacenter"], setup_tips=False, implementation_tips=False, skip_test_query_count=True) }}

## Submission Commands

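For context, the `{{ mlperf_inference_implementation_readme(...) }}` calls above are mkdocs-macros invocations resolved by `define_env` in `main.py`, the second file in this commit. Below is a minimal sketch of that wiring, assuming the mkdocs-macros plugin and reducing the macro body to an illustrative placeholder:

```python
# main.py: module discovered by the mkdocs-macros plugin.
def define_env(env):

    @env.macro
    def mlperf_inference_implementation_readme(spaces, model, implementation,
                                               *, skip_test_query_count=False,
                                               **kwargs):
        # Placeholder body: the real macro renders full setup and run tabs.
        # When the new flag is True, --test_query_count is left out entirely.
        flag = "" if skip_test_query_count else " --test_query_count=<n>"
        return f"<run commands for {model}/{implementation}>{flag}"
```

With `skip_test_query_count=True`, the SCC24 page therefore renders its commands without a fixed `--test_query_count`.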
16 changes: 8 additions & 8 deletions main.py
@@ -1,7 +1,7 @@
def define_env(env):

@env.macro
-def mlperf_inference_implementation_readme(spaces, model, implementation, *, implementation_tips=True, setup_tips=True, run_tips=True, scenarios = [], devices=[], frameworks=[], categories=[], extra_variation_tags="", extra_input_string="", extra_docker_input_string=""):
+def mlperf_inference_implementation_readme(spaces, model, implementation, *, implementation_tips=True, setup_tips=True, run_tips=True, skip_test_query_count=False, scenarios = [], devices=[], frameworks=[], categories=[], extra_variation_tags="", extra_input_string="", extra_docker_input_string=""):
pre_space = ""

for i in range(1,spaces):
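The signature change above follows the usual backward-compatible pattern: a new keyword parameter with a `False` default, so existing call sites keep their old behaviour while pages like scc24.md can opt in explicitly. A toy illustration of the pattern (names here are hypothetical, not the repository's):

```python
# Hypothetical reduction of the pattern: the False default preserves the old
# behaviour for every caller that does not mention the flag.
def render_readme(spaces, model, implementation, *, skip_test_query_count=False):
    suffix = "" if skip_test_query_count else " --test_query_count=20"
    return " " * spaces + f"commands for {model} ({implementation}){suffix}"

print(render_readme(4, "sdxl", "reference"))                           # flag kept
print(render_readme(4, "sdxl", "nvidia", skip_test_query_count=True))  # flag dropped
```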
@@ -150,7 +150,7 @@ def mlperf_inference_implementation_readme(spaces, model, implementation, *, imp
content += f"\n{cur_space3}!!! tip\n\n"
content += f"{cur_space3} - Host and Port number of the server can be configured through `--host` and `--port` options. Otherwise, server will run on the default host `localhost` and port `8000`.\n\n"

-setup_run_cmd = mlperf_inference_run_command(spaces+17, model, implementation, framework.lower(), category.lower(), "Offline", device.lower(), "test", test_query_count, True, scenarios, code_version, extra_variation_tags, extra_input_string, extra_docker_input_string)
+setup_run_cmd = mlperf_inference_run_command(spaces+17, model, implementation, framework.lower(), category.lower(), "Offline", device.lower(), "test", test_query_count, True, skip_test_query_count, scenarios, code_version, extra_variation_tags, extra_input_string, extra_docker_input_string)

if execution_env == "Native": # Native implementation steps through virtual environment
content += f"{cur_space3}####### Setup a virtual environment for Python\n"
@@ -186,7 +186,7 @@ def mlperf_inference_implementation_readme(spaces, model, implementation, *, imp
content += f"{cur_space3} You can reuse the same environment as described for {model.split('.')[0]}.\n"
content += f"{cur_space3}###### Performance Estimation for Offline Scenario\n"

-content += mlperf_inference_run_command(spaces+17, model, implementation, framework.lower(), category.lower(), "Offline", device.lower(), "test", test_query_count, True, scenarios, code_version).replace("--docker ","")
+content += mlperf_inference_run_command(spaces+17, model, implementation, framework.lower(), category.lower(), "Offline", device.lower(), "test", test_query_count, True, skip_test_query_count, scenarios, code_version).replace("--docker ","")
content += f"{cur_space3}The above command should do a test run of Offline scenario and record the estimated offline_target_qps.\n\n"


@@ -222,13 +222,13 @@ def mlperf_inference_implementation_readme(spaces, model, implementation, *, imp

for scenario in scenarios:
content += f"{cur_space3}=== \"{scenario}\"\n{cur_space4}###### {scenario}\n\n"
-run_cmd = mlperf_inference_run_command(spaces+21, model, implementation, framework.lower(), category.lower(), scenario, device.lower(), final_run_mode, -1, False, scenarios, code_version, extra_variation_tags, extra_input_string)
+run_cmd = mlperf_inference_run_command(spaces+21, model, implementation, framework.lower(), category.lower(), scenario, device.lower(), final_run_mode, -1, False, skip_test_query_count, scenarios, code_version, extra_variation_tags, extra_input_string)
content += run_cmd
#content += run_suffix

if len(scenarios) > 1:
content += f"{cur_space3}=== \"All Scenarios\"\n{cur_space4}###### All Scenarios\n\n"
-run_cmd = mlperf_inference_run_command(spaces+21, model, implementation, framework.lower(), category.lower(), "All Scenarios", device.lower(), final_run_mode, -1, False, scenarios, code_version, extra_variation_tags, extra_input_string)
+run_cmd = mlperf_inference_run_command(spaces+21, model, implementation, framework.lower(), category.lower(), "All Scenarios", device.lower(), final_run_mode, -1, False, skip_test_query_count, scenarios, code_version, extra_variation_tags, extra_input_string)
content += run_cmd
content += run_suffix

@@ -396,7 +396,7 @@ def get_run_cmd_extra(f_pre_space, model, implementation, device, scenario, scen


@env.macro
-def mlperf_inference_run_command(spaces, model, implementation, framework, category, scenario, device="cpu", execution_mode="test", test_query_count="20", docker=False, scenarios = [], code_version="r4.1-dev", extra_variation_tags="", extra_input_string="", extra_docker_input_string=""):
+def mlperf_inference_run_command(spaces, model, implementation, framework, category, scenario, device="cpu", execution_mode="test", test_query_count="20", docker=False, skip_test_query_count=False, scenarios = [], code_version="r4.1-dev", extra_variation_tags="", extra_input_string="", extra_docker_input_string=""):
pre_space = ""
for i in range(1,spaces):
pre_space = pre_space + " "
@@ -417,7 +417,7 @@

if docker:
docker_cmd_suffix = f" \\\n{pre_space} --docker --quiet"
-if test_query_count > 0:
+if not skip_test_query_count:
docker_cmd_suffix += f" \\\n{pre_space} --test_query_count={test_query_count}"
if extra_docker_input_string != "" or extra_input_string != "":
docker_cmd_suffix += f" \\\n{pre_space} {extra_docker_input_string} {extra_input_string}"
@@ -456,7 +456,7 @@ def mlperf_inference_run_command(spaces, model, implementation, framework, categ
else:
cmd_suffix = f"\\\n{pre_space} --quiet {extra_input_string}"

if execution_mode == "test" and test_query_count > 0:
if execution_mode == "test" and not skip_test_query_count:
cmd_suffix += f" \\\n {pre_space} --test_query_count={test_query_count}"

if "bert" in model.lower() and framework == "deepsparse":
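The native (non-Docker) branch applies the same gate only in test mode. A quick sketch of the resulting behaviour (the extra-input handling is omitted here for brevity):

```python
# Sketch of the non-Docker branch above: --test_query_count is appended only
# when execution_mode is "test" and skip_test_query_count is False.
def cmd_suffix(execution_mode, skip_test_query_count,
               test_query_count=20, pre_space="    "):
    suffix = f"\\\n{pre_space} --quiet"
    if execution_mode == "test" and not skip_test_query_count:
        suffix += f" \\\n {pre_space} --test_query_count={test_query_count}"
    return suffix

for mode, skip in [("test", False), ("test", True), ("valid", False)]:
    present = "--test_query_count" in cmd_suffix(mode, skip)
    print(f"{mode}/skip={skip}: flag present = {present}")
```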
