Commit

Improve support for the short variation
arjunsuresh committed Sep 20, 2024
1 parent 00c650f commit 4c92e2a
Showing 1 changed file with 9 additions and 7 deletions.
main.py: 16 changes (9 additions, 7 deletions)
@@ -91,6 +91,8 @@ def mlperf_inference_implementation_readme(spaces, model, implementation, *, imp
     # model name
     content += f"{pre_space}{model.upper()}\n\n"
 
+    final_run_mode = "valid" if "short" not in extra_variation_tags else "test"
+
     for category in categories:
         if not scenarios:
             if category == "Edge" and not scenarios:
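This is the core of the commit: when the caller passes a "short" variation tag, every generated command switches from a full `valid` run to a quick `test` run. A minimal sketch of the selection, with hypothetical tag strings:

```python
# Minimal sketch of the new mode selection; the tag strings below are
# hypothetical examples, not values read from the real docs generator.
def pick_run_mode(extra_variation_tags: str) -> str:
    # A "short" variation requests a quick smoke-test run, so the
    # generated commands use test mode; everything else gets a full
    # submission-grade "valid" run.
    return "valid" if "short" not in extra_variation_tags else "test"

assert pick_run_mode("") == "valid"
assert pick_run_mode(",_short") == "test"
```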
@@ -137,7 +139,7 @@ def mlperf_inference_implementation_readme(spaces, model, implementation, *, imp
                     content += f"{cur_space3}###### {execution_env} Environment\n\n"
                     # ref to cm installation
                     content += f"{cur_space3}Please refer to the [installation page](site:inference/install/) to install CM for running the automated benchmark commands.\n\n"
-                    test_query_count=get_test_query_count(model, implementation, device)
+                    test_query_count=get_test_query_count(model, implementation, device.lower())
 
                     if "99.9" not in model: #not showing docker command as it is already done for the 99% variant
                         if implementation == "neuralmagic":
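Lowercasing `device` before the lookup matters because `get_test_query_count` compares against the literal string `"cuda"` (see the hunk at -245,10 below), so a display-cased value such as `"CUDA"` would silently fall through to the non-CUDA multiplier. A toy illustration:

```python
# Illustration of the case-sensitivity issue the commit fixes; the
# multiplier value mirrors the new CUDA factor but is otherwise a toy.
def multiplier(device: str) -> int:
    return 5 if device == "cuda" else 1

print(multiplier("CUDA"))          # 1: misses the CUDA branch
print(multiplier("CUDA".lower()))  # 5: matches after the fix
```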
@@ -220,13 +222,13 @@ def mlperf_inference_implementation_readme(spaces, model, implementation, *, imp
 
                     for scenario in scenarios:
                         content += f"{cur_space3}=== \"{scenario}\"\n{cur_space4}###### {scenario}\n\n"
-                        run_cmd = mlperf_inference_run_command(spaces+21, model, implementation, framework.lower(), category.lower(), scenario, device.lower(), "valid", 0, False, scenarios, code_version, extra_variation_tags, extra_input_string)
+                        run_cmd = mlperf_inference_run_command(spaces+21, model, implementation, framework.lower(), category.lower(), scenario, device.lower(), final_run_mode, -1, False, scenarios, code_version, extra_variation_tags, extra_input_string)
                         content += run_cmd
                         #content += run_suffix
 
                     if len(scenarios) > 1:
                         content += f"{cur_space3}=== \"All Scenarios\"\n{cur_space4}###### All Scenarios\n\n"
-                        run_cmd = mlperf_inference_run_command(spaces+21, model, implementation, framework.lower(), category.lower(), "All Scenarios", device.lower(), "valid", 0, False, scenarios, code_version, extra_variation_tags, extra_input_string)
+                        run_cmd = mlperf_inference_run_command(spaces+21, model, implementation, framework.lower(), category.lower(), "All Scenarios", device.lower(), final_run_mode, -1, False, scenarios, code_version, extra_variation_tags, extra_input_string)
                         content += run_cmd
                         content += run_suffix
 
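For context, this loop emits mkdocs-material content tabs, and with this change each per-scenario command inherits `final_run_mode` while the hard-coded query count of 0 becomes a `-1` sentinel that, per the guard added at -452,7 below, suppresses the explicit `--test_query_count` flag. A toy reconstruction, with assumed indentation constants:

```python
# Toy reconstruction of the per-scenario tab emission; cur_space3 and
# cur_space4 are assumed to be plain indentation strings, and the real
# call to mlperf_inference_run_command is stubbed out in a comment.
cur_space3, cur_space4 = " " * 12, " " * 16
final_run_mode = "test"  # e.g. when a "short" variation tag is present
content = ""
for scenario in ["Offline", "Server"]:
    content += f'{cur_space3}=== "{scenario}"\n{cur_space4}###### {scenario}\n\n'
    # mlperf_inference_run_command(..., final_run_mode, -1, ...) is
    # appended here; -1 means "no explicit --test_query_count".
print(content)
```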
@@ -245,10 +247,10 @@ def get_test_query_count(model, implementation, device, num_devices=1):
     elif model in [ "retinanet", "bert-99", "bert-99.9" ]:
         p_range = 100
     else:
-        p_range = 50
+        p_range = 10
 
     if device == "cuda":
-        p_range *= 40
+        p_range *= 5
     p_range *= num_devices
 
     return p_range
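Putting the new constants together, the function now resolves as below. The sketch reconstructs it from the visible hunk; the leading branch of the if-chain is not shown in this diff, so it is elided:

```python
# Reconstruction of get_test_query_count from the visible hunk; the
# leading if-branch is not part of this diff, so it is elided here.
def get_test_query_count(model, implementation, device, num_devices=1):
    if model in ["retinanet", "bert-99", "bert-99.9"]:
        p_range = 100
    else:
        p_range = 10      # was 50 before this commit

    if device == "cuda":
        p_range *= 5      # was 40 before this commit

    p_range *= num_devices
    return p_range

# bert-99 on one CUDA device: 100 * 5 = 500 (previously 100 * 40 = 4000)
print(get_test_query_count("bert-99", "reference", "cuda"))
```

For bert-99 on a single CUDA device the test query count drops from 4000 to 500 (8x); the default case drops from 2000 to 50 (40x), which is what keeps a short run genuinely short.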
@@ -452,7 +454,7 @@ def mlperf_inference_run_command(spaces, model, implementation, framework, categ
     else:
         cmd_suffix = f"\\\n{pre_space} --quiet {extra_input_string}"
 
-    if execution_mode == "test":
+    if execution_mode == "test" and test_query_count > 0:
         cmd_suffix += f" \\\n {pre_space} --test_query_count={test_query_count}"
 
     if "bert" in model.lower() and framework == "deepsparse":
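The added `test_query_count > 0` guard is what makes the `-1` sentinel work: in test mode with a non-positive count, no explicit `--test_query_count` is emitted and the tool falls back to its own default. A minimal sketch under that assumption:

```python
# Minimal sketch of the guarded suffix construction; pre_space stands in
# for the indentation string used by the real generator.
def cmd_suffix(execution_mode: str, test_query_count: int, pre_space: str = "  ") -> str:
    suffix = f"\\\n{pre_space} --quiet"
    if execution_mode == "test" and test_query_count > 0:
        suffix += f" \\\n {pre_space} --test_query_count={test_query_count}"
    return suffix

print(cmd_suffix("test", -1))    # no --test_query_count emitted
print(cmd_suffix("test", 500))   # ... --test_query_count=500 appended
```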
@@ -471,7 +473,7 @@ def mlperf_inference_run_command(spaces, model, implementation, framework, categ
 
     run_cmd = f"""\n
 {f_pre_space}```bash
-{f_pre_space}cm run script --tags=run-mlperf,inference,_{code_version}{scenario_variation_tag} \\
+{f_pre_space}cm run script --tags=run-mlperf,inference,_{code_version}{scenario_variation_tag}{extra_variation_tags} \\
 {pre_space} --model={model} \\
 {pre_space} --implementation={implementation} \\
 {pre_space} --framework={framework} \\
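Appending `{extra_variation_tags}` to `--tags` is what actually surfaces the short variation in the generated command. Assuming the short variation contributes a `,_short` tag fragment (an illustrative value, not one read from the real variation registry), the first line of a generated command would look like this:

```python
# Demonstration of the tag concatenation; the tag fragments below are
# assumed examples for illustration only.
code_version = "r4.1-dev"
scenario_variation_tag = ""       # empty when one scenario is emitted per tab
extra_variation_tags = ",_short"  # assumed fragment for the short variation

cmd = (f"cm run script --tags=run-mlperf,inference,_{code_version}"
       f"{scenario_variation_tag}{extra_variation_tags} \\")
print(cmd)
# cm run script --tags=run-mlperf,inference,_r4.1-dev,_short \
```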
