From 8ab622534141d2e3184e17e72e74deb64d811778 Mon Sep 17 00:00:00 2001
From: anandhu-eng
Date: Mon, 30 Sep 2024 11:49:29 +0530
Subject: [PATCH 1/2] initial commit for tip - native run CUDA

---
 main.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/main.py b/main.py
index ab08233ec..cb2316ad2 100755
--- a/main.py
+++ b/main.py
@@ -140,6 +140,9 @@ def mlperf_inference_implementation_readme(spaces, model, implementation, *, imp
                 # ref to cm installation
                 content += f"{cur_space3}Please refer to the [installation page](site:inference/install/) to install CM for running the automated benchmark commands.\n\n"
                 test_query_count=get_test_query_count(model, implementation, device.lower())
+                if device.lower() == "cuda" and execution_env.lower() == "native":
+                    content += f"\n{cur_space3}!!! tip\n\n"
+                    content += f"{cur_space3} - Unless CUDA, cuDNN and TensorRT are available in the environment it is recommended to use the Docker option.\n\n"
 
                 if "99.9" not in model: #not showing docker command as it is already done for the 99% variant
                     if implementation == "neuralmagic":

From 54d8585a175362923f748271213f003c5e24e652 Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Mon, 30 Sep 2024 16:28:23 +0530
Subject: [PATCH 2/2] Updated tip

---
 main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/main.py b/main.py
index cb2316ad2..6a607cc10 100755
--- a/main.py
+++ b/main.py
@@ -142,7 +142,7 @@ def mlperf_inference_implementation_readme(spaces, model, implementation, *, imp
                 test_query_count=get_test_query_count(model, implementation, device.lower())
                 if device.lower() == "cuda" and execution_env.lower() == "native":
                     content += f"\n{cur_space3}!!! tip\n\n"
-                    content += f"{cur_space3} - Unless CUDA, cuDNN and TensorRT are available in the environment it is recommended to use the Docker option.\n\n"
+                    content += f"{cur_space3} - It is advisable to use the commands in the Docker tab for CUDA. Run the below native command only if you are already on a CUDA setup with cuDNN and TensorRT installed.\n\n"
 
                 if "99.9" not in model: #not showing docker command as it is already done for the 99% variant
                     if implementation == "neuralmagic":
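
For reference, below is a minimal standalone sketch (not part of the patch) of what the merged conditional emits into the generated README. The `device`, `execution_env`, and `cur_space3` values are illustrative stand-ins for what `main.py` computes at runtime, not the repo's actual values; the output is an MkDocs-Material `!!! tip` admonition shown only for the CUDA + Native combination.

```python
# Sketch only: mirrors the logic added by this patch with hypothetical inputs.
device = "CUDA"            # hypothetical: one of the device tabs
execution_env = "Native"   # hypothetical: "Docker" or "Native" tab
cur_space3 = " " * 8       # hypothetical indentation for the nested tab level

content = ""
if device.lower() == "cuda" and execution_env.lower() == "native":
    # Same strings as the final version of the patch.
    content += f"\n{cur_space3}!!! tip\n\n"
    content += (
        f"{cur_space3} - It is advisable to use the commands in the Docker tab "
        f"for CUDA. Run the below native command only if you are already on a "
        f"CUDA setup with cuDNN and TensorRT installed.\n\n"
    )

print(content)  # prints the admonition block appended for CUDA + native runs
```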