diff --git a/main.py b/main.py
index ab08233ec..6a607cc10 100755
--- a/main.py
+++ b/main.py
@@ -140,6 +140,9 @@ def mlperf_inference_implementation_readme(spaces, model, implementation, *, imp
             # ref to cm installation
             content += f"{cur_space3}Please refer to the [installation page](site:inference/install/) to install CM for running the automated benchmark commands.\n\n"
             test_query_count=get_test_query_count(model, implementation, device.lower())
+            if device.lower() == "cuda" and execution_env.lower() == "native":
+                content += f"\n{cur_space3}!!! tip\n\n"
+                content += f"{cur_space3} - It is advisable to use the commands in the Docker tab for CUDA. Run the below native command only if you are already on a CUDA setup with cuDNN and TensorRT installed.\n\n"
             if "99.9" not in model: #not showing docker command as it is already done for the 99% variant
                 if implementation == "neuralmagic":