Skip to content

Commit

Permalink
Update benchmark (#34)
Browse files Browse the repository at this point in the history
* Update benchmark

* Auto-format by https://ultralytics.com/actions

---------

Co-authored-by: UltralyticsAssistant <[email protected]>
  • Loading branch information
initialencounter and UltralyticsAssistant authored Nov 1, 2024
1 parent 8b1c808 commit 474d5c0
Showing 1 changed file with 37 additions and 9 deletions.
46 changes: 37 additions & 9 deletions tests/test_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,17 @@ def transform_and_check(self, name, filename, transformation_func, suffix, check
return summarize_model(output_file, suffix)
return None

def run_model_test(self, name, filename, check_func=None):
    """Benchmark one ONNX model with every slimming tool and print a comparison table.

    Summarizes the original model, then runs onnxslim, onnxsim, and polygraphy
    over it via ``transform_and_check``; tool runs that fail (return ``None``)
    are dropped before the table is printed.

    Args:
        name: Model name used for output file naming and the table header.
        filename: Path to the source ``.onnx`` file.
        check_func: Optional callable run against each transformed model to
            verify it still performs inference correctly.
    """
    tools = (
        (bench_onnxslim, "onnxslim"),
        (bench_onnxsim, "onnxsim"),
        (bench_polygraphy, "polygraphy"),
    )
    summaries = [summarize_model(filename)]
    for transform, suffix in tools:
        summaries.append(self.transform_and_check(name, filename, transform, suffix, check_func))

    print()
    print_model_info_as_table(name, [s for s in summaries if s is not None])

def test_silero_vad(self, request):
def check_model_inference(model_path):
batch_size = 2
Expand All @@ -63,18 +74,35 @@ def check_model_inference(model_path):

name = request.node.originalname[len("test_") :]
filename = f"{MODELZOO_PATH}/{name}/{name}.onnx"
self.run_model_test(name, filename, check_model_inference)

summary_list = [summarize_model(filename)]
summary_list.append(self.transform_and_check(name, filename, bench_onnxslim, "onnxslim", check_model_inference))
summary_list.append(self.transform_and_check(name, filename, bench_onnxsim, "onnxsim", check_model_inference))
summary_list.append(
self.transform_and_check(name, filename, bench_polygraphy, "polygraphy", check_model_inference)
)
def test_decoder_with_past_model(self, request):
    """Benchmark the decoder-with-past model across all slimming tools.

    Defect fixed: three leftover deleted-diff lines (an orphaned
    ``summary_list`` filter and a stray ``print``/``print_model_info_as_table``
    pair) were interleaved into this method by a garbled diff paste; they
    referenced an undefined ``summary_list`` and would raise ``NameError``.
    They belong to the removed old code path and are dropped here.
    """

    def check_model_inference(model_path):
        # Smoke-test: the transformed model must still accept the decoder's
        # two inputs (token ids + encoder hidden states) without raising.
        batch_size = 2
        input_ids = np.ones((batch_size, 256), dtype=np.int64)
        encoder_hidden_states = np.zeros((batch_size, 128, 16), dtype=np.float32)

        ort_sess = ort.InferenceSession(model_path)
        ort_sess.run(None, {"input_ids": input_ids, "encoder_hidden_states": encoder_hidden_states})

    # Derive the model name from the test name, e.g. "test_decoder_with_past_model"
    # -> "decoder_with_past_model", and locate it in the model zoo.
    name = request.node.originalname[len("test_") :]
    filename = f"{MODELZOO_PATH}/{name}/{name}.onnx"
    self.run_model_test(name, filename, check_model_inference)

def test_tiny_en_decoder(self, request):
    """Run the benchmark suite on the tiny_en_decoder model (no inference check)."""
    model_name = request.node.originalname[len("test_") :]
    model_path = f"{MODELZOO_PATH}/{model_name}/{model_name}.onnx"
    self.run_model_test(model_name, model_path)

def test_transformer_encoder(self, request):
    """Run the benchmark suite on the transformer_encoder model (no inference check)."""
    model_name = request.node.originalname[len("test_") :]
    model_path = f"{MODELZOO_PATH}/{model_name}/{model_name}.onnx"
    self.run_model_test(model_name, model_path)

def test_uiex(self, request):
    """Run the benchmark suite on the uiex model (no inference check)."""
    model_name = request.node.originalname[len("test_") :]
    model_path = f"{MODELZOO_PATH}/{model_name}/{model_name}.onnx"
    self.run_model_test(model_name, model_path)


if __name__ == "__main__":
Expand Down

0 comments on commit 474d5c0

Please sign in to comment.