Skip to content

Commit

Permalink
Merge pull request #54 from krai/improved_performance_summary
Browse files Browse the repository at this point in the history
A more detailed summary of performance runs is available via the 'get performance' command
  • Loading branch information
bmsgit1 authored Aug 21, 2024
2 parents c9f08f3 + e0e929c commit b1bca67
Show file tree
Hide file tree
Showing 2 changed files with 53 additions and 23 deletions.
51 changes: 38 additions & 13 deletions base_loadgen_experiment/code_axs.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,11 @@ def parse_summary(abs_log_summary_path):
k = k.replace(' ', '_').replace('/', '_').replace('*', '').replace(')', '').replace('(', '')

parsed_summary[k] = to_num_or_not_to_num(v)

return parsed_summary


def beautify_summary(parsed_summary):

ureg = UnitRegistry()

Expand Down Expand Up @@ -42,7 +47,7 @@ def parse_summary(abs_log_summary_path):

v = (v*unit).to_compact()

if v.u == ureg.us:
if v.u == ureg.us:
v.ito(ureg.ms) # Keep everything in milliseconds

kv_with_units[k] = v
Expand All @@ -61,21 +66,41 @@ def parse_summary(abs_log_summary_path):
return beautified_summary


def parse_performance(summary, scenario_performance_map, raw=False):
def calc_latency_cutoff_ratio(parsed_summary):
    """Return the ratio of the measured 99th-percentile latency to the target latency.

    Only meaningful for the Server scenario, where the result validity hinges on
    staying under the target latency; a ratio > 1.0 means the cutoff was exceeded.

    Args:
        parsed_summary: dict produced by parse_summary(); must contain "Scenario"
            and, for Server runs, "99.00_percentile_latency_ns" and
            "target_latency_ns" (both numeric, same unit so the ratio is unitless).

    Returns:
        float ratio for the Server scenario, None for every other scenario
        (other scenarios have no latency cutoff).
    """
    scenario = parsed_summary["Scenario"]

    if scenario == "Server":
        return parsed_summary["99.00_percentile_latency_ns"] / parsed_summary["target_latency_ns"]

    # Explicit: non-Server scenarios carry no cutoff ratio (was an implicit fall-through).
    return None


#returns list of formatted performance metrics (as strings) for given experiment
def parse_performance(beautified_summary, latency_cutoff_ratio, scenario_performance_map, raw=False):
    """Build the list of performance metrics for one experiment.

    Args:
        beautified_summary: dict of summary key/value pairs (output of
            beautify_summary); must contain "Scenario" and "Result_is".
        latency_cutoff_ratio: ratio computed by calc_latency_cutoff_ratio();
            injected separately because it is derived, not part of the summary.
        scenario_performance_map: maps scenario -> validity -> list of metric
            key names to report.
        raw: if True, return raw metric values instead of formatted strings.

    Returns:
        None when raw is requested for an INVALID run; otherwise a list whose
        first element is the validity string, followed by one entry per metric:
        the raw value (raw=True) or a "key=value" string (raw=False).
    """
    scenario = beautified_summary["Scenario"]
    validity = beautified_summary["Result_is"]

    # Raw consumers (e.g. automated comparisons) get nothing for invalid runs.
    if raw and validity == "INVALID":
        return None

    def _metric_value(key_name):
        # latency_cutoff_ratio is the one metric not stored in the summary.
        if key_name == "latency_cutoff_ratio":
            return latency_cutoff_ratio
        return beautified_summary[key_name]

    performance_metrics = scenario_performance_map[scenario][validity]
    formatted_performance_metrics = ['{}'.format(validity)]  # validity is always the first element

    for key_name in performance_metrics:
        value = _metric_value(key_name)
        if raw:
            formatted_performance_metrics.append(value)
        elif key_name == "latency_cutoff_ratio":
            # The ratio is a bare float; cap it at two decimals for display.
            formatted_performance_metrics.append('{}={:.2f}'.format(key_name, value))
        else:
            # No multiplier/units needed here - beautify_summary already did that.
            formatted_performance_metrics.append('{}={}'.format(key_name, value))

    return formatted_performance_metrics


def unpack_accuracy_log(raw_accuracy_log):
Expand Down Expand Up @@ -124,7 +149,7 @@ def guess_command(tags, framework, loadgen_scenario, loadgen_mode, model_name, l
return "axs byquery "+','.join(terms_list)


def validate_accuracy(accuracy_dict, accuracy_range_dict ):
def validate_accuracy(accuracy_dict, accuracy_range_dict):
result_list = []
for key in accuracy_dict:
if key not in accuracy_range_dict:
Expand Down
25 changes: 15 additions & 10 deletions base_loadgen_experiment/data_axs.json
Original file line number Diff line number Diff line change
Expand Up @@ -9,28 +9,32 @@
"rel_log_summary_path": "mlperf_log_summary.txt",
"abs_log_summary_path": [ "^^", "get_path_from", "rel_log_summary_path" ],

"summary": [ "^^", "parse_summary" ],
"parsed_summary": [ "^^", "parse_summary" ],

"beautified_summary": [ "^^", "beautify_summary" ],

"latency_cutoff_ratio": [ "^^", "calc_latency_cutoff_ratio" ],

"scenario_performance_map": {
"Offline": {
"VALID": ["Samples_per_second", 1, ".3f", ""],
"INVALID": ["Samples_per_second", 1, ".3f", ""]
"VALID": ["Samples_per_second", "target_qps"],
"INVALID": ["Samples_per_second", "target_qps"]
},
"SingleStream": {
"VALID": ["_Early_stopping_90th_percentile_estimate", 1e-6, ".3f", " (milliseconds)"],
"INVALID": ["90th_percentile_latency_ns", 1e-6, ".3f", " (milliseconds)"]
"VALID": ["90th_percentile_latency", "_Early_stopping_90th_percentile_estimate"],
"INVALID": ["90th_percentile_latency", "_Early_stopping_90th_percentile_estimate"]
},
"MultiStream": {
"VALID": ["_Early_stopping_99th_percentile_estimate", 1e-6, ".3f", " (milliseconds)"],
"INVALID": ["99th_percentile_latency_ns", 1e-6, ".3f", " (milliseconds)"]
"VALID": ["99th_percentile_latency", "_Early_stopping_99th_percentile_estimate"],
"INVALID": ["99th_percentile_latency", "_Early_stopping_99th_percentile_estimate"]
},
"Server": {
"VALID": ["Scheduled_samples_per_second", 1, ".3f", ""],
"INVALID": ["99.00_percentile_latency_ns", 1e-6, ".3f", " (milliseconds)"]
"VALID": ["target_qps", "99.00_percentile_latency", "target_latency", "latency_cutoff_ratio", "Completed_samples_per_second"],
"INVALID": ["target_qps", "99.00_percentile_latency", "target_latency", "latency_cutoff_ratio", "Completed_samples_per_second"]
}
},

"performance": ["^^", "parse_performance"],
"performance": ["^^", "parse_performance"],

"accuracy_report": [ "^^", "execute", [[
[ "plant", ["accuracy_report", [ "^^", "get", "extract_accuracy_report" ]] ],
Expand All @@ -45,6 +49,7 @@
0,
[ "func", "ufun.load_json" ]
]] ],

"readable_accuracy_log": [ "^^", "unpack_accuracy_log" ],

"accuracy_range_dict": {},
Expand Down

0 comments on commit b1bca67

Please sign in to comment.