Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Improve test performance granularity2 #86

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
65 changes: 46 additions & 19 deletions testsubmit.py
Original file line number Diff line number Diff line change
Expand Up @@ -779,10 +779,12 @@ def check_file(file_id):
content = response.read(4096)
output(f"{response.status} {response.reason} {content}")

connect.close()

except HTTPError:
return -1
finally:
if connect:
connect.close()

return response.status

Expand Down Expand Up @@ -883,7 +885,6 @@ def do_http(method, resource, data=None):
result = json.loads(content)
if isinstance(result, str):
result = str(response.status) + ': ' + result
connect.close()

except (HTTPError, ValueError) as e:
output("\n***************** HTTP ERROR ******************\n")
Expand All @@ -892,6 +893,10 @@ def do_http(method, resource, data=None):
else:
output(e)
ok = False
finally:
if connect:
connect.close()

return (ok, result)


Expand Down Expand Up @@ -1058,6 +1063,12 @@ def check_performance(lang):
outcome = check_multiple_submissions(job, num_submits, 0)
t1 = perf_counter()
print(f"{num_submits} parallel submits: ", end='')
sys.stdout.flush()

if int(t1 - t0) > 30:
print("TIMEOUT.")
break

if outcome == GOOD_TEST:
rate = int(num_submits / (t1 - t0))
print(f"OK. {rate} jobs/sec")
Expand All @@ -1070,26 +1081,38 @@ def check_performance(lang):
# Backtrack and find the exact limit
lower_limit = num_submits // 2
upper_limit = num_submits
num_bisects = 0
max_bisects = int(ARGS.bisect)

if max_bisects > 0:
while (lower_limit < upper_limit) and (num_bisects < max_bisects) :
num_submits = (lower_limit + upper_limit) // 2
t0 = perf_counter()
outcome = check_multiple_submissions(job, num_submits, 0)
t1 = perf_counter()
print(f"{num_submits} parallel submits: ", end='')
sys.stdout.flush()

if int(t1 - t0) > 30:
print("TIMEOUT.")
num_bisects += 1
upper_limit = num_submits
continue

if outcome == GOOD_TEST:
rate = int(num_submits / (t1 - t0))
print(f"OK. {rate} jobs/sec")
best_rate = max(rate, best_rate)
lower_limit = num_submits
else:
print("FAIL.")
upper_limit = num_submits

while lower_limit < upper_limit - 1:
num_submits = (lower_limit + upper_limit) // 2
t0 = perf_counter()
outcome = check_multiple_submissions(job, num_submits, 0)
t1 = perf_counter()
print(f"{num_submits} parallel submits: ", end='')

if outcome == GOOD_TEST:
rate = int(num_submits / (t1 - t0))
print(f"OK. {rate} jobs/sec")
best_rate = max(rate, best_rate)
lower_limit = num_submits
else:
print("FAIL.")
upper_limit = num_submits
num_bisects += 1


print()
print(f"Maximum burst handled with no errors = {lower_limit} jobs")
print(f"Maximum burst handled with no errors = {lower_limit} jobs at a rate of {rate} jobs/sec")
print(f"\nChecking maximum sustained throughput over {ARGS.window} sec window")
check_sustained_load(lang, best_rate)

Expand Down Expand Up @@ -1118,6 +1141,10 @@ def main():
help='Print extra info during tests')
parser.add_argument('langs', nargs='*',
help='Language(s) to check. One or more of: c cpp python3 java php pascal octave nodejs')
parser.add_argument('-b', '--bisect',
default='5',
help='''Number of bisects do to after a fail to get maximum burst-handling rate..
Default 5. Use only with --perf. A value of 0 disables the bisect backtrack after a failure.''')
parser.add_argument('-w', '--window',
default='30',
help='''The time window in secs over which to measure sustainable throughput.
Expand All @@ -1134,7 +1161,7 @@ def main():
return normal_testing(langs_to_run)
else:
for lang in langs_to_run:
print(f"Measuring performance in {lang}")
print(f"Measuring performance in {lang} with {ARGS.bisect} bisects.")
check_performance(lang)
return 0

Expand Down