diff --git a/testsubmit.py b/testsubmit.py
index 5e842d0..280a0b2 100755
--- a/testsubmit.py
+++ b/testsubmit.py
@@ -779,10 +779,12 @@ def check_file(file_id):
         content = response.read(4096)
         output(f"{response.status} {response.reason} {content}")
-        connect.close()
     except HTTPError:
         return -1
+    finally:
+        if connect:
+            connect.close()
     return response.status
@@ -883,7 +885,6 @@ def do_http(method, resource, data=None):
         result = json.loads(content)
         if isinstance(result, str):
             result = str(response.status) + ': ' + result
-        connect.close()
     except (HTTPError, ValueError) as e:
         output("\n***************** HTTP ERROR ******************\n")
@@ -892,6 +893,10 @@ def do_http(method, resource, data=None):
         else:
             output(e)
         ok = False
+    finally:
+        if connect:
+            connect.close()
+
     return (ok, result)
@@ -1058,6 +1063,12 @@ def check_performance(lang):
         outcome = check_multiple_submissions(job, num_submits, 0)
         t1 = perf_counter()
         print(f"{num_submits} parallel submits: ", end='')
+        sys.stdout.flush()
+
+        if int(t1 - t0) > 30:
+            print("TIMEOUT.")
+            break
+
         if outcome == GOOD_TEST:
             rate = int(num_submits / (t1 - t0))
             print(f"OK. {rate} jobs/sec")
@@ -1070,26 +1081,38 @@ def check_performance(lang):
     # Backtrack and find the exact limit
     lower_limit = num_submits // 2
     upper_limit = num_submits
+    num_bisects = 0
+    max_bisects = int(ARGS.bisect)
+
+    if max_bisects > 0:
+        while (lower_limit < upper_limit) and (num_bisects < max_bisects):
+            num_submits = (lower_limit + upper_limit) // 2
+            t0 = perf_counter()
+            outcome = check_multiple_submissions(job, num_submits, 0)
+            t1 = perf_counter()
+            print(f"{num_submits} parallel submits: ", end='')
+            sys.stdout.flush()
+
+            if int(t1 - t0) > 30:
+                print("TIMEOUT.")
+                num_bisects += 1
+                upper_limit = num_submits
+                continue
+
+            if outcome == GOOD_TEST:
+                rate = int(num_submits / (t1 - t0))
+                print(f"OK. {rate} jobs/sec")
+                best_rate = max(rate, best_rate)
+                lower_limit = num_submits
+            else:
+                print("FAIL.")
+                upper_limit = num_submits
-    while lower_limit < upper_limit - 1:
-        num_submits = (lower_limit + upper_limit) // 2
-        t0 = perf_counter()
-        outcome = check_multiple_submissions(job, num_submits, 0)
-        t1 = perf_counter()
-        print(f"{num_submits} parallel submits: ", end='')
-
-        if outcome == GOOD_TEST:
-            rate = int(num_submits / (t1 - t0))
-            print(f"OK. {rate} jobs/sec")
-            best_rate = max(rate, best_rate)
-            lower_limit = num_submits
-        else:
-            print("FAIL.")
-            upper_limit = num_submits
+
+            num_bisects += 1
 
     print()
-    print(f"Maximum burst handled with no errors = {lower_limit} jobs")
+    print(f"Maximum burst handled with no errors = {lower_limit} jobs at a rate of {rate} jobs/sec")
 
     print(f"\nChecking maximum sustained throughput over {ARGS.window} sec window")
     check_sustained_load(lang, best_rate)
@@ -1118,6 +1141,10 @@ def main():
                         help='Print extra info during tests')
     parser.add_argument('langs', nargs='*',
                         help='Language(s) to check. One or more of: c cpp python3 java php pascal octave nodejs')
+    parser.add_argument('-b', '--bisect',
+                        default='5',
+                        help='''Number of bisects to do after a fail to get maximum burst-handling rate.
+Default 5. Use only with --perf. A value of 0 disables the bisect backtrack after a failure.''')
     parser.add_argument('-w', '--window',
                         default='30',
                         help='''The time window in secs over which to measure sustainable throughput.
@@ -1134,7 +1161,7 @@ def main():
         return normal_testing(langs_to_run)
     else:
         for lang in langs_to_run:
-            print(f"Measuring performance in {lang}")
+            print(f"Measuring performance in {lang} with {ARGS.bisect} bisects.")
             check_performance(lang)
     return 0