@@ -248,8 +248,8 @@ def run_file_fixed_output_count(files, parameters, mode, output_count, optimizat
     results = []
     def handle_output(output):
         """Appends the given output to the list of results."""
+        results.append(output)
         if len(results) < output_count:
-            results.append(output)
             return True
         else:
             return False
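
With this change the handler records every output it sees and tells the driver to stop as soon as output_count results have been collected, instead of only returning False on the first extra output after the quota. Below is a minimal sketch of the callback contract this relies on; fake_run_file and collect_fixed_output_count are made-up stand-ins, not functions from this codebase:

    def fake_run_file(outputs, handle_output):
        # Stand-in for run_file: feed outputs to the handler one at a time
        # and stop as soon as the handler returns False.
        for output in outputs:
            if not handle_output(output):
                break

    def collect_fixed_output_count(outputs, output_count):
        results = []

        def handle_output(output):
            # Record the output first, then report whether more are needed.
            results.append(output)
            return len(results) < output_count

        fake_run_file(outputs, handle_output)
        return results

    print(collect_fixed_output_count(['a', 'b', 'c', 'd'], 3))  # ['a', 'b', 'c']
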
@@ -275,7 +275,7 @@ def run_perf_test(files, parameters, optimization_level, n_iterations=1):
     for _ in xrange(n_iterations):
         start_time = time.time()
         test_time = run_file_single_output(
-            files, parameters + [0], 'CO',
+            files, parameters, 'CO',
             optimization_level)
         end_time = time.time()
         total_time = end_time - start_time
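
This hunk drops the [0] that was being appended to parameters, so the performance run is invoked with the caller's parameter list unchanged. The surrounding loop wall-clocks each iteration with time.time(); a rough sketch of that timing pattern, with run_once standing in for run_file_single_output (hypothetical helper, not from this codebase):

    import time

    def time_iterations(run_once, n_iterations=1):
        # Wall-clock each iteration with time.time() and keep the totals.
        totals = []
        for _ in range(n_iterations):
            start_time = time.time()
            run_once()
            end_time = time.time()
            totals.append(end_time - start_time)
        return totals

    print(time_iterations(lambda: sum(range(10 ** 6)), n_iterations=3))
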
@@ -308,17 +308,19 @@ def run_correctness_test(files, parameters, expected, optimization_level):
     and then compares the output with the expected output. The return value is the total
     run-time of the test."""
     checks = iter(list(get_expectation_checks(expected)))
+    next_check = [next(checks)]
     def handle_output(output):
         """Checks the given output against the expected output."""
+        check, expectation = next_check[0]
+        print("Got %s, expect %s" % (output, expectation))
+        assert check(output)
+
         try:
-            check, expectation = next(checks)
+            next_check[0] = next(checks)
+            return True
         except StopIteration:
             return False
 
-        print("Got %s, expect %s" % (output, expectation))
-        assert check(output)
-        return True
-
     start_time = time.time()
     try:
         run_file(files, parameters, 'CO', handle_output, optimization_level)
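
The reworked handle_output checks the output against the expectation that is currently pending before advancing the iterator, so the run can stop as soon as the last expectation has been checked rather than waiting for one extra output to discover that the iterator is exhausted. The pending check is kept in a one-element list because this code targets Python 2 (note the xrange above), where a closure cannot rebind a name from the enclosing scope. A self-contained sketch of that pattern with made-up names (make_checker is not part of this codebase; the real checks come from get_expectation_checks):

    def make_checker(expected_values):
        # One-element list as a mutable cell: the closure can replace
        # next_check[0] even though it cannot rebind a local of make_checker.
        checks = iter(expected_values)
        next_check = [next(checks)]

        def handle_output(output):
            expectation = next_check[0]
            assert output == expectation, "got %r, expected %r" % (output, expectation)
            try:
                next_check[0] = next(checks)  # advance only after the check passed
                return True                   # more outputs are expected
            except StopIteration:
                return False                  # last expectation checked; stop the run
        return handle_output

    handler = make_checker([1, 2, 3])
    for value in [1, 2, 3]:
        print(handler(value))  # True, True, then False once the expectations run out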