Browse Source

Fix a bug in the performance correctness test runner

jonathanvdc 8 years ago
parent
commit
cf274a786f
1 changed file with 5 additions and 6 deletions
  1. 5 6
      performance/utils.py

+ 5 - 6
performance/utils.py

@@ -292,22 +292,22 @@ def get_expectation_checks(expected_values):
             # We expect to receive a number of outputs equal to the size of the set, but their
             # order does not matter.
             for _ in xrange(len(expectation)):
-                yield (lambda val: val in expectation), expectation
+                yield lambda val: val in expectation
         elif expectation is None:
             # Skip output value
-            yield (lambda _: True), expectation
+            yield lambda _: True
         else:
-            yield (lambda val: val == expectation), expectation
+            yield lambda val: val == expectation
 
     for expectation in expected_values:
         for checker in get_single_expectation_checks(expectation):
-            yield checker
+            yield checker, expectation
 
 def run_correctness_test(files, parameters, expected, optimization_level):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
        and then compares the output with the expected output. The return value is the total
        run-time of the test."""
-    checks = get_expectation_checks(expected)
+    checks = iter(list(get_expectation_checks(expected)))
     def handle_output(output):
         """Checks the given output against the expected output."""
         try:
@@ -317,7 +317,6 @@ def run_correctness_test(files, parameters, expected, optimization_level):
 
         print("Got %s, expect %s" % (output, expectation))
         assert check(output)
-
         return True
 
     start_time = time.time()