
Measure compile time for a bunch of perf tests

jonathanvdc committed 8 years ago · commit a385c96b8b

performance/test_mvc_model_overwrite.py (+1, -1)

@@ -22,7 +22,7 @@ all_files = [
 
 class TestMvCModelOverwrite(unittest.TestCase):
     def model_overwrite(self, optimization_level):
-        utils.write_total_runtime_to_file(
+        utils.write_perf_to_file(
             'mvc_model_overwrite', optimization_level,
             utils.run_correctness_test(all_files, [
                 "root",

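The helper behind this rename is not part of the diff. The sketch below is a hypothetical reconstruction of write_perf_to_file, assuming it receives either None or the dictionary of measured quantities returned by run_correctness_test (see the performance/utils.py hunk further down) and persists it keyed by test name and optimization level. The signature follows the call sites above; the JSON file name and layout are assumptions.

    # Hypothetical sketch (not part of this commit) of what write_perf_to_file
    # might look like. Assumes `result` is either None or a dict mapping
    # quantity names to measured values, and that results accumulate in a JSON
    # file; both are assumptions, not the repository's actual implementation.
    import json

    def write_perf_to_file(test_name, optimization_level, result,
                           path='perf_results.json'):
        """Records the measured quantities for one test/optimization level."""
        if result is None:
            return  # the test terminated before producing measurements
        try:
            with open(path, 'r') as result_file:
                all_results = json.load(result_file)
        except (IOError, ValueError):
            all_results = {}
        all_results.setdefault(test_name, {})[optimization_level] = result
        with open(path, 'w') as result_file:
            json.dump(all_results, result_file, indent=2)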
performance/test_mvc_print_upload.py (+1, -1)

@@ -22,7 +22,7 @@ all_files = [
 
 class TestMvCPrintUpload(unittest.TestCase):
     def print_upload(self, optimization_level):
-        utils.write_total_runtime_to_file(
+        utils.write_perf_to_file(
             'mvc_print_upload', optimization_level,
             utils.run_correctness_test(all_files, [
                 "root",

performance/test_mvc_simulate.py (+7, -2)

@@ -22,7 +22,7 @@ all_files = [
 
 class TestMvCSimulate(unittest.TestCase):
     def transform_add_MT_pn_simulate(self, optimization_level):
-        utils.write_total_runtime_to_file(
+        utils.write_perf_to_file(
             'mvc_simulate', optimization_level,
             utils.run_correctness_test(all_files,
             [ "root", "root", "root",
@@ -331,8 +331,13 @@ utils.define_perf_tests(
     TestMvCSimulate,
     TestMvCSimulate.transform_add_MT_pn_simulate,
     optimization_levels=[
+        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LARGE_FUNCTIONS,
         utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS,
         utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS,
+        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_FUNCTIONS,
         utils.OPTIMIZATION_LEVEL_BASELINE_JIT,
-        utils.OPTIMIZATION_LEVEL_FAST_JIT
+        utils.OPTIMIZATION_LEVEL_FAST_JIT,
+        utils.OPTIMIZATION_LEVEL_BYTECODE_INTERPRETER,
+        utils.OPTIMIZATION_LEVEL_INTERPRETER,
+        utils.OPTIMIZATION_LEVEL_LEGACY_INTERPRETER
     ])

performance/test_mvc_simulate_larger.py (+1, -1)

@@ -46,7 +46,7 @@ class TestMvCSimulateLarger(unittest.TestCase):
                 "Ready for command...",
             ]
 
-        utils.write_total_runtime_to_file(
+        utils.write_perf_to_file(
             'mvc_large', optimization_level,
             utils.run_correctness_test(all_files,
             ["root", "root", "root",

performance/utils.py (+14, -4)

@@ -337,8 +337,8 @@ def get_expectation_checks(expected_values):
 
 def run_correctness_test(files, parameters, expected, optimization_level):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
-       and then compares the output with the expected output. The return value is the total
-       run-time of the test."""
+       and then compares the output with the expected output. The return value is a dictionary
+       that maps quantity names to measured values."""
     checks = iter(list(get_expectation_checks(expected)))
     next_check = [next(checks)]
     def handle_output(output):
@@ -353,13 +353,23 @@ def run_correctness_test(files, parameters, expected, optimization_level):
         except StopIteration:
             return False
 
+    timing_log = tempfile.mktemp()
     start_time = time.time()
     try:
-        run_file(files, parameters, 'CO', handle_output, optimization_level)
+        run_file(files, parameters, 'CO', handle_output, optimization_level, timing_log)
+        with open(timing_log, 'r') as log_file:
+            parsed_times = parse_jit_timing_log(log_file)
+
+        compile_time = sum([data for _, data in parsed_times])
     except ModelverseTerminated:
         return
+    finally:
+        os.remove(timing_log)
     end_time = time.time()
-    return end_time - start_time
+    return {
+        TOTAL_TIME_QUANTITY: end_time - start_time,
+        COMPILE_TIME_QUANTITY: compile_time
+    }
 
 def format_output(output):
     """Formats the output of `run_file_to_completion` as a string."""