
Include legacy interpreter in perf tests

jonathanvdc 8 years ago
parent commit 391b5dbbbf

+ 2 - 2
performance/code/test_harness.alc

@@ -15,8 +15,8 @@ Void function main():
 	Integer start_time
 	Integer end_time
 	config = input()
-	if (config == "interpreter"):
-		set_jit_enabled(False)
+	// if (config == "interpreter"):
+		// set_jit_enabled(False)
 
 	start_time = time()
 	// HACK: use `call_function` to hide what would otherwise be a direct call to `test_main`
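Note: the harness no longer disables the JIT based on the `config` string it reads; kernel selection now happens when the Modelverse server is launched (see the `run_file` change in `performance/utils.py` below), while `config = input()` still consumes the level string that `run_perf_test` feeds in. A minimal Python sketch of the launch arguments this produces, with the port value invented for illustration:

    # Sketch only: mirrors the new modelverse_args logic in utils.run_file.
    port = 8000                                # hypothetical; run_file picks its own free port
    optimization_level = "legacy-interpreter"  # one of utils.ALL_OPTIMIZATION_LEVELS

    modelverse_args = [str(port)]
    if optimization_level is not None:
        # The commit assumes run_local_modelverse accepts a --kernel flag.
        modelverse_args.append('--kernel=%s' % optimization_level)
    print(modelverse_args)                     # ['8000', '--kernel=legacy-interpreter']
    # execute("run_local_modelverse", modelverse_args, wait=False)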

+ 2 - 7
performance/test_fibonacci.py

@@ -1,14 +1,7 @@
 import unittest
 import utils
 
-
 class TestFibonacci(unittest.TestCase):
-    def test_interpreter_fibonacci(self):
-        self.fibonacci(utils.OPTIMIZATION_LEVEL_INTERPRETER)
-
-    def test_baseline_jit_fibonacci(self):
-        self.fibonacci(utils.OPTIMIZATION_LEVEL_BASELINE_JIT)
-
     def fibonacci(self, optimization_level):
         utils.write_perf_to_file(
             'fibonacci', optimization_level,
@@ -16,3 +9,5 @@ class TestFibonacci(unittest.TestCase):
                 ["test_harness.alc", "fibonacci.alc", "primitives.alc", "jit.alc"],
                 ["test_harness.alc", "fibonacci.alc", "primitives.alc", "jit.alc"],
                 [20],
                 [20],
                 optimization_level))
                 optimization_level))
+
+utils.define_perf_tests(TestFibonacci, TestFibonacci.fibonacci)
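Since `define_perf_tests` (added in `performance/utils.py` below) creates one test method per entry in `ALL_OPTIMIZATION_LEVELS`, the call above behaves roughly like the hand-written equivalent sketched here; the class name `TestFibonacciExpanded` is only illustrative:

    import unittest
    import utils

    class TestFibonacciExpanded(unittest.TestCase):
        # Method names follow 'test_%s' % level.replace('-', '_').lower().
        def test_legacy_interpreter(self):
            self.fibonacci(utils.OPTIMIZATION_LEVEL_LEGACY_INTERPRETER)

        def test_interpreter(self):
            self.fibonacci(utils.OPTIMIZATION_LEVEL_INTERPRETER)

        def test_baseline_jit(self):
            self.fibonacci(utils.OPTIMIZATION_LEVEL_BASELINE_JIT)

        def fibonacci(self, optimization_level):
            # Same timing body as TestFibonacci.fibonacci above.
            pass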

+ 2 - 6
performance/test_matrix_create.py

@@ -3,12 +3,6 @@ import utils
 
 
 class TestMatrixCreate(unittest.TestCase):
-    def test_interpreter_matrix_create(self):
-        self.create_matrix(utils.OPTIMIZATION_LEVEL_INTERPRETER)
-
-    def test_baseline_jit_matrix_create(self):
-        self.create_matrix(utils.OPTIMIZATION_LEVEL_BASELINE_JIT)
-
     def create_matrix(self, optimization_level):
         utils.write_perf_to_file(
             'matrix_create', optimization_level,
@@ -18,3 +12,5 @@ class TestMatrixCreate(unittest.TestCase):
                  "random.alc", "jit.alc"],
                  "random.alc", "jit.alc"],
                 [100],
                 [100],
                 optimization_level))
                 optimization_level))
+
+utils.define_perf_tests(TestMatrixCreate, TestMatrixCreate.create_matrix)

+ 2 - 6
performance/test_matrix_gauss_jordan.py

@@ -3,12 +3,6 @@ import utils
 
 
 class TestMatrixGaussJordan(unittest.TestCase):
-    def test_interpreter_matrix_gauss_jordan(self):
-        self.matrix_gauss_jordan(utils.OPTIMIZATION_LEVEL_INTERPRETER)
-
-    def test_baseline_jit_matrix_gauss_jordan(self):
-        self.matrix_gauss_jordan(utils.OPTIMIZATION_LEVEL_BASELINE_JIT)
-
     def matrix_gauss_jordan(self, optimization_level):
         utils.write_perf_to_file(
             'matrix_gauss_jordan', optimization_level,
@@ -18,3 +12,5 @@ class TestMatrixGaussJordan(unittest.TestCase):
                  "random.alc", "jit.alc"],
                  "random.alc", "jit.alc"],
                 [25],
                 [25],
                 optimization_level))
                 optimization_level))
+
+utils.define_perf_tests(TestMatrixGaussJordan, TestMatrixGaussJordan.matrix_gauss_jordan)

+ 30 - 7
performance/utils.py

@@ -25,8 +25,14 @@ CURRENT_FOLDER_NAME = "performance"
 
 PORTS = set()
 
+OPTIMIZATION_LEVEL_LEGACY_INTERPRETER = "legacy-interpreter"
 OPTIMIZATION_LEVEL_INTERPRETER = "interpreter"
 OPTIMIZATION_LEVEL_BASELINE_JIT = "baseline-jit"
+ALL_OPTIMIZATION_LEVELS = [
+    OPTIMIZATION_LEVEL_LEGACY_INTERPRETER,
+    OPTIMIZATION_LEVEL_INTERPRETER,
+    OPTIMIZATION_LEVEL_BASELINE_JIT
+]
 
 class ModelverseTerminated(Exception):
     """An exception that tells the user that the Modelverse has terminated."""
@@ -117,7 +123,7 @@ def compile_file(address, mod_filename, filename, mode, proc):
         except UnboundLocalError:
             pass
 
-def run_file(files, parameters, mode, handle_output):
+def run_file(files, parameters, mode, handle_output, optimization_level=None):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
        and handles their output."""
     # Resolve file
@@ -128,7 +134,10 @@ def run_file(files, parameters, mode, handle_output):
     address = "http://127.0.0.1:%i" % port
     try:
         # Run Modelverse server
-        proc = execute("run_local_modelverse", [str(port)], wait=False)
+        modelverse_args = [str(port)]
+        if optimization_level is not None:
+            modelverse_args.append('--kernel=%s' % optimization_level)
+        proc = execute("run_local_modelverse", modelverse_args, wait=False)
 
         threads = []
         mod_files = []
@@ -207,7 +216,7 @@ def run_file_to_completion(files, parameters, mode):
     except ModelverseTerminated:
         return results
 
-def run_file_fixed_output_count(files, parameters, mode, output_count):
+def run_file_fixed_output_count(files, parameters, mode, output_count, optimization_level=None):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
        and then collects and returns a fixed number of outputs."""
     results = []
@@ -219,13 +228,13 @@ def run_file_fixed_output_count(files, parameters, mode, output_count):
         else:
             return False
 
-    run_file(files, parameters, mode, handle_output)
+    run_file(files, parameters, mode, handle_output, optimization_level)
     return results
 
-def run_file_single_output(files, parameters, mode):
+def run_file_single_output(files, parameters, mode, optimization_level=None):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
        and then collects and returns a single output."""
-    return run_file_fixed_output_count(files, parameters, mode, 1)[0]
+    return run_file_fixed_output_count(files, parameters, mode, 1, optimization_level)[0]
 
 def run_perf_test(files, parameters, optimization_level, n_iterations=1):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
@@ -235,13 +244,27 @@ def run_perf_test(files, parameters, optimization_level, n_iterations=1):
     for _ in xrange(n_iterations):
         result += float(
             run_file_single_output(
-                files, [optimization_level] + parameters + [0], 'CO')) / float(n_iterations)
+                files, [optimization_level] + parameters + [0], 'CO',
+                optimization_level)) / float(n_iterations)
     return result
 
 def format_output(output):
     """Formats the output of `run_file_to_completion` as a string."""
     return '\n'.join(output)
 
+def define_perf_test(target_class, test_function, optimization_level):
+    """Defines a performance test in the given class. The performance test calls the given function
+       at the given optimization level."""
+    setattr(
+        target_class,
+        'test_%s' % optimization_level.replace('-', '_').lower(),
+        lambda self: test_function(self, optimization_level))
+
+def define_perf_tests(target_class, test_function):
+    """Defines performance tests in the given class. Each test calls the given function."""
+    for optimization_level in ALL_OPTIMIZATION_LEVELS:
+        define_perf_test(target_class, test_function, optimization_level)
+
 DEFAULT_PERF_FILE_NAME = 'perf_data.txt'
 
 def write_perf_to_file(test_name, optimization_level, result, file_name=DEFAULT_PERF_FILE_NAME):
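With these helpers, a benchmark module only defines one timing method and registers it; the optimization level it receives is forwarded through `run_perf_test` → `run_file_single_output` → `run_file_fixed_output_count` → `run_file`, which turns it into the server's `--kernel` argument. A hedged sketch of such a module (the file `example.alc` and the work size are made up, not part of this commit):

    # performance/test_example.py -- hypothetical module following the same pattern.
    import unittest
    import utils

    class TestExample(unittest.TestCase):
        def example(self, optimization_level):
            utils.write_perf_to_file(
                'example', optimization_level,
                utils.run_perf_test(
                    ["test_harness.alc", "example.alc", "primitives.alc", "jit.alc"],
                    [10],
                    optimization_level))

    # Adds test_legacy_interpreter, test_interpreter and test_baseline_jit to TestExample.
    utils.define_perf_tests(TestExample, TestExample.example)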