
Merge branch 'jit' of msdl.uantwerpen.be:jonathanvdc/modelverse into testing

Yentl Van Tendeloo 8 years ago
parent
commit
6906f9bf61

+ 13 - 3
hybrid_server/classes/mvkcontroller.xml

@@ -34,12 +34,18 @@
                     self.mvk.jit.enable_source_maps(False)
                 elif opt == 'source-maps':
                     self.mvk.jit.enable_source_maps()
+                elif opt == 'no-insert-nops':
+                    self.mvk.jit.enable_nop_insertion(False)
+                elif opt == 'insert-nops':
+                    self.mvk.jit.enable_nop_insertion()
                 elif opt == 'trace':
                     self.mvk.jit.enable_tracing()
-                elif opt == 'fast-jit':
-                    self.mvk.jit.set_function_body_compiler(jit.compile_function_body_fast)
+                elif opt == 'bytecode-interpreter':
+                    self.mvk.jit.set_function_body_compiler(jit.compile_function_body_interpret)
                 elif opt == 'baseline-jit':
                     self.mvk.jit.set_function_body_compiler(jit.compile_function_body_baseline)
+                elif opt == 'fast-jit':
+                    self.mvk.jit.set_function_body_compiler(jit.compile_function_body_fast)
                 elif opt == 'adaptive-jit-favor-large-functions':
                     self.mvk.jit.set_function_body_compiler(
                             lambda *args: jit.compile_function_body_adaptive(
@@ -48,10 +54,14 @@
                     self.mvk.jit.set_function_body_compiler(
                         lambda *args: jit.compile_function_body_adaptive(
                             *args, temperature_heuristic=jit.favor_small_functions))
-                elif opt == 'adaptive-jit' or opt == 'adaptive-jit-favor-loops':
+                elif opt == 'adaptive-jit-favor-loops':
                     self.mvk.jit.set_function_body_compiler(
                         lambda *args: jit.compile_function_body_adaptive(
                             *args, temperature_heuristic=jit.favor_loops))
+                elif opt == 'adaptive-jit' or opt == 'adaptive-jit-favor-small-loops':
+                    self.mvk.jit.set_function_body_compiler(
+                        lambda *args: jit.compile_function_body_adaptive(
+                            *args, temperature_heuristic=jit.favor_small_loops))
                 else:
                     print("warning: unknown kernel option '%s'." % opt)
 

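The option handling above wires the new compiler choices into the kernel: 'bytecode-interpreter', 'baseline-jit', 'fast-jit', and the adaptive variants, where plain 'adaptive-jit' now defaults to the favor_small_loops heuristic. A minimal sketch of the adaptive wiring pattern used in the elif chain (illustrative only; make_adaptive_compiler is not part of the code base):

    # Hedged sketch: bind a temperature heuristic to the adaptive compiler,
    # mirroring the lambdas in the elif chain above.
    def make_adaptive_compiler(jit_module, heuristic):
        """Returns a function-body compiler that uses the given temperature heuristic."""
        return lambda *args: jit_module.compile_function_body_adaptive(
            *args, temperature_heuristic=heuristic)

    # Usage, analogous to the 'adaptive-jit' branch:
    #     self.mvk.jit.set_function_body_compiler(
    #         make_adaptive_compiler(jit, jit.favor_small_loops))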
+ 317 - 0
kernel/modelverse_jit/bytecode_interpreter.py

@@ -0,0 +1,317 @@
+"""Interprets parsed bytecode graphs."""
+
+import modelverse_jit.bytecode_ir as bytecode_ir
+import modelverse_jit.runtime as jit_runtime
+import modelverse_kernel.primitives as primitive_functions
+
+class BreakException(Exception):
+    """A type of exception that is used to interpret 'break' instructions:
+       the 'break' instructions throw a BreakException, which is then handled
+       by the appropriate 'while' instruction."""
+    def __init__(self, loop):
+        Exception.__init__(self)
+        self.loop = loop
+
+class ContinueException(Exception):
+    """A type of exception that is used to interpret 'continue' instructions:
+       the 'continue' instructions throw a ContinueException, which is then handled
+       by the appropriate 'while' instruction."""
+    def __init__(self, loop):
+        Exception.__init__(self)
+        self.loop = loop
+
+class InterpreterState(object):
+    """The state of the bytecode interpreter."""
+    def __init__(self, gc_root_node, keyword_arg_dict, nop_period=20):
+        self.gc_root_node = gc_root_node
+        self.nop_period = nop_period
+        self.keyword_arg_dict = keyword_arg_dict
+        self.current_result = None
+        self.nop_phase = 0
+        self.local_vars = {}
+
+    def import_local(self, node_id, value):
+        """Imports the given value as a local in this interpreter state."""
+        local_node, = yield [("CN", [])]
+        yield [
+            ("CE", [self.gc_root_node, local_node]),
+            ("CD", [local_node, "value", value])]
+        self.local_vars[node_id] = local_node
+        raise primitive_functions.PrimitiveFinished(None)
+
+    def schedule_nop(self):
+        """Increments the nop-phase. If a nop should be performed, then True is returned.
+           Otherwise, False."""
+        self.nop_phase += 1
+        if self.nop_phase == self.nop_period:
+            self.nop_phase = 0
+            return True
+        else:
+            return False
+
+    def update_result(self, new_result):
+        """Sets the current result to the given value, if it is not None."""
+        if new_result is not None:
+            self.current_result = new_result
+
+    def get_task_root(self):
+        """Gets the task root node id."""
+        return self.keyword_arg_dict['task_root']
+
+    def get_kernel(self):
+        """Gets the Modelverse kernel instance."""
+        return self.keyword_arg_dict['mvk']
+
+    def interpret(self, instruction):
+        """Interprets the given instruction and returns the current result."""
+        instruction_type = type(instruction)
+        if instruction_type in InterpreterState.INTERPRETERS:
+            # Interpret the instruction.
+            yield [("CALL_ARGS",
+                    [InterpreterState.INTERPRETERS[instruction_type], (self, instruction)])]
+
+            # Maybe perform a nop.
+            if self.schedule_nop():
+                yield None
+
+            # Interpret the next instruction.
+            next_instruction = instruction.next_instruction
+            if next_instruction is not None:
+                yield [("TAIL_CALL_ARGS", [self.interpret, (next_instruction,)])]
+            else:
+                raise primitive_functions.PrimitiveFinished(self.current_result)
+        else:
+            raise jit_runtime.JitCompilationFailedException(
+                'Unknown bytecode instruction: %r' % instruction)
+
+    def interpret_select(self, instruction):
+        """Interprets the given 'select' instruction."""
+        cond_node, = yield [("CALL_ARGS", [self.interpret, (instruction.condition,)])]
+        cond_val, = yield [("RV", [cond_node])]
+        if cond_val:
+            yield [("TAIL_CALL_ARGS", [self.interpret, (instruction.if_clause,)])]
+        elif instruction.else_clause is not None:
+            yield [("TAIL_CALL_ARGS", [self.interpret, (instruction.else_clause,)])]
+        else:
+            raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_while(self, instruction):
+        """Interprets the given 'while' instruction."""
+        def __handle_break(exception):
+            if exception.loop == instruction:
+                # End the loop.
+                raise primitive_functions.PrimitiveFinished(None)
+            else:
+                # Propagate the exception to the next 'while' loop.
+                raise exception
+
+        def __handle_continue(exception):
+            if exception.loop == instruction:
+                # Restart the loop.
+                yield [("TAIL_CALL_ARGS", [self.interpret, (instruction,)])]
+            else:
+                # Propagate the exception to the next 'while' loop.
+                raise exception
+
+        yield [("TRY", [])]
+        yield [("CATCH", [BreakException, __handle_break])]
+        yield [("CATCH", [ContinueException, __handle_continue])]
+        while 1:
+            cond_node, = yield [("CALL_ARGS", [self.interpret, (instruction.condition,)])]
+            cond_val, = yield [("RV", [cond_node])]
+            if cond_val:
+                yield [("CALL_ARGS", [self.interpret, (instruction.body,)])]
+            else:
+                break
+        yield [("END_TRY", [])]
+
+        raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_break(self, instruction):
+        """Interprets the given 'break' instruction."""
+        raise BreakException(instruction.loop)
+
+    def interpret_continue(self, instruction):
+        """Interprets the given 'continue' instruction."""
+        raise ContinueException(instruction.loop)
+
+    def interpret_return(self, instruction):
+        """Interprets the given 'return' instruction."""
+        if instruction.value is None:
+            raise primitive_functions.InterpretedFunctionFinished(None)
+        else:
+            return_node, = yield [("CALL_ARGS", [self.interpret, (instruction.value,)])]
+            raise primitive_functions.InterpretedFunctionFinished(return_node)
+
+    def interpret_call(self, instruction):
+        """Interprets the given 'call' instruction."""
+        target, = yield [("CALL_ARGS", [self.interpret, (instruction.target,)])]
+        named_args = {}
+        for name, arg_instruction in instruction.argument_list:
+            arg, = yield [("CALL_ARGS", [self.interpret, (arg_instruction,)])]
+            named_args[name] = arg
+
+        kwargs = {'function_id': target, 'named_arguments': named_args}
+        kwargs.update(self.keyword_arg_dict)
+        result, = yield [("CALL_KWARGS", [jit_runtime.call_function, kwargs])]
+        if result is not None:
+            yield [("CE", [self.gc_root_node, result])]
+            self.update_result(result)
+
+        raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_constant(self, instruction):
+        """Interprets the given 'constant' instruction."""
+        self.update_result(instruction.constant_id)
+        raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_input(self, instruction):
+        """Interprets the given 'input' instruction."""
+        result, = yield [("CALL_KWARGS", [jit_runtime.get_input, self.keyword_arg_dict])]
+        self.update_result(result)
+        yield [("CE", [self.gc_root_node, result])]
+        raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_output(self, instruction):
+        """Interprets the given 'output' instruction."""
+        output_value, = yield [("CALL_ARGS", [self.interpret, (instruction.value,)])]
+        task_root = self.get_task_root()
+        last_output, last_output_link, new_last_output = yield [
+            ("RD", [task_root, "last_output"]),
+            ("RDE", [task_root, "last_output"]),
+            ("CN", [])
+        ]
+        yield [
+            ("CD", [last_output, "value", output_value]),
+            ("CD", [last_output, "next", new_last_output]),
+            ("CD", [task_root, "last_output", new_last_output]),
+            ("DE", [last_output_link])
+        ]
+        yield None
+        raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_declare(self, instruction):
+        """Interprets a 'declare' (local) instruction."""
+        node_id = instruction.variable.node_id
+        if node_id in self.local_vars:
+            self.update_result(self.local_vars[node_id])
+            raise primitive_functions.PrimitiveFinished(None)
+        else:
+            local_node, = yield [("CN", [])]
+            yield [("CE", [self.gc_root_node, local_node])]
+            self.update_result(local_node)
+            self.local_vars[node_id] = local_node
+            raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_global(self, instruction):
+        """Interprets a (declare) 'global' instruction."""
+        var_name = instruction.variable.name
+        task_root = self.get_task_root()
+        _globals, = yield [("RD", [task_root, "globals"])]
+        global_var, = yield [("RD", [_globals, var_name])]
+
+        if global_var is None:
+            global_var, = yield [("CN", [])]
+            yield [("CD", [_globals, var_name, global_var])]
+
+        self.update_result(global_var)
+        yield [("CE", [self.gc_root_node, global_var])]
+        raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_resolve(self, instruction):
+        """Interprets a 'resolve' instruction."""
+        node_id = instruction.variable.node_id
+        if node_id in self.local_vars:
+            self.update_result(self.local_vars[node_id])
+            raise primitive_functions.PrimitiveFinished(None)
+        else:
+            task_root = self.get_task_root()
+            var_name = instruction.variable.name
+            _globals, = yield [("RD", [task_root, "globals"])]
+            global_var, = yield [("RD", [_globals, var_name])]
+
+            if global_var is None:
+                raise Exception(jit_runtime.GLOBAL_NOT_FOUND_MESSAGE_FORMAT % var_name)
+
+            mvk = self.get_kernel()
+            if mvk.suggest_function_names and mvk.jit.get_global_body_id(var_name) is None:
+                global_val, = yield [("RD", [global_var, "value"])]
+                if global_val is not None:
+                    func_body, = yield [("RD", [global_val, "body"])]
+                    if func_body is not None:
+                        mvk.jit.register_global(func_body, var_name)
+
+            self.update_result(global_var)
+            yield [("CE", [self.gc_root_node, global_var])]
+            raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_access(self, instruction):
+        """Interprets an 'access' instruction."""
+        pointer_node, = yield [("CALL_ARGS", [self.interpret, (instruction.pointer,)])]
+        value_node, = yield [("RD", [pointer_node, "value"])]
+        self.update_result(value_node)
+        yield [("CE", [self.gc_root_node, value_node])]
+        raise primitive_functions.PrimitiveFinished(None)
+
+    def interpret_assign(self, instruction):
+        """Interprets an 'assign' instruction."""
+        pointer_node, = yield [("CALL_ARGS", [self.interpret, (instruction.pointer,)])]
+        value_node, = yield [("CALL_ARGS", [self.interpret, (instruction.value,)])]
+        value_link, = yield [("RDE", [pointer_node, "value"])]
+        yield [
+            ("CD", [pointer_node, "value", value_node]),
+            ("DE", [value_link])]
+        raise primitive_functions.PrimitiveFinished(None)
+
+    INTERPRETERS = {
+        bytecode_ir.SelectInstruction: interpret_select,
+        bytecode_ir.WhileInstruction: interpret_while,
+        bytecode_ir.BreakInstruction: interpret_break,
+        bytecode_ir.ContinueInstruction: interpret_continue,
+        bytecode_ir.ReturnInstruction: interpret_return,
+        bytecode_ir.CallInstruction: interpret_call,
+        bytecode_ir.ConstantInstruction: interpret_constant,
+        bytecode_ir.InputInstruction: interpret_input,
+        bytecode_ir.OutputInstruction: interpret_output,
+        bytecode_ir.DeclareInstruction: interpret_declare,
+        bytecode_ir.GlobalInstruction: interpret_global,
+        bytecode_ir.ResolveInstruction: interpret_resolve,
+        bytecode_ir.AccessInstruction: interpret_access,
+        bytecode_ir.AssignInstruction: interpret_assign
+    }
+
+def interpret_bytecode_function(function_name, body_bytecode, local_arguments, keyword_arguments):
+    """Interprets the bytecode function with the given name, body, named arguments and
+       keyword arguments."""
+    yield [("DEBUG_INFO", [function_name, None, jit_runtime.BYTECODE_INTERPRETER_ORIGIN_NAME])]
+    task_root = keyword_arguments['task_root']
+    gc_root_node, = yield [("CN", [])]
+    gc_root_edge, = yield [("CE", [task_root, gc_root_node])]
+    interpreter = InterpreterState(gc_root_node, keyword_arguments)
+    for param_id, arg_node in local_arguments.items():
+        yield [("CALL_ARGS", [interpreter.import_local, (param_id, arg_node)])]
+
+    def __handle_return(exception):
+        yield [("DE", [gc_root_edge])]
+        raise primitive_functions.PrimitiveFinished(exception.result)
+
+    def __handle_break(_):
+        raise jit_runtime.UnreachableCodeException(
+            "Function '%s' tries to break out of a loop that is not currently executing." %
+            function_name)
+
+    def __handle_continue(_):
+        raise jit_runtime.UnreachableCodeException(
+            "Function '%s' tries to continue a loop that is not currently executing." %
+            function_name)
+
+    # Perform a nop before interpreting the function.
+    yield None
+
+    yield [("TRY", [])]
+    yield [("CATCH", [primitive_functions.InterpretedFunctionFinished, __handle_return])]
+    yield [("CATCH", [BreakException, __handle_break])]
+    yield [("CATCH", [ContinueException, __handle_continue])]
+    yield [("CALL_ARGS", [interpreter.interpret, (body_bytecode,)])]
+    yield [("END_TRY", [])]
+    raise jit_runtime.UnreachableCodeException("Function '%s' failed to return." % function_name)
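A note on the control-flow design above: 'break' and 'continue' are interpreted by raising BreakException/ContinueException tagged with the loop they target, and interpret_while installs handlers that either stop or restart that loop and re-raise anything aimed at an enclosing loop. A plain-Python analogue of that pattern, without the kernel's yield-based request protocol (names here are illustrative only):

    # Plain-Python analogue of the exception-based loop control used above.
    class LoopBreak(Exception):
        def __init__(self, loop):
            Exception.__init__(self)
            self.loop = loop

    class LoopContinue(Exception):
        def __init__(self, loop):
            Exception.__init__(self)
            self.loop = loop

    def run_while(loop, eval_condition, run_body):
        """Runs one loop; handles break/continue aimed at it, re-raises the rest."""
        while eval_condition():
            try:
                run_body()
            except LoopBreak as ex:
                if ex.loop is loop:
                    return
                raise  # aimed at an enclosing loop
            except LoopContinue as ex:
                if ex.loop is not loop:
                    raise
                # aimed at this loop: fall through to the next iteration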

+ 8 - 6
kernel/modelverse_jit/bytecode_ir.py

@@ -11,9 +11,9 @@ class Instruction(object):
           next instruction."""
         raise NotImplementedError()
 
-    def get_reachable(self):
+    def get_reachable(self, filter_children=lambda _: True):
         """Gets the set of all instructions that are reachable from the given instruction, including
-           this instruction."""
+           this instruction. A function can be used to filter out certain instructions' children."""
         results = set()
         stack = [self]
         while len(stack) > 0:
@@ -22,10 +22,12 @@ class Instruction(object):
             next_instr = instr.next_instruction
             if next_instr is not None and next_instr not in results:
                 stack.append(next_instr)
-            for other in instr.get_directly_reachable():
-                if other not in results:
-                    assert other is not None
-                    stack.append(other)
+
+            if filter_children(instr):
+                for other in instr.get_directly_reachable():
+                    if other not in results:
+                        assert other is not None
+                        stack.append(other)
 
         return results
 

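The new filter_children parameter above prunes the traversal: when the predicate rejects an instruction, its directly reachable children are skipped, while the linear next_instruction chain is still followed (favor_loops and favor_small_loops pass a filter that stops the walk at 'break'/'continue' instructions). A self-contained sketch of the same worklist idea, using a toy Node class rather than the bytecode_ir API:

    # Toy stand-in for Instruction.get_reachable with a child filter.
    class Node(object):
        def __init__(self, children=None, next_node=None):
            self.children = children or []
            self.next_node = next_node

    def reachable(root, filter_children=lambda _: True):
        """Collects nodes reachable from root; skips children of filtered-out nodes."""
        results = set()
        stack = [root]
        while stack:
            node = stack.pop()
            results.add(node)
            if node.next_node is not None and node.next_node not in results:
                stack.append(node.next_node)
            if filter_children(node):
                for child in node.children:
                    if child not in results:
                        stack.append(child)
        return results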
+ 9 - 8
kernel/modelverse_jit/bytecode_to_tree.py

@@ -233,6 +233,14 @@ def create_indirect_call(target, argument_list):
         [('function_id', target), ('named_arguments', dict_literal)],
         tree_ir.LoadLocalInstruction(jit_runtime.KWARGS_PARAMETER_NAME))
 
+def create_return(return_value):
+    """Creates a return statement that returns the given return value."""
+    return tree_ir.ReturnInstruction(
+        tree_ir.CompoundInstruction(
+            return_value,
+            tree_ir.DeleteEdgeInstruction(
+                tree_ir.LoadLocalInstruction(jit_runtime.LOCALS_EDGE_NAME))))
+
 def with_debug_info_trace(instruction, debug_info, function_name):
     """Prepends the given instruction with a tracing instruction that prints
        the given debug information and function name."""
@@ -340,13 +348,6 @@ class AnalysisState(object):
 
     def analyze_return(self, instruction):
         """Tries to analyze the given 'return' instruction."""
-        def create_return(return_value):
-            return tree_ir.ReturnInstruction(
-                tree_ir.CompoundInstruction(
-                    return_value,
-                    tree_ir.DeleteEdgeInstruction(
-                        tree_ir.LoadLocalInstruction(jit_runtime.LOCALS_EDGE_NAME))))
-
         if instruction.value is None:
             raise primitive_functions.PrimitiveFinished(
                 create_return(
@@ -499,7 +500,7 @@ class AnalysisState(object):
         # lines of:
         #
         #     _globals, = yield [("RD", [task_root, "globals"])]
-        #     global_var = yield [("RD", [_globals, var_name])]
+        #     global_var, = yield [("RD", [_globals, var_name])]
         #
         #     if global_var is None:
         #         global_var, = yield [("CN", [])]

+ 2 - 0
kernel/modelverse_jit/cfg_optimization.py

@@ -522,6 +522,7 @@ def optimize(entry_point, jit):
     optimize_graph_flow(entry_point)
     elide_local_checks(entry_point)
     optimize_graph_flow(entry_point)
+    merge_blocks(entry_point)
     eliminate_trivial_phis(entry_point)
     entry_point = cfg_ssa_construction.construct_ssa_form(entry_point)
     if jit.direct_calls_allowed:
@@ -536,6 +537,7 @@ def optimize(entry_point, jit):
     expand_indirect_definitions(entry_point)
     eliminate_unused_definitions(entry_point)
     merge_blocks(entry_point)
+    expand_indirect_definitions(entry_point)
     protect_from_gc(entry_point)
     elide_gc_protects(entry_point)
     eliminate_unused_definitions(entry_point)

+ 4 - 5
kernel/modelverse_jit/cfg_to_tree.py

@@ -83,16 +83,15 @@ class MultipleBlock(object):
 
     def lower_handled_blocks(self, state):
         """Lowers the handled blocks of this 'multiple' block to a tree."""
-        result = tree_ir.EmptyInstruction()
+        cond_clause_pairs = []
         for entry, block in self.handled_blocks:
-            result = tree_ir.SelectInstruction(
+            cond_clause_pairs.append((
                 tree_ir.BinaryInstruction(
                     tree_ir.LoadLocalInstruction(state.label_variable),
                     '==',
                     tree_ir.LiteralInstruction(entry.index)),
-                block.lower(state),
-                result)
-        return result
+                block.lower(state)))
+        return tree_ir.SwitchInstruction(cond_clause_pairs)
 
     def lower(self, state):
         """Lowers this 'multiple' block to a tree."""

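The rewrite above stops folding the handled blocks into a right-to-left chain of nested SelectInstruction nodes and instead builds one flat SwitchInstruction over (condition, clause) pairs, which the code generator emits as a single if/elif chain (see the SwitchInstruction added to tree_ir.py below). An illustrative sketch of the difference in emitted shape (the label variable and handlers are placeholders, not generated Modelverse code):

    def handle_block_0(): pass
    def handle_block_1(): pass
    def handle_block_2(): pass

    def dispatch_nested(label):
        # old lowering: nested if/else blocks, last handled block tested first
        if label == 2:
            handle_block_2()
        else:
            if label == 1:
                handle_block_1()
            else:
                if label == 0:
                    handle_block_0()

    def dispatch_flat(label):
        # new lowering: one flat if/elif chain in handled-block order
        if label == 0:
            handle_block_0()
        elif label == 1:
            handle_block_1()
        elif label == 2:
            handle_block_2()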
+ 211 - 95
kernel/modelverse_jit/jit.py

@@ -1,3 +1,4 @@
+import math
 import keyword
 from collections import defaultdict
 import modelverse_kernel.primitives as primitive_functions
@@ -5,6 +6,7 @@ import modelverse_jit.bytecode_parser as bytecode_parser
 import modelverse_jit.bytecode_to_tree as bytecode_to_tree
 import modelverse_jit.bytecode_to_cfg as bytecode_to_cfg
 import modelverse_jit.bytecode_ir as bytecode_ir
+import modelverse_jit.bytecode_interpreter as bytecode_interpreter
 import modelverse_jit.cfg_optimization as cfg_optimization
 import modelverse_jit.cfg_to_tree as cfg_to_tree
 import modelverse_jit.cfg_ir as cfg_ir
@@ -60,7 +62,8 @@ def create_bare_function(function_name, parameter_list, function_body):
 
 def create_function(
         function_name, parameter_list, param_dict,
-        body_param_dict, function_body, source_map_name=None):
+        body_param_dict, function_body, source_map_name=None,
+        compatible_temporary_protects=False):
     """Creates a function from the given function name, parameter list,
        variable-to-parameter name map, variable-to-local name map and
        function body. An optional source map can be included, too."""
@@ -99,7 +102,9 @@ def create_function(
 
     # Shield temporaries from the GC.
     constructed_body = tree_ir.protect_temporaries_from_gc(
-        constructed_body, tree_ir.LoadLocalInstruction(jit_runtime.LOCALS_NODE_NAME))
+        constructed_body,
+        tree_ir.LoadLocalInstruction(jit_runtime.LOCALS_NODE_NAME),
+        compatible_temporary_protects)
 
     return create_bare_function(function_name, parameter_list, constructed_body)
 
@@ -514,16 +519,14 @@ class ModelverseJit(object):
             raise JitCompilationFailedException(
                 "Function was marked '%s'." % jit_runtime.MUTABLE_FUNCTION_KEY)
 
-        constructed_function, = yield [
+        compiled_function, = yield [
             ("CALL_ARGS", [compile_function_body, (self, function_name, body_id, task_root)])]
 
         yield [("END_TRY", [])]
         del self.compilation_dependencies[body_id]
 
-        # Convert the function definition to Python code, and compile it.
-        compiled_function = self.jit_define_function(function_name, constructed_function)
-
         if self.jit_success_log_function is not None:
+            assert self.jitted_entry_points[body_id] == function_name
             self.jit_success_log_function(
                 "JIT compilation successful: (function '%s' at %d)" % (function_name, body_id))
 
@@ -546,7 +549,6 @@ class ModelverseJit(object):
            and extracts the resulting function."""
         # The comment below makes pylint shut up about our (hopefully benign) use of exec here.
         # pylint: disable=I0011,W0122
-
         if self.jit_code_log_function is not None:
             self.jit_code_log_function(function_def)
 
@@ -570,10 +572,13 @@ class ModelverseJit(object):
         if body_id is None:
             raise ValueError('body_id cannot be None')
         elif body_id in self.jitted_entry_points:
-            # We have already compiled this function.
             raise primitive_functions.PrimitiveFinished(
                 self.jit_globals[self.jitted_entry_points[body_id]])
 
+        compiled_func = self.lookup_compiled_body(body_id)
+        if compiled_func is not None:
+            raise primitive_functions.PrimitiveFinished(compiled_func)
+
         # Generate a name for the function we're about to analyze, and 're-compile'
         # it for the first time.
         function_name = self.generate_function_name(body_id, suggested_name)
@@ -718,7 +723,34 @@ class ModelverseJit(object):
                 tree_ir.LiteralInstruction(jit_runtime.FUNCTION_BODY_KEY)),
             global_name)
 
-def compile_function_body_baseline(jit, function_name, body_id, task_root, header=None):
+def compile_function_body_interpret(jit, function_name, body_id, task_root, header=None):
+    """Create a function that invokes the interpreter on the given function."""
+    (parameter_ids, parameter_list, _), = yield [
+        ("CALL_ARGS", [jit.jit_signature, (body_id,)])]
+    param_dict = dict(zip(parameter_ids, parameter_list))
+    body_bytecode, = yield [("CALL_ARGS", [jit.jit_parse_bytecode, (body_id,)])]
+    def __interpret_function(**kwargs):
+        if header is not None:
+            (done, result), = yield [("CALL_KWARGS", [header, kwargs])]
+            if done:
+                raise primitive_functions.PrimitiveFinished(result)
+
+        local_args = {}
+        inner_kwargs = dict(kwargs)
+        for param_id, name in param_dict.items():
+            local_args[param_id] = inner_kwargs[name]
+            del inner_kwargs[name]
+
+        yield [("TAIL_CALL_ARGS",
+                [bytecode_interpreter.interpret_bytecode_function,
+                 (function_name, body_bytecode, local_args, inner_kwargs)])]
+
+    jit.jit_globals[function_name] = __interpret_function
+    raise primitive_functions.PrimitiveFinished(__interpret_function)
+
+def compile_function_body_baseline(
+        jit, function_name, body_id, task_root,
+        header=None, compatible_temporary_protects=False):
     """Have the baseline JIT compile the function with the given name and body id."""
     """Have the baseline JIT compile the function with the given name and body id."""
     (parameter_ids, parameter_list, _), = yield [
     (parameter_ids, parameter_list, _), = yield [
         ("CALL_ARGS", [jit.jit_signature, (body_id,)])]
         ("CALL_ARGS", [jit.jit_signature, (body_id,)])]
@@ -737,10 +769,14 @@ def compile_function_body_baseline(jit, function_name, body_id, task_root, heade
     constructed_body, = yield [("CALL_ARGS", [optimize_tree_ir, (constructed_body,)])]
     constructed_body, = yield [("CALL_ARGS", [optimize_tree_ir, (constructed_body,)])]
 
 
     # Wrap the tree IR in a function definition.
     # Wrap the tree IR in a function definition.
+    constructed_function = create_function(
+        function_name, parameter_list, param_dict,
+        body_param_dict, constructed_body, jit.get_source_map_name(function_name),
+        compatible_temporary_protects)
+
+    # Convert the function definition to Python code, and compile it.
     raise primitive_functions.PrimitiveFinished(
     raise primitive_functions.PrimitiveFinished(
-        create_function(
-            function_name, parameter_list, param_dict,
-            body_param_dict, constructed_body, jit.get_source_map_name(function_name)))
+        jit.jit_define_function(function_name, constructed_function))
 
 
 def compile_function_body_fast(jit, function_name, body_id, _):
 def compile_function_body_fast(jit, function_name, body_id, _):
     """Have the fast JIT compile the function with the given name and body id."""
     """Have the fast JIT compile the function with the given name and body id."""
@@ -763,10 +799,11 @@ def compile_function_body_fast(jit, function_name, body_id, _):
 
     # Optimize the tree that was generated.
     constructed_body, = yield [("CALL_ARGS", [optimize_tree_ir, (constructed_body,)])]
+    constructed_function = create_bare_function(function_name, parameter_list, constructed_body)
+
+    # Convert the function definition to Python code, and compile it.
     raise primitive_functions.PrimitiveFinished(
-        create_bare_function(
-            function_name, parameter_list,
-            constructed_body))
+        jit.jit_define_function(function_name, constructed_function))
 
 def favor_large_functions(body_bytecode):
     """Computes the initial temperature of a function based on the size of
@@ -782,13 +819,7 @@ def favor_large_functions(body_bytecode):
     # in a situation where said function runs for a long time before we
     # realize that we really should have jitted it. And that's exactly what
     # this heuristic tries to avoid.
-    return (
-        len(body_bytecode.get_reachable()),
-        lambda old_value:
-        tree_ir.BinaryInstruction(
-            old_value,
-            '+',
-            tree_ir.LiteralInstruction(1)))
+    return len(body_bytecode.get_reachable()), 1
 
 def favor_small_functions(body_bytecode):
     """Computes the initial temperature of a function based on the size of
@@ -799,18 +830,15 @@ def favor_small_functions(body_bytecode):
     # of fast-jit's algorithms. So it might be cheaper to fast-jit small
     # functions and get a performance boost from that than to fast-jit large
     # functions.
-    return (
-        ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD - len(body_bytecode.get_reachable()),
-        lambda old_value:
-        tree_ir.BinaryInstruction(
-            old_value,
-            '+',
-            tree_ir.LiteralInstruction(1)))
+    return ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD - len(body_bytecode.get_reachable()), 1
 
 ADAPTIVE_JIT_LOOP_INSTRUCTION_MULTIPLIER = 4
 
-ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD = 200
-"""The threshold temperature at which fast-jit will be used."""
+ADAPTIVE_BASELINE_JIT_TEMPERATURE_THRESHOLD = 100
+"""The threshold temperature at which the adaptive JIT will use the baseline JIT."""
+
+ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD = 250
+"""The threshold temperature at which the adaptive JIT will use the fast JIT."""
 
 def favor_loops(body_bytecode):
     """Computes the initial temperature of a function. Code within a loop makes
@@ -818,22 +846,139 @@ def favor_loops(body_bytecode):
        temperature is incremented by one on every call."""
     reachable_instructions = body_bytecode.get_reachable()
     # First set the temperature to the negative number of instructions.
-    temperature = ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD - len(reachable_instructions)
+    temperature = ADAPTIVE_BASELINE_JIT_TEMPERATURE_THRESHOLD - len(reachable_instructions)
     for instruction in reachable_instructions:
         if isinstance(instruction, bytecode_ir.WhileInstruction):
             # Then increase the temperature by the number of instructions reachable
             # from loop bodies. Note that the algorithm will count nested loops twice.
             # This is actually by design.
-            loop_body_instructions = instruction.body.get_reachable()
+            loop_body_instructions = instruction.body.get_reachable(
+                lambda x: not isinstance(
+                    x, (bytecode_ir.BreakInstruction, bytecode_ir.ContinueInstruction)))
             temperature += ADAPTIVE_JIT_LOOP_INSTRUCTION_MULTIPLIER * len(loop_body_instructions)
 
-    return (
-        temperature,
-        lambda old_value:
-        tree_ir.BinaryInstruction(
-            old_value,
-            '+',
-            tree_ir.LiteralInstruction(1)))
+    return temperature, 1
+
+def favor_small_loops(body_bytecode):
+    """Computes the initial temperature of a function. Code within a loop makes
+       the function hotter; code outside loops makes the function colder. The
+       temperature is incremented by one on every call."""
+    reachable_instructions = body_bytecode.get_reachable()
+    # First set the temperature to the negative number of instructions.
+    temperature = ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD - 50 - len(reachable_instructions)
+    for instruction in reachable_instructions:
+        if isinstance(instruction, bytecode_ir.WhileInstruction):
+            # Then increase the temperature by the number of instructions reachable
+            # from loop bodies. Note that the algorithm will count nested loops twice.
+            # This is actually by design.
+            loop_body_instructions = instruction.body.get_reachable(
+                lambda x: not isinstance(
+                    x, (bytecode_ir.BreakInstruction, bytecode_ir.ContinueInstruction)))
+            temperature += (
+                (ADAPTIVE_JIT_LOOP_INSTRUCTION_MULTIPLIER ** 2) *
+                int(math.sqrt(len(loop_body_instructions))))
+
+    return temperature, max(int(math.log(len(reachable_instructions), 2)), 1)
+
+class AdaptiveJitState(object):
+    """Shared state for adaptive JIT compilation."""
+    def __init__(
+            self, temperature_counter_name,
+            temperature_increment, can_rejit_name):
+        self.temperature_counter_name = temperature_counter_name
+        self.temperature_increment = temperature_increment
+        self.can_rejit_name = can_rejit_name
+
+    def compile_interpreter(
+            self, jit, function_name, body_id, task_root):
+        """Compiles the given function as a function that controls the temperature counter
+           and calls the interpreter."""
+        def __increment_temperature(**kwargs):
+            if jit.jit_globals[self.can_rejit_name]:
+                temperature_counter_val = jit.jit_globals[self.temperature_counter_name]
+                temperature_counter_val += self.temperature_increment
+                jit.jit_globals[self.temperature_counter_name] = temperature_counter_val
+                if temperature_counter_val >= ADAPTIVE_BASELINE_JIT_TEMPERATURE_THRESHOLD:
+                    if temperature_counter_val >= ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD:
+                        yield [
+                            ("CALL_ARGS",
+                             [jit.jit_rejit,
+                              (task_root, body_id, function_name, compile_function_body_fast)])]
+                    else:
+                        yield [
+                            ("CALL_ARGS",
+                             [jit.jit_rejit,
+                              (task_root, body_id, function_name, self.compile_baseline)])]
+                    result, = yield [("CALL_KWARGS", [jit.jit_globals[function_name], kwargs])]
+                    raise primitive_functions.PrimitiveFinished((True, result))
+
+            raise primitive_functions.PrimitiveFinished((False, None))
+
+        yield [
+            ("TAIL_CALL_ARGS",
+             [compile_function_body_interpret,
+              (jit, function_name, body_id, task_root, __increment_temperature)])]
+
+    def compile_baseline(
+            self, jit, function_name, body_id, task_root):
+        """Compiles the given function with the baseline JIT, and inserts logic that controls
+           the temperature counter."""
+        (_, parameter_list, _), = yield [
+            ("CALL_ARGS", [jit.jit_signature, (body_id,)])]
+
+        # This tree represents the following logic:
+        #
+        # if can_rejit:
+        #     global temperature_counter
+        #     temperature_counter = temperature_counter + temperature_increment
+        #     if temperature_counter >= ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD:
+        #         yield [("CALL_KWARGS", [jit_runtime.JIT_REJIT_FUNCTION_NAME, {...}])]
+        #         yield [("TAIL_CALL_KWARGS", [function_name, {...}])]
+
+        header = tree_ir.SelectInstruction(
+            tree_ir.LoadGlobalInstruction(self.can_rejit_name),
+            tree_ir.create_block(
+                tree_ir.DeclareGlobalInstruction(self.temperature_counter_name),
+                tree_ir.IgnoreInstruction(
+                    tree_ir.StoreGlobalInstruction(
+                        self.temperature_counter_name,
+                        tree_ir.BinaryInstruction(
+                            tree_ir.LoadGlobalInstruction(self.temperature_counter_name),
+                            '+',
+                            tree_ir.LiteralInstruction(self.temperature_increment)))),
+                tree_ir.SelectInstruction(
+                    tree_ir.BinaryInstruction(
+                        tree_ir.LoadGlobalInstruction(self.temperature_counter_name),
+                        '>=',
+                        tree_ir.LiteralInstruction(ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD)),
+                    tree_ir.create_block(
+                        tree_ir.RunGeneratorFunctionInstruction(
+                            tree_ir.LoadGlobalInstruction(jit_runtime.JIT_REJIT_FUNCTION_NAME),
+                            tree_ir.DictionaryLiteralInstruction([
+                                (tree_ir.LiteralInstruction('task_root'),
+                                 bytecode_to_tree.load_task_root()),
+                                (tree_ir.LiteralInstruction('body_id'),
+                                 tree_ir.LiteralInstruction(body_id)),
+                                (tree_ir.LiteralInstruction('function_name'),
+                                 tree_ir.LiteralInstruction(function_name)),
+                                (tree_ir.LiteralInstruction('compile_function_body'),
+                                 tree_ir.LoadGlobalInstruction(
+                                     jit_runtime.JIT_COMPILE_FUNCTION_BODY_FAST_FUNCTION_NAME))]),
+                            result_type=tree_ir.NO_RESULT_TYPE),
+                        bytecode_to_tree.create_return(
+                            tree_ir.create_jit_call(
+                                tree_ir.LoadGlobalInstruction(function_name),
+                                [(name, tree_ir.LoadLocalInstruction(name))
+                                 for name in parameter_list],
+                                tree_ir.LoadLocalInstruction(jit_runtime.KWARGS_PARAMETER_NAME)))),
+                    tree_ir.EmptyInstruction())),
+            tree_ir.EmptyInstruction())
+
+        # Compile with the baseline JIT, and insert the header.
+        yield [
+            ("TAIL_CALL_ARGS",
+             [compile_function_body_baseline,
+              (jit, function_name, body_id, task_root, header, True)])]
 
 def compile_function_body_adaptive(
         jit, function_name, body_id, task_root,
@@ -846,72 +991,43 @@ def compile_function_body_adaptive(
     # and gets incremented every time the function is executed.
 
     body_bytecode, = yield [("CALL_ARGS", [jit.jit_parse_bytecode, (body_id,)])]
-    initial_temperature, increment_temperature = temperature_heuristic(body_bytecode)
-    print('Initial temperature for %s: %d' % (function_name, initial_temperature))
+    initial_temperature, temperature_increment = temperature_heuristic(body_bytecode)
+    if jit.jit_success_log_function is not None:
+        jit.jit_success_log_function(
+            "Initial temperature for '%s': %d" % (function_name, initial_temperature))
 
     if initial_temperature >= ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD:
         # Initial temperature exceeds the fast-jit threshold.
         # Compile this thing with fast-jit right away.
+        if jit.jit_success_log_function is not None:
+            jit.jit_success_log_function(
+                "Compiling '%s' with fast-jit." % function_name)
         yield [
             ("TAIL_CALL_ARGS",
              [compile_function_body_fast, (jit, function_name, body_id, task_root)])]
 
-    (_, parameter_list, _), = yield [
-        ("CALL_ARGS", [jit.jit_signature, (body_id,)])]
-
     temperature_counter_name = jit.import_value(
         initial_temperature, function_name + "_temperature_counter")
 
     can_rejit_name = jit.get_can_rejit_name(function_name)
     jit.jit_globals[can_rejit_name] = True
 
-    # This tree represents the following logic:
-    #
-    # if can_rejit:
-    #     global temperature_counter
-    #     temperature_counter = increment_temperature(temperature_counter)
-    #     if temperature_counter >= ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD:
-    #         yield [("CALL_KWARGS", [jit_runtime.JIT_REJIT_FUNCTION_NAME, {...}])]
-    #         yield [("TAIL_CALL_KWARGS", [function_name, {...}])]
-
-    header = tree_ir.SelectInstruction(
-        tree_ir.LoadGlobalInstruction(can_rejit_name),
-        tree_ir.create_block(
-            tree_ir.DeclareGlobalInstruction(temperature_counter_name),
-            tree_ir.IgnoreInstruction(
-                tree_ir.StoreGlobalInstruction(
-                    temperature_counter_name,
-                    increment_temperature(
-                        tree_ir.LoadGlobalInstruction(temperature_counter_name)))),
-            tree_ir.SelectInstruction(
-                tree_ir.BinaryInstruction(
-                    tree_ir.LoadGlobalInstruction(temperature_counter_name),
-                    '>=',
-                    tree_ir.LiteralInstruction(ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD)),
-                tree_ir.create_block(
-                    tree_ir.RunGeneratorFunctionInstruction(
-                        tree_ir.LoadGlobalInstruction(jit_runtime.JIT_REJIT_FUNCTION_NAME),
-                        tree_ir.DictionaryLiteralInstruction([
-                            (tree_ir.LiteralInstruction('task_root'),
-                             bytecode_to_tree.load_task_root()),
-                            (tree_ir.LiteralInstruction('body_id'),
-                             tree_ir.LiteralInstruction(body_id)),
-                            (tree_ir.LiteralInstruction('function_name'),
-                             tree_ir.LiteralInstruction(function_name)),
-                            (tree_ir.LiteralInstruction('compile_function_body'),
-                             tree_ir.LoadGlobalInstruction(
-                                 jit_runtime.JIT_COMPILE_FUNCTION_BODY_FAST_FUNCTION_NAME))]),
-                        result_type=tree_ir.NO_RESULT_TYPE),
-                    tree_ir.create_jit_call(
-                        tree_ir.LoadGlobalInstruction(function_name),
-                        [(name, tree_ir.LoadLocalInstruction(name)) for name in parameter_list],
-                        tree_ir.LoadLocalInstruction(jit_runtime.KWARGS_PARAMETER_NAME),
-                        tree_ir.RunTailGeneratorFunctionInstruction)),
-                tree_ir.EmptyInstruction())),
-        tree_ir.EmptyInstruction())
-
-    # Compile with the baseline JIT, and insert the header.
-    yield [
-        ("TAIL_CALL_ARGS",
-         [compile_function_body_baseline,
-          (jit, function_name, body_id, task_root, header)])]
+    state = AdaptiveJitState(temperature_counter_name, temperature_increment, can_rejit_name)
+
+    if initial_temperature >= ADAPTIVE_BASELINE_JIT_TEMPERATURE_THRESHOLD:
+        # Initial temperature exceeds the baseline JIT threshold.
+        # Compile this thing with baseline JIT right away.
+        if jit.jit_success_log_function is not None:
+            jit.jit_success_log_function(
+                "Compiling '%s' with baseline-jit." % function_name)
+        yield [
+            ("TAIL_CALL_ARGS",
+             [state.compile_baseline, (jit, function_name, body_id, task_root)])]
+    else:
+        # Looks like we'll use the interpreter initially.
+        if jit.jit_success_log_function is not None:
+            jit.jit_success_log_function(
+                "Compiling '%s' with bytecode-interpreter." % function_name)
+        yield [
+            ("TAIL_CALL_ARGS",
+             [state.compile_interpreter, (jit, function_name, body_id, task_root)])]

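The net effect of the jit.py changes above is a three-tier adaptive pipeline: a function starts in the bytecode interpreter, is re-jitted with the baseline JIT once its temperature counter reaches ADAPTIVE_BASELINE_JIT_TEMPERATURE_THRESHOLD, and with the fast JIT once it reaches ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD; the heuristic supplies the initial temperature and the per-call increment. A condensed, illustrative view of that decision (thresholds from the diff; pick_tier itself is not part of jit.py):

    ADAPTIVE_BASELINE_JIT_TEMPERATURE_THRESHOLD = 100
    ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD = 250

    def pick_tier(temperature):
        """Maps a temperature value to the compiler the adaptive JIT would pick."""
        if temperature >= ADAPTIVE_FAST_JIT_TEMPERATURE_THRESHOLD:
            return 'fast-jit'
        elif temperature >= ADAPTIVE_BASELINE_JIT_TEMPERATURE_THRESHOLD:
            return 'baseline-jit'
        else:
            return 'bytecode-interpreter'

    # Each call adds the heuristic's increment, so a hot function climbs tiers:
    #     temperature += temperature_increment
    #     pick_tier(temperature)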
+ 3 - 0
kernel/modelverse_jit/runtime.py

@@ -43,6 +43,9 @@ LOCALS_EDGE_NAME = "jit_locals_edge"
 GLOBAL_NOT_FOUND_MESSAGE_FORMAT = "Not found as global: %s"
 """The format of the 'not found as global' message. Takes a single argument."""
 
+BYTECODE_INTERPRETER_ORIGIN_NAME = "bytecode-interpreter"
+"""The origin name for functions that were produced by the bytecode interpreter."""
+
 BASELINE_JIT_ORIGIN_NAME = "baseline-jit"
 """The origin name for functions that were produced by the baseline JIT."""
 

+ 60 - 2
kernel/modelverse_jit/tree_ir.py

@@ -112,7 +112,7 @@ class Instruction(object):
 
     def has_result_temporary(self):
         """Tells if this instruction stores its result in a temporary."""
-        return True
+        return self.has_result()
 
     def generate_python_def(self, code_generator):
         """Generates a Python statement that executes this instruction.
@@ -366,6 +366,52 @@ class SelectInstruction(Instruction):
     def __repr__(self):
         return "SelectInstruction(%r, %r, %r)" % (self.condition, self.if_clause, self.else_clause)
 
+class SwitchInstruction(VoidInstruction):
+    """An instruction that evaluates an instruction based on which condition matches."""
+    def __init__(self, conditions_and_clauses):
+        VoidInstruction.__init__(self)
+        self.conditions_and_clauses = conditions_and_clauses
+        assert not any([cond.has_definition() for cond, _ in self.conditions_and_clauses])
+
+    def simplify_node(self):
+        """Applies basic simplification to this instruction only."""
+        if len(self.conditions_and_clauses) == 0:
+            return EmptyInstruction()
+        elif len(self.conditions_and_clauses) == 1:
+            cond, clause = next(iter(self.conditions_and_clauses))
+            return SelectInstruction(cond, IgnoreInstruction(clause), EmptyInstruction())
+        else:
+            return self
+
+    def get_children(self):
+        """Gets this instruction's sequence of child instructions."""
+        results = []
+        for cond, body in self.conditions_and_clauses:
+            results.append(cond)
+            results.append(body)
+        return results
+
+    def create(self, new_children):
+        """Creates a new instruction of this type from the given sequence of child instructions."""
+        new_pairs = []
+        for i in xrange(len(self.conditions_and_clauses)):
+            new_pairs.append((new_children[2 * i], new_children[2 * i + 1]))
+        return SwitchInstruction(new_pairs)
+
+    def generate_python_def(self, code_generator):
+        """Generates Python code for this instruction."""
+        if_keyword = 'if '
+        for condition, clause in self.conditions_and_clauses:
+            code_generator.append_line(
+                if_keyword + condition.generate_python_use(code_generator) + ':')
+            code_generator.increase_indentation()
+            clause.generate_python_def(code_generator)
+            code_generator.decrease_indentation()
+            if_keyword = 'elif '
+
+    def __repr__(self):
+        return "SwitchInstruction(%r)" % self.conditions_and_clauses
+
 class ReturnInstruction(VoidInstruction):
     """Represents a return-instruction."""
     def __init__(self, value):
@@ -1750,6 +1796,8 @@ def iterate_as_stack(instruction, stack_iterator):
         for child in children:
             # Push all children onto the stack.
             iterate_as_stack(child, stack_iterator)
+
+        stack_iterator.before_pop_args(instruction)
         for child in children:
             # Pop all children from the stack.
             stack_iterator.pop()
@@ -1761,6 +1809,10 @@ class StackIterator(object):
     def __init__(self, stack=None):
     def __init__(self, stack=None):
         self.stack = [] if stack is None else stack
         self.stack = [] if stack is None else stack
 
 
+    def before_pop_args(self, instruction):
+        """Performs an action before the given instruction's arguments are popped."""
+        pass
+
     def pop(self):
     def pop(self):
         """Pops an instruction from the stack."""
         """Pops an instruction from the stack."""
         self.stack.pop()
         self.stack.pop()
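
To illustrate the new before_pop_args hook, a hypothetical StackIterator subclass that merely records which instructions are about to have their arguments popped; the subclass name and its attribute are invented for this sketch:

    class RecordingStackIterator(StackIterator):
        """Stack iterator that remembers every instruction whose arguments get popped."""
        def __init__(self, stack=None):
            StackIterator.__init__(self, stack)
            self.popping_instructions = []

        def before_pop_args(self, instruction):
            # iterate_as_stack calls this once all of the instruction's children
            # have been pushed, right before they are popped again.
            self.popping_instructions.append(instruction)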
@@ -1797,7 +1849,7 @@ class StackIterator(object):
 
 
         return results
         return results
 
 
-def protect_temporaries_from_gc(instruction, connected_node):
+def protect_temporaries_from_gc(instruction, connected_node, fast_jit_compat=False):
     """Protects temporaries from the garbage collector by connecting them to the given node."""
     """Protects temporaries from the garbage collector by connecting them to the given node."""
     # # The reasoning behind this function
     # # The reasoning behind this function
     #
     #
@@ -1821,6 +1873,12 @@ def protect_temporaries_from_gc(instruction, connected_node):
             StackIterator.__init__(self, stack)
             StackIterator.__init__(self, stack)
             self.gc_temporaries = set() if gc_temporaries is None else gc_temporaries
             self.gc_temporaries = set() if gc_temporaries is None else gc_temporaries
 
 
+        def before_pop_args(self, instruction):
+            """Performs an action before the given instruction's arguments are popped."""
+            if fast_jit_compat and isinstance(instruction, DictionaryLiteralInstruction):
+                for instruction_set in self.stack:
+                    self.gc_temporaries.update(instruction_set)
+
         def push(self, instruction):
         def push(self, instruction):
             """Pushes an instruction onto the stack."""
             """Pushes an instruction onto the stack."""
             if isinstance(instruction, (
             if isinstance(instruction, (

+ 1 - 1
kernel/modelverse_kernel/primitives.py

@@ -18,7 +18,7 @@ class InterpretedFunctionFinished(Exception):
 #
 #
 # ### Rationale for __exception_return
 # ### Rationale for __exception_return
 #
 #
-# __exception_return is a useful mechanism because it allows us to have an __call_function
+# __exception_return is a useful mechanism because it allows us to have a __call_function
 # implementation that has O(1) state read overhead. A previous implementation of
 # implementation that has O(1) state read overhead. A previous implementation of
 # __call_function checked if the caller's frame had been popped whenever
 # __call_function checked if the caller's frame had been popped whenever
 # ModelverseKernel.execute_yield threw a StopIteration exception. However, that incurs O(n) overhead
 # ModelverseKernel.execute_yield threw a StopIteration exception. However, that incurs O(n) overhead
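
To make the O(1) claim above concrete, here is a deliberately simplified, hypothetical sketch of the exception-return pattern; FunctionFinishedSketch, run_interpreted and call_function_sketch are placeholder names and do not mirror the kernel's actual InterpretedFunctionFinished or __call_function code:

    class FunctionFinishedSketch(Exception):
        """Signals that an interpreted function has produced its return value."""
        def __init__(self, result):
            Exception.__init__(self)
            self.result = result

    def run_interpreted(body):
        # Interpret 'body'; when a return is reached, raise instead of threading
        # the value back through every intermediate frame.
        raise FunctionFinishedSketch(body)  # placeholder result

    def call_function_sketch(body):
        try:
            run_interpreted(body)
        except FunctionFinishedSketch as ex:
            # The return value travels with the exception, so the caller never has
            # to scan the state to find out whether its own frame was popped.
            return ex.result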

+ 148 - 17
performance/perf2tex.py

@@ -1,5 +1,7 @@
 """Converts performance data files (as produced by utils.py) to LaTeX charts."""
 """Converts performance data files (as produced by utils.py) to LaTeX charts."""
 
 
+import argparse
+import colorsys
 import utils
 import utils
 
 
 # Generated LaTeX is based on the accepted answer to
 # Generated LaTeX is based on the accepted answer to
@@ -7,15 +9,11 @@ import utils
 
 
 # pylint: disable=I0011,W0141
 # pylint: disable=I0011,W0141
 
 
-LATEX_COLORS = [
-    ('chartBlue', 0x4F81BD),
-    ('chartRed', 0xC0504D),
-    ('chartGreen', 0x9BBB59),
-    ('chartPurple', 0x9F4C7C),
-    ('chartDarkYellow', 0xCCCC00)
-]
+COLOR_SCHEME_MIN_COLOR = (36, 255, 106)
+COLOR_SCHEME_MAX_COLOR = (216, 33, 0)
 
 
 LATEX_HEADER = r"""\documentclass[12pt,a4paper,onecolumn,openright]{report}
 LATEX_HEADER = r"""\documentclass[12pt,a4paper,onecolumn,openright]{report}
+\usepackage[landscape]{geometry}
 \usepackage{xcolor}
 \usepackage{xcolor}
 \usepackage{pgfplots}
 \usepackage{pgfplots}
 \usepackage{tikz}
 \usepackage{tikz}
@@ -40,8 +38,8 @@ def assemble_latex_chart(optimization_levels, color_defs, test_names, data):
     """Assembles a LaTeX chart from the given components."""
     """Assembles a LaTeX chart from the given components."""
     lines = []
     lines = []
     lines.append(LATEX_HEADER)
     lines.append(LATEX_HEADER)
-    for color in color_defs:
-        lines.append(r'\definecolor{%s}{HTML}{%X}' % color)
+    for color_name, (red, green, blue) in color_defs:
+        lines.append(r'\definecolor{%s}{HTML}{%02X%02X%02X}' % (color_name, red, green, blue))
     lines.append(LATEX_DOCUMENT_HEADER)
     lines.append(LATEX_DOCUMENT_HEADER)
     lines.append(r"""
     lines.append(r"""
     \begin{axis}[
     \begin{axis}[
@@ -81,22 +79,155 @@ def assemble_latex_chart(optimization_levels, color_defs, test_names, data):
 
 
 def create_latex_chart(perf_data):
 def create_latex_chart(perf_data):
     """Creates a LaTeX chart for the given performance data."""
     """Creates a LaTeX chart for the given performance data."""
-    unused_colors = LATEX_COLORS[:]
+    sorted_opt_levels = sort_by_runtime(perf_data)
+    color_scheme = generate_color_scheme(sorted_opt_levels)
     opt_levels = []
     opt_levels = []
     color_defs = []
     color_defs = []
     test_names = []
     test_names = []
     data = []
     data = []
-    for optimization_level, measurements in perf_data:
-        color = unused_colors.pop(0)
-        color_name, _ = color
+    for i, optimization_level in enumerate(sorted_opt_levels):
+        measurements = perf_data[optimization_level]
+        color = color_scheme[optimization_level]
+        color_name = 'chartColor%d' % i
         opt_levels.append(optimization_level)
         opt_levels.append(optimization_level)
-        color_defs.append(color)
-        data.append((color_name, measurements))
-        for name, _ in measurements:
+        color_defs.append((color_name, color))
+        data.append((color_name, measurements.items()))
+        for name, _ in measurements.items():
             if name not in test_names:
             if name not in test_names:
                 test_names.append(name)
                 test_names.append(name)
 
 
     return assemble_latex_chart(opt_levels, color_defs, test_names, data)
     return assemble_latex_chart(opt_levels, color_defs, test_names, data)
 
 
+def get_mean_runtimes(perf_data):
+    """Computes the mean run-time of every optimization level in the given
+       performance data."""
+    return {
+        opt_level: utils.mean(perf_data[opt_level].values())
+        for opt_level in perf_data.keys()
+    }
+
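
A small worked example of get_mean_runtimes; the optimization level and test names are made up, and utils.mean is the helper added to performance/utils.py further down in this change:

    perf_data = {'fast-jit': {'test_a': 2.0, 'test_b': 4.0}}
    get_mean_runtimes(perf_data)  # -> {'fast-jit': 3.0}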
+def get_baseline_optimization_level(perf_data):
+    """Gets a baseline optimization level from the given performance data.
+       This baseline optimization level is guaranteed to have a measurement for every test case.
+       If no baseline optimization level can be found, then None is returned."""
+    # First find the names of all test cases.
+    all_test_names = set()
+    for optimization_level, measurements in perf_data.items():
+        all_test_names.update(measurements.keys())
+
+    # Keep only the optimization levels that are used for every test case.
+    candidate_opt_levels = []
+    for optimization_level, measurements in perf_data.items():
+        if len(all_test_names) == len(measurements):
+            candidate_opt_levels.append(optimization_level)
+
+    if len(candidate_opt_levels) == 0:
+        # Looks like there is no baseline optimization level.
+        return None
+
+    # Pick the optimization level with the lowest total run-time as the baseline.
+    return min(candidate_opt_levels, key=lambda opt_level: sum(perf_data[opt_level].values()))
+
+def get_relative_measurements(perf_data, baseline_optimization_level):
+    """Computes a map of measurements that are relative to the given optimization level."""
+    results = {}
+    for optimization_level, measurements in perf_data.items():
+        results[optimization_level] = {}
+        for test_name, data_point in measurements.items():
+            results[optimization_level][test_name] = (
+                data_point / perf_data[baseline_optimization_level][test_name])
+
+    return results
+
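
A worked example of get_relative_measurements with made-up numbers; 'baseline-jit' acts as the baseline here, so its own measurements become 1.0:

    perf_data = {
        'baseline-jit': {'test_a': 2.0, 'test_b': 4.0},
        'fast-jit': {'test_a': 1.0, 'test_b': 3.0},
    }
    get_relative_measurements(perf_data, 'baseline-jit')
    # -> {'baseline-jit': {'test_a': 1.0, 'test_b': 1.0},
    #     'fast-jit': {'test_a': 0.5, 'test_b': 0.75}}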
+def perf_list_to_dict(perf_list):
+    """Converts performance data from a list representation to a dictionary representation."""
+    return {opt_level: dict(tests) for opt_level, tests in perf_list}
+
+def perf_dict_to_list(perf_dict):
+    """Converts performance data from a dictionary representation to a list representation."""
+    return [(opt_level, tests.items()) for opt_level, tests in perf_dict.items()]
+
+def interpolate(value_range, index, length):
+    """Uses an index and a length to interpolate in the given range."""
+    min_val, max_val = value_range
+    if length == 1:
+        return max_val
+    else:
+        return min_val + float(index) * (max_val - min_val) / float(length - 1)
+
+def sort_by_runtime(perf_data):
+    """Sorts the optimization levels by mean relative runtimes."""
+    baseline_opt_level = get_baseline_optimization_level(perf_data)
+    relative_perf = get_relative_measurements(perf_data, baseline_opt_level)
+    # Sort the optimization levels by their mean runtimes.
+    mean_runtimes = get_mean_runtimes(relative_perf)
+    return list(sorted(mean_runtimes.keys(), key=lambda opt_level: mean_runtimes[opt_level]))
+
+def generate_color_scheme(sorted_opt_levels):
+    """Assigns a color to every optimization level in the given performance data."""
+    # Assign colors to the optimization levels.
+    color_scheme = {}
+    min_hue, min_sat, min_val = colorsys.rgb_to_hsv(
+        *[c / float(255) for c in COLOR_SCHEME_MIN_COLOR])
+    max_hue, max_sat, max_val = colorsys.rgb_to_hsv(
+        *[c / float(255) for c in COLOR_SCHEME_MAX_COLOR])
+    for i, opt_level in enumerate(sorted_opt_levels):
+        hue = interpolate((min_hue, max_hue), i, len(sorted_opt_levels))
+        sat = interpolate((min_sat, max_sat), i, len(sorted_opt_levels))
+        val = interpolate((min_val, max_val), i, len(sorted_opt_levels))
+        color = [component * 255 for component in colorsys.hsv_to_rgb(hue, sat, val)]
+        color_scheme[opt_level] = color
+
+    return color_scheme
+
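
A hedged usage sketch that ties sort_by_runtime and generate_color_scheme together; the performance data here is a tiny made-up dictionary in the representation produced by perf_list_to_dict:

    perf_data = {
        'baseline-jit': {'test_a': 2.0},
        'fast-jit': {'test_a': 1.0},
    }
    sorted_levels = sort_by_runtime(perf_data)   # lowest mean relative runtime first
    colors = generate_color_scheme(sorted_levels)
    # 'colors' maps each optimization level to an RGB triple of floats in [0, 255],
    # interpolated in HSV space from COLOR_SCHEME_MIN_COLOR to COLOR_SCHEME_MAX_COLOR.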
+def main():
+    arg_parser = argparse.ArgumentParser()
+    arg_parser.add_argument('input', help='The performance data file.')
+    arg_parser.add_argument(
+        '-q', '--quantity', type=str,
+        help="The quantity to build a bar chart for. Defaults to '%s'" % utils.TOTAL_TIME_QUANTITY,
+        default=utils.TOTAL_TIME_QUANTITY)
+    arg_parser.add_argument(
+        '-O', '--opt', type=str, nargs='*',
+        help="Filters on optimization levels.")
+    arg_parser.add_argument(
+        '-t', '--test', type=str, nargs='*',
+        help="Filters on tests.")
+    arg_parser.add_argument(
+        '-r', '--relative', action='store_const', const=True,
+        help="Produce bars that are relative to some baseline.", default=False)
+
+    args = arg_parser.parse_args()
+
+    perf_data = utils.parse_perf_data(args.input)[args.quantity]
+
+    if args.opt:
+        optimization_set = set(args.opt)
+        perf_data = [
+            (optimization_level, measurements)
+            for optimization_level, measurements in perf_data
+            if optimization_level in optimization_set]
+
+    if args.test:
+        test_set = set(args.test)
+        new_perf_data = []
+        for optimization_level, measurements in perf_data:
+            new_measurements = []
+            for test_name, data_point in measurements:
+                if test_name in test_set:
+                    new_measurements.append((test_name, data_point))
+
+            if len(new_measurements) > 0:
+                new_perf_data.append((optimization_level, new_measurements))
+        perf_data = new_perf_data
+
+    perf_data_dict = perf_list_to_dict(perf_data)
+
+    if args.relative:
+        baseline_opt_level = get_baseline_optimization_level(perf_data_dict)
+        perf_data_dict = get_relative_measurements(perf_data_dict, baseline_opt_level)
+
+    print(create_latex_chart(perf_data_dict))
+
 if __name__ == '__main__':
 if __name__ == '__main__':
-    print(create_latex_chart(utils.parse_perf_data(utils.DEFAULT_PERF_FILE_NAME)))
+    main()
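
For completeness, roughly what the removed __main__ one-liner did, re-expressed with the new helpers; this assumes utils still exposes DEFAULT_PERF_FILE_NAME and that parse_perf_data returns data keyed by quantity, as main() above expects:

    perf_data = utils.parse_perf_data(utils.DEFAULT_PERF_FILE_NAME)[utils.TOTAL_TIME_QUANTITY]
    print(create_latex_chart(perf_list_to_dict(perf_data)))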

+ 119 - 0
performance/test_mvc_model_overwrite.py

@@ -0,0 +1,119 @@
+import unittest
+import utils
+
+all_files = [
+    "core/mini_modify.alc",
+    "core/core_formalism.mvc",
+    "core/core_algorithm.alc",
+    "primitives.alc",
+    "object_operations.alc",
+    "conformance_scd.alc",
+    "library.alc",
+    "transform.alc",
+    "model_management.alc",
+    "ramify.alc",
+    "metamodels.alc",
+    "random.alc",
+    "constructors.alc",
+    "modelling.alc",
+    "compilation_manager.alc",
+]
+
+
+class TestMvCModelOverwrite(unittest.TestCase):
+    def model_overwrite(self, optimization_level):
+        utils.write_total_runtime_to_file(
+            'mvc_model_overwrite', optimization_level,
+            utils.run_correctness_test(all_files, [
+                "root",
+                "root",
+                "root",
+                "model_add",
+                "SimpleClassDiagrams",
+                "Empty",
+                "exit",
+                "model_list_full",
+                "model_modify",
+                "Empty",
+                "instantiate",
+                "Class",
+                "A",
+                "exit",
+                "model_list_full",
+                "model_overwrite",
+                "Empty",
+                "instantiate_node",
+                "",
+                "Class",
+                "B",
+                "instantiate_node",
+                "",
+                "Class",
+                "C",
+                "exit",
+                "model_list_full",
+                "model_modify",
+                "Empty",
+                "list",
+                "exit",
+            ], [
+                "Desired username for admin user?",
+                "Desired password for admin user?",
+                "Please repeat the password",
+                "Passwords match!",
+                "Welcome to the Model Management Interface v2.0!",
+                "Use the 'help' command for a list of possible commands",
+                "Ready for command...",
+                "Creating new model!",
+                "Model type?",
+                "Model name?",
+                "Waiting for model constructors...",
+                "Model upload success!",
+                "Ready for command...",
+                set([
+                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
+                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
+                    "  200  root nobody   Empty : SimpleClassDiagrams",
+                    "  200  root admin   core : CoreFormalism"
+                ]),
+                "Ready for command...",
+                "Which model do you want to modify?",
+                "Model loaded, ready for commands!",
+                "Use 'help' command for a list of possible commands",
+                "Please give your command.",
+                "Type to instantiate?",
+                "Name of new element?",
+                "Instantiation successful!",
+                "Please give your command.",
+                "Ready for command...",
+                set([
+                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
+                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
+                    "  200  root nobody   Empty : SimpleClassDiagrams",
+                    "  200  root admin   core : CoreFormalism"
+                ]),
+                "Ready for command...",
+                "Which model to overwrite?",
+                "Waiting for model constructors...",
+                "Model overwrite success!",
+                "Ready for command...",
+                set([
+                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
+                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
+                    "  200  root nobody   Empty : SimpleClassDiagrams",
+                    "  200  root admin   core : CoreFormalism"
+                ]),
+                "Ready for command...",
+                "Which model do you want to modify?",
+                "Model loaded, ready for commands!",
+                "Use 'help' command for a list of possible commands",
+                "Please give your command.",
+                "List of all elements:",
+                set(["  B : Class", "  C : Class"]),
+                "Please give your command.",
+                "Ready for command...",
+            ], optimization_level))
+
+
+utils.define_perf_tests(TestMvCModelOverwrite,
+                        TestMvCModelOverwrite.model_overwrite)

+ 105 - 0
performance/test_mvc_print_upload.py

@@ -0,0 +1,105 @@
+import unittest
+import utils
+
+all_files = [
+    "core/mini_modify.alc",
+    "core/core_formalism.mvc",
+    "core/core_algorithm.alc",
+    "primitives.alc",
+    "object_operations.alc",
+    "conformance_scd.alc",
+    "library.alc",
+    "transform.alc",
+    "model_management.alc",
+    "ramify.alc",
+    "metamodels.alc",
+    "random.alc",
+    "constructors.alc",
+    "modelling.alc",
+    "compilation_manager.alc",
+]
+
+
+class TestMvCPrintUpload(unittest.TestCase):
+    def print_upload(self, optimization_level):
+        utils.write_total_runtime_to_file(
+            'mvc_print_upload', optimization_level,
+            utils.run_correctness_test(all_files, [
+                "root",
+                "root",
+                "root",
+                "model_add",
+                "SimpleClassDiagrams",
+                "PetriNets",
+            ] + utils.get_model_constructor(
+                open("integration/code/pn_runtime.mvc", "r").read()
+            ) + [
+                "model_list_full",
+                "transformation_add_MT_language",
+                "PetriNets",
+                "",
+                "PetriNets_RAM",
+                "model_list_full",
+                "transformation_add_MT",
+                "PetriNets_RAM",
+                "PetriNets",
+                "",
+                "",
+                "PetriNets_Print",
+            ] + utils.get_model_constructor(
+                open("integration/code/pn_print.mvc", "r").read()
+            ) + [
+                "transformation_list_full",
+            ], [
+                "Desired username for admin user?",
+                "Desired password for admin user?",
+                "Please repeat the password",
+                "Passwords match!",
+                "Welcome to the Model Management Interface v2.0!",
+                "Use the 'help' command for a list of possible commands",
+                "Ready for command...",
+                "Creating new model!",
+                "Model type?",
+                "Model name?",
+                "Waiting for model constructors...",
+                "Model upload success!",
+                "Ready for command...",
+                set([
+                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
+                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
+                    "  200  root nobody   PetriNets : SimpleClassDiagrams",
+                    "  200  root admin   core : CoreFormalism"
+                ]),
+                "Ready for command...",
+                "Formalisms to include (terminate with empty string)?",
+                "Name of the RAMified transformation metamodel?",
+                "Ready for command...",
+                set([
+                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
+                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
+                    "  200  root nobody   PetriNets : SimpleClassDiagrams",
+                    "  200  root nobody   __merged_PetriNets_RAM : SimpleClassDiagrams",
+                    "  200  root nobody   PetriNets_RAM : SimpleClassDiagrams",
+                    "  200  root admin   core : CoreFormalism"
+                ]),
+                "Ready for command...",
+                "RAMified metamodel to use?",
+                "Supported metamodels:",
+                set([
+                    "  PetriNets",
+                ]),
+                "",
+                "Which ones do you want to use as source (empty string to finish)?",
+                "Model added as source",
+                "Which ones do you want to use as target (empty string to finish)?",
+                "Name of new transformation?",
+                "Waiting for model constructors...",
+                "Ready for command...",
+                set([
+                    "  200  root nobody   [ModelTransformation] PetriNets_Print : PetriNets_RAM"
+                ]),
+                "Ready for command...",
+            ], optimization_level))
+
+
+utils.define_perf_tests(TestMvCPrintUpload, TestMvCPrintUpload.print_upload)

+ 338 - 0
performance/test_mvc_simulate.py

@@ -0,0 +1,338 @@
+import unittest
+import utils
+
+all_files = [
+    "core/mini_modify.alc",
+    "core/core_formalism.mvc",
+    "core/core_algorithm.alc",
+    "primitives.alc",
+    "object_operations.alc",
+    "conformance_scd.alc",
+    "library.alc",
+    "transform.alc",
+    "model_management.alc",
+    "ramify.alc",
+    "metamodels.alc",
+    "random.alc",
+    "constructors.alc",
+    "modelling.alc",
+    "compilation_manager.alc",
+]
+
+
+class TestMvCSimulate(unittest.TestCase):
+    def transform_add_MT_pn_simulate(self, optimization_level):
+        utils.write_total_runtime_to_file(
+            'mvc_simulate', optimization_level,
+            utils.run_correctness_test(all_files,
+            [ "root", "root", "root",
+                "model_add",
+                    "SimpleClassDiagrams",
+                    "PetriNets",
+                    ] + utils.get_model_constructor(open("integration/code/pn_design.mvc", "r").read()) + [
+                "model_add",
+                    "SimpleClassDiagrams",
+                    "PetriNets_Runtime",
+                    ] + utils.get_model_constructor(open("integration/code/pn_runtime.mvc", "r").read()) + [
+                "model_add",
+                    "PetriNets",
+                    "my_pn",
+                    ] + utils.get_model_constructor(open("integration/code/pn_design_model.mvc", "r").read()) + [
+                "model_list",
+                "transformation_add_MT_language",
+                    "PetriNets_Runtime",
+                    "PetriNets",
+                    "",
+                    "PetriNets_RAM",
+                "model_list",
+                "model_modify",
+                    "__merged_PetriNets_RAM",
+                        "instantiate",
+                            "Association",
+                            "D2R_PlaceLink",
+                            "PetriNets/Place",
+                            "PetriNets_Runtime/Place",
+                        "instantiate",
+                            "Association",
+                            "D2R_TransitionLink",
+                            "PetriNets/Transition",
+                            "PetriNets_Runtime/Transition",
+                        "instantiate",
+                            "Association",
+                            "R2D_PlaceLink",
+                            "PetriNets_Runtime/Place",
+                            "PetriNets/Place",
+                        "instantiate",
+                            "Association",
+                            "R2D_TransitionLink",
+                            "PetriNets_Runtime/Transition",
+                            "PetriNets/Transition",
+                        "exit",
+                "transformation_RAMify",
+                    "__merged_PetriNets_RAM",
+                    "PetriNets_RAM",
+                "transformation_add_MT",
+                    "PetriNets_RAM",
+                    "PetriNets",
+                    "",
+                    "PetriNets_Runtime",
+                    "",
+                    "pn_design_to_runtime",
+                    ] + utils.get_model_constructor(open("integration/code/pn_design_to_runtime.mvc", "r").read()) + [
+                "transformation_add_MT",
+                    "PetriNets_RAM",
+                    "PetriNets_Runtime",
+                    "",
+                    "PetriNets",
+                    "",
+                    "pn_runtime_to_design",
+                    ] + utils.get_model_constructor(open("integration/code/pn_runtime_to_design.mvc", "r").read()) + [
+                "transformation_add_MT",
+                    "PetriNets_RAM",
+                    "PetriNets_Runtime",
+                    "",
+                    "PetriNets_Runtime",
+                    "",
+                    "pn_step",
+                    ] + utils.get_model_constructor(open("integration/code/pn_simulate.mvc", "r").read()) + [
+                "transformation_add_MT",
+                    "PetriNets_RAM",
+                    "PetriNets",
+                    "",
+                    "",
+                    "pn_print",
+                    ] + utils.get_model_constructor(open("integration/code/pn_print.mvc", "r").read()) + [
+                "model_list",
+                "transformation_list",
+                "transformation_execute",
+                "pn_print",
+                "my_pn",
+                "transformation_execute",
+                "pn_design_to_runtime",
+                "my_pn",
+                "my_pn_runtime",
+                "transformation_execute",
+                "pn_step",
+                "my_pn_runtime",
+                "my_pn_runtime",
+                "transformation_execute",
+                "pn_runtime_to_design",
+                "my_pn_runtime",
+                "my_pn",
+                "transformation_execute",
+                "pn_print",
+                "my_pn",
+            ],
+            [   # bootup phase
+                "Desired username for admin user?",
+                "Desired password for admin user?",
+                "Please repeat the password",
+                "Passwords match!",
+                "Welcome to the Model Management Interface v2.0!",
+                "Use the 'help' command for a list of possible commands",
+                "Ready for command...",
+                # model_add
+                "Creating new model!",
+                "Model type?",
+                "Model name?",
+                "Waiting for model constructors...",
+                "Model upload success!",
+                "Ready for command...",
+                # model_add
+                "Creating new model!",
+                "Model type?",
+                "Model name?",
+                "Waiting for model constructors...",
+                "Model upload success!",
+                "Ready for command...",
+                # model_add
+                "Creating new model!",
+                "Model type?",
+                "Model name?",
+                "Waiting for model constructors...",
+                "Model upload success!",
+                "Ready for command...",
+                # model_list
+                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
+                     "  CoreFormalism : SimpleClassDiagrams",
+                     "  PetriNets : SimpleClassDiagrams",
+                     "  my_pn : PetriNets",
+                     "  PetriNets_Runtime : SimpleClassDiagrams",
+                     "  core : CoreFormalism"]),
+                "Ready for command...",
+                # transformation_add_MT_language
+                "Formalisms to include (terminate with empty string)?",
+                "Name of the RAMified transformation metamodel?",
+                "Ready for command...",
+                # model_list
+                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
+                     "  CoreFormalism : SimpleClassDiagrams",
+                     "  PetriNets_Runtime : SimpleClassDiagrams",
+                     "  PetriNets : SimpleClassDiagrams",
+                     "  __merged_PetriNets_RAM : SimpleClassDiagrams",
+                     "  PetriNets_RAM : SimpleClassDiagrams",
+                     "  my_pn : PetriNets",
+                     "  core : CoreFormalism"]),
+                "Ready for command...",
+                # model_modify
+                "Which model do you want to modify?",
+                "Model loaded, ready for commands!",
+                "Use 'help' command for a list of possible commands",
+                "Please give your command.",
+                # instantiate 1
+                "Type to instantiate?",
+                "Name of new element?",
+                "Source name?",
+                "Destination name?",
+                "Instantiation successful!",
+                "Please give your command.",
+                # instantiate 2
+                "Type to instantiate?",
+                "Name of new element?",
+                "Source name?",
+                "Destination name?",
+                "Instantiation successful!",
+                "Please give your command.",
+                # instantiate 3
+                "Type to instantiate?",
+                "Name of new element?",
+                "Source name?",
+                "Destination name?",
+                "Instantiation successful!",
+                "Please give your command.",
+                # instantiate 4
+                "Type to instantiate?",
+                "Name of new element?",
+                "Source name?",
+                "Destination name?",
+                "Instantiation successful!",
+                "Please give your command.",
+                "Ready for command...",
+                # transformation_RAMify
+                "Which metamodel do you want to RAMify?",
+                "Where do you want to store the RAMified metamodel?",
+                "Ready for command...",
+                # transformation_add_MT
+                "RAMified metamodel to use?",
+                "Supported metamodels:",
+                set(["  PetriNets",
+                     "  PetriNets_Runtime",
+                    ]),
+                "",
+                "Which ones do you want to use as source (empty string to finish)?",
+                "Model added as source",
+                "Which ones do you want to use as target (empty string to finish)?",
+                "Model added as target",
+                "Name of new transformation?",
+                "Waiting for model constructors...",
+                "Ready for command...",
+                # transformation_add_MT
+                "RAMified metamodel to use?",
+                "Supported metamodels:",
+                set(["  PetriNets",
+                     "  PetriNets_Runtime",
+                    ]),
+                "",
+                "Which ones do you want to use as source (empty string to finish)?",
+                "Model added as source",
+                "Which ones do you want to use as target (empty string to finish)?",
+                "Model added as target",
+                "Name of new transformation?",
+                "Waiting for model constructors...",
+                "Ready for command...",
+                # transformation_add_MT
+                "RAMified metamodel to use?",
+                "Supported metamodels:",
+                set(["  PetriNets",
+                     "  PetriNets_Runtime",
+                    ]),
+                "",
+                "Which ones do you want to use as source (empty string to finish)?",
+                "Model added as source",
+                "Which ones do you want to use as target (empty string to finish)?",
+                "Model added as target",
+                "Name of new transformation?",
+                "Waiting for model constructors...",
+                "Ready for command...",
+                # transformation_add_MT
+                "RAMified metamodel to use?",
+                "Supported metamodels:",
+                set(["  PetriNets",
+                     "  PetriNets_Runtime",
+                    ]),
+                "",
+                "Which ones do you want to use as source (empty string to finish)?",
+                "Model added as source",
+                "Which ones do you want to use as target (empty string to finish)?",
+                "Name of new transformation?",
+                "Waiting for model constructors...",
+                "Ready for command...",
+                # model_list
+                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
+                     "  CoreFormalism : SimpleClassDiagrams",
+                     "  PetriNets_Runtime : SimpleClassDiagrams",
+                     "  PetriNets : SimpleClassDiagrams",
+                     "  pn_print : PetriNets_RAM",
+                     "  pn_design_to_runtime : PetriNets_RAM",
+                     "  pn_runtime_to_design : PetriNets_RAM",
+                     "  pn_step : PetriNets_RAM",
+                     "  __merged_PetriNets_RAM : SimpleClassDiagrams",
+                     "  PetriNets_RAM : SimpleClassDiagrams",
+                     "  my_pn : PetriNets",
+                     "  core : CoreFormalism"]),
+                "Ready for command...",
+                # transformation_list
+                set(["[ModelTransformation] pn_print : PetriNets_RAM",
+                     "[ModelTransformation] pn_design_to_runtime : PetriNets_RAM",
+                     "[ModelTransformation] pn_runtime_to_design : PetriNets_RAM",
+                     "[ModelTransformation] pn_step : PetriNets_RAM"]),
+                "Ready for command...",
+                # transformation_execute (pn_print)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets",
+                set(['"p1" --> 1',
+                     '"p2" --> 2',
+                     '"p3" --> 3',
+                    ]),
+                "Transformation executed with result: True",
+                "Ready for command...",
+                # transformation_execute (pn_design_to_runtime)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets",
+                "Which model to create for target element PetriNets_Runtime",
+                "Transformation executed with result: True",
+                "Ready for command...",
+                # transformation_execute (pn_step)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets_Runtime",
+                "Which model to create for target element PetriNets_Runtime",
+                "Transformation executed with result: True",
+                "Ready for command...",
+                # transformation_execute (pn_runtime_to_design)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets_Runtime",
+                "Which model to create for target element PetriNets",
+                "Transformation executed with result: True",
+                "Ready for command...",
+                # transformation_execute (pn_print)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets",
+                set(['"p1" --> 0',
+                     '"p2" --> 1',
+                     '"p3" --> 5',
+                    ]),
+                "Transformation executed with result: True",
+                "Ready for command...",
+            ],
+            optimization_level))
+
+
+utils.define_perf_tests(
+    TestMvCSimulate,
+    TestMvCSimulate.transform_add_MT_pn_simulate,
+    optimization_levels=[
+        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS,
+        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS,
+        utils.OPTIMIZATION_LEVEL_BASELINE_JIT,
+        utils.OPTIMIZATION_LEVEL_FAST_JIT
+    ])

+ 344 - 0
performance/test_mvc_simulate_larger.py

@@ -0,0 +1,344 @@
+import unittest
+import utils
+
+all_files = [
+    "core/mini_modify.alc",
+    "core/core_formalism.mvc",
+    "core/core_algorithm.alc",
+    "primitives.alc",
+    "object_operations.alc",
+    "conformance_scd.alc",
+    "library.alc",
+    "transform.alc",
+    "model_management.alc",
+    "ramify.alc",
+    "metamodels.alc",
+    "random.alc",
+    "constructors.alc",
+    "modelling.alc",
+    "compilation_manager.alc",
+]
+
+
+class TestMvCSimulateLarger(unittest.TestCase):
+    def transform_add_MT_pn_simulate_larger(self, optimization_level):
+        def step_and_print():
+            return [
+                # transformation_execute (pn_step)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets_Runtime",
+                "Which model to create for target element PetriNets_Runtime",
+                "Transformation executed with result: True",
+                "Ready for command...",
+                # transformation_execute (pn_runtime_to_design)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets_Runtime",
+                "Which model to create for target element PetriNets",
+                "Transformation executed with result: True",
+                "Ready for command...",
+                # transformation_execute (pn_print)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets",
+                None,
+                None,
+                None,
+                "Transformation executed with result: True",
+                "Ready for command...",
+            ]
+
+        utils.write_total_runtime_to_file(
+            'mvc_large', optimization_level,
+            utils.run_correctness_test(all_files,
+            ["root", "root", "root",
+                "model_add",
+                    "SimpleClassDiagrams",
+                    "PetriNets",
+                    ] + utils.get_model_constructor(open("integration/code/pn_design.mvc", "r").read()) + [
+                "model_add",
+                    "SimpleClassDiagrams",
+                    "PetriNets_Runtime",
+                    ] + utils.get_model_constructor(open("integration/code/pn_runtime.mvc", "r").read()) + [
+                "model_add",
+                    "PetriNets",
+                    "my_pn",
+                    ] + utils.get_model_constructor(open("integration/code/pn_design_model_larger.mvc", "r").read()) + [
+                "model_list",
+                "transformation_add_MT_language",
+                    "PetriNets_Runtime",
+                    "PetriNets",
+                    "",
+                    "PetriNets_RAM",
+                "model_list",
+                "model_modify",
+                    "__merged_PetriNets_RAM",
+                        "instantiate",
+                            "Association",
+                            "D2R_PlaceLink",
+                            "PetriNets/Place",
+                            "PetriNets_Runtime/Place",
+                        "instantiate",
+                            "Association",
+                            "D2R_TransitionLink",
+                            "PetriNets/Transition",
+                            "PetriNets_Runtime/Transition",
+                        "instantiate",
+                            "Association",
+                            "R2D_PlaceLink",
+                            "PetriNets_Runtime/Place",
+                            "PetriNets/Place",
+                        "instantiate",
+                            "Association",
+                            "R2D_TransitionLink",
+                            "PetriNets_Runtime/Transition",
+                            "PetriNets/Transition",
+                        "exit",
+                "transformation_RAMify",
+                    "__merged_PetriNets_RAM",
+                    "PetriNets_RAM",
+                "transformation_add_MT",
+                    "PetriNets_RAM",
+                    "PetriNets",
+                    "",
+                    "PetriNets_Runtime",
+                    "",
+                    "pn_design_to_runtime",
+                    ] + utils.get_model_constructor(open("integration/code/pn_design_to_runtime.mvc", "r").read()) + [
+                "transformation_add_MT",
+                    "PetriNets_RAM",
+                    "PetriNets_Runtime",
+                    "",
+                    "PetriNets",
+                    "",
+                    "pn_runtime_to_design",
+                    ] + utils.get_model_constructor(open("integration/code/pn_runtime_to_design.mvc", "r").read()) + [
+                "transformation_add_MT",
+                    "PetriNets_RAM",
+                    "PetriNets_Runtime",
+                    "",
+                    "PetriNets_Runtime",
+                    "",
+                    "pn_step",
+                    ] + utils.get_model_constructor(open("integration/code/pn_simulate.mvc", "r").read()) + [
+                "transformation_add_MT",
+                    "PetriNets_RAM",
+                    "PetriNets",
+                    "",
+                    "",
+                    "pn_print",
+                    ] + utils.get_model_constructor(open("integration/code/pn_print.mvc", "r").read()) + [
+                "model_list",
+                "transformation_list",
+                "transformation_execute",
+                "pn_print",
+                "my_pn",
+                "transformation_execute",
+                "pn_design_to_runtime",
+                "my_pn",
+                "my_pn_runtime", ] + [
+                    "transformation_execute",
+                    "pn_step",
+                    "my_pn_runtime",
+                    "my_pn_runtime",
+                    "transformation_execute",
+                    "pn_runtime_to_design",
+                    "my_pn_runtime",
+                    "my_pn",
+                    "transformation_execute",
+                    "pn_print",
+                    "my_pn",
+                    ] * 10 + [
+            ],
+            [   # bootup phase
+                "Desired username for admin user?",
+                "Desired password for admin user?",
+                "Please repeat the password",
+                "Passwords match!",
+                "Welcome to the Model Management Interface v2.0!",
+                "Use the 'help' command for a list of possible commands",
+                "Ready for command...",
+                # model_add
+                "Creating new model!",
+                "Model type?",
+                "Model name?",
+                "Waiting for model constructors...",
+                "Model upload success!",
+                "Ready for command...",
+                # model_add
+                "Creating new model!",
+                "Model type?",
+                "Model name?",
+                "Waiting for model constructors...",
+                "Model upload success!",
+                "Ready for command...",
+                # model_add
+                "Creating new model!",
+                "Model type?",
+                "Model name?",
+                "Waiting for model constructors...",
+                "Model upload success!",
+                "Ready for command...",
+                # model_list
+                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
+                     "  CoreFormalism : SimpleClassDiagrams",
+                     "  PetriNets : SimpleClassDiagrams",
+                     "  my_pn : PetriNets",
+                     "  PetriNets_Runtime : SimpleClassDiagrams",
+                     "  core : CoreFormalism"]),
+                "Ready for command...",
+                # transformation_add_MT_language
+                "Formalisms to include (terminate with empty string)?",
+                "Name of the RAMified transformation metamodel?",
+                "Ready for command...",
+                # model_list
+                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
+                     "  CoreFormalism : SimpleClassDiagrams",
+                     "  PetriNets_Runtime : SimpleClassDiagrams",
+                     "  PetriNets : SimpleClassDiagrams",
+                     "  __merged_PetriNets_RAM : SimpleClassDiagrams",
+                     "  PetriNets_RAM : SimpleClassDiagrams",
+                     "  my_pn : PetriNets",
+                     "  core : CoreFormalism"]),
+                "Ready for command...",
+                # model_modify
+                "Which model do you want to modify?",
+                "Model loaded, ready for commands!",
+                "Use 'help' command for a list of possible commands",
+                "Please give your command.",
+                # instantiate 1
+                "Type to instantiate?",
+                "Name of new element?",
+                "Source name?",
+                "Destination name?",
+                "Instantiation successful!",
+                "Please give your command.",
+                # instantiate 2
+                "Type to instantiate?",
+                "Name of new element?",
+                "Source name?",
+                "Destination name?",
+                "Instantiation successful!",
+                "Please give your command.",
+                # instantiate 3
+                "Type to instantiate?",
+                "Name of new element?",
+                "Source name?",
+                "Destination name?",
+                "Instantiation successful!",
+                "Please give your command.",
+                # instantiate 4
+                "Type to instantiate?",
+                "Name of new element?",
+                "Source name?",
+                "Destination name?",
+                "Instantiation successful!",
+                "Please give your command.",
+                "Ready for command...",
+                # transformation_RAMify
+                "Which metamodel do you want to RAMify?",
+                "Where do you want to store the RAMified metamodel?",
+                "Ready for command...",
+                # transformation_add_MT
+                "RAMified metamodel to use?",
+                "Supported metamodels:",
+                set(["  PetriNets",
+                     "  PetriNets_Runtime",
+                    ]),
+                "",
+                "Which ones do you want to use as source (empty string to finish)?",
+                "Model added as source",
+                "Which ones do you want to use as target (empty string to finish)?",
+                "Model added as target",
+                "Name of new transformation?",
+                "Waiting for model constructors...",
+                "Ready for command...",
+                # transformation_add_MT
+                "RAMified metamodel to use?",
+                "Supported metamodels:",
+                set(["  PetriNets",
+                     "  PetriNets_Runtime",
+                    ]),
+                "",
+                "Which ones do you want to use as source (empty string to finish)?",
+                "Model added as source",
+                "Which ones do you want to use as target (empty string to finish)?",
+                "Model added as target",
+                "Name of new transformation?",
+                "Waiting for model constructors...",
+                "Ready for command...",
+                # transformation_add_MT
+                "RAMified metamodel to use?",
+                "Supported metamodels:",
+                set(["  PetriNets",
+                     "  PetriNets_Runtime",
+                    ]),
+                "",
+                "Which ones do you want to use as source (empty string to finish)?",
+                "Model added as source",
+                "Which ones do you want to use as target (empty string to finish)?",
+                "Model added as target",
+                "Name of new transformation?",
+                "Waiting for model constructors...",
+                "Ready for command...",
+                # transformation_add_MT
+                "RAMified metamodel to use?",
+                "Supported metamodels:",
+                set(["  PetriNets",
+                     "  PetriNets_Runtime",
+                    ]),
+                "",
+                "Which ones do you want to use as source (empty string to finish)?",
+                "Model added as source",
+                "Which ones do you want to use as target (empty string to finish)?",
+                "Name of new transformation?",
+                "Waiting for model constructors...",
+                "Ready for command...",
+                # model_list
+                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
+                     "  CoreFormalism : SimpleClassDiagrams",
+                     "  PetriNets_Runtime : SimpleClassDiagrams",
+                     "  PetriNets : SimpleClassDiagrams",
+                     "  pn_print : PetriNets_RAM",
+                     "  pn_design_to_runtime : PetriNets_RAM",
+                     "  pn_runtime_to_design : PetriNets_RAM",
+                     "  pn_step : PetriNets_RAM",
+                     "  __merged_PetriNets_RAM : SimpleClassDiagrams",
+                     "  PetriNets_RAM : SimpleClassDiagrams",
+                     "  my_pn : PetriNets",
+                     "  core : CoreFormalism"]),
+                "Ready for command...",
+                # transformation_list
+                set(["[ModelTransformation] pn_print : PetriNets_RAM",
+                     "[ModelTransformation] pn_design_to_runtime : PetriNets_RAM",
+                     "[ModelTransformation] pn_runtime_to_design : PetriNets_RAM",
+                     "[ModelTransformation] pn_step : PetriNets_RAM"]),
+                "Ready for command...",
+                # transformation_execute (pn_print)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets",
+                set(['"lock_available" --> 1',
+                     '"critical_section_1" --> 0',
+                     '"critical_section_2" --> 0',
+                    ]),
+                "Transformation executed with result: True",
+                "Ready for command...",
+                # transformation_execute (pn_design_to_runtime)
+                "Which transformation do you want to execute?",
+                "Which model to bind for source element PetriNets",
+                "Which model to create for target element PetriNets_Runtime",
+                "Transformation executed with result: True",
+                "Ready for command...",
+                ] + \
+                    step_and_print() * 10 +
+                [],
+            optimization_level))
+
+
+utils.define_perf_tests(
+    TestMvCSimulateLarger,
+    TestMvCSimulateLarger.transform_add_MT_pn_simulate_larger,
+    optimization_levels=[
+        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS,
+        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS,
+        utils.OPTIMIZATION_LEVEL_FAST_JIT,
+        utils.OPTIMIZATION_LEVEL_BASELINE_JIT
+    ])

+ 169 - 53
performance/utils.py

@@ -12,6 +12,8 @@ import signal
 import random
 import random
 import operator
 import operator
 
 
+from collections import defaultdict
+
 sys.path.append("interface/HUTN")
 sys.path.append("interface/HUTN")
 sys.path.append("scripts")
 sys.path.append("scripts")
 from hutn_compiler.compiler import main as do_compile
 from hutn_compiler.compiler import main as do_compile
@@ -27,21 +29,27 @@ PORTS = set()
 
 
 OPTIMIZATION_LEVEL_LEGACY_INTERPRETER = "legacy-interpreter"
 OPTIMIZATION_LEVEL_LEGACY_INTERPRETER = "legacy-interpreter"
 OPTIMIZATION_LEVEL_INTERPRETER = "interpreter"
 OPTIMIZATION_LEVEL_INTERPRETER = "interpreter"
+OPTIMIZATION_LEVEL_BYTECODE_INTERPRETER = "bytecode-interpreter"
 OPTIMIZATION_LEVEL_BASELINE_JIT = "baseline-jit"
 OPTIMIZATION_LEVEL_BASELINE_JIT = "baseline-jit"
 OPTIMIZATION_LEVEL_BASELINE_JIT_NO_THUNKS = "baseline-jit,no-thunks"
 OPTIMIZATION_LEVEL_BASELINE_JIT_NO_THUNKS = "baseline-jit,no-thunks"
 OPTIMIZATION_LEVEL_FAST_JIT = "fast-jit"
 OPTIMIZATION_LEVEL_FAST_JIT = "fast-jit"
+OPTIMIZATION_LEVEL_FAST_JIT_NO_NOPS = "fast-jit,no-insert-nops"
 OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LARGE_FUNCTIONS = "adaptive-jit-favor-large-functions"
 OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LARGE_FUNCTIONS = "adaptive-jit-favor-large-functions"
 OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_FUNCTIONS = "adaptive-jit-favor-small-functions"
 OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_FUNCTIONS = "adaptive-jit-favor-small-functions"
 OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS = "adaptive-jit-favor-loops"
 OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS = "adaptive-jit-favor-loops"
+OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS = "adaptive-jit-favor-small-loops"
 ALL_OPTIMIZATION_LEVELS = [
 ALL_OPTIMIZATION_LEVELS = [
     OPTIMIZATION_LEVEL_LEGACY_INTERPRETER,
     OPTIMIZATION_LEVEL_LEGACY_INTERPRETER,
     OPTIMIZATION_LEVEL_INTERPRETER,
     OPTIMIZATION_LEVEL_INTERPRETER,
+    OPTIMIZATION_LEVEL_BYTECODE_INTERPRETER,
     OPTIMIZATION_LEVEL_BASELINE_JIT,
     OPTIMIZATION_LEVEL_BASELINE_JIT,
     OPTIMIZATION_LEVEL_BASELINE_JIT_NO_THUNKS,
     OPTIMIZATION_LEVEL_BASELINE_JIT_NO_THUNKS,
     OPTIMIZATION_LEVEL_FAST_JIT,
     OPTIMIZATION_LEVEL_FAST_JIT,
+    OPTIMIZATION_LEVEL_FAST_JIT_NO_NOPS,
     OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LARGE_FUNCTIONS,
     OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LARGE_FUNCTIONS,
     OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_FUNCTIONS,
     OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_FUNCTIONS,
-    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS
+    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS,
+    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS
 ]
 ]
 
 
 class ModelverseTerminated(Exception):
 class ModelverseTerminated(Exception):
@@ -133,6 +141,49 @@ def compile_file(address, mod_filename, filename, mode, proc):
         except UnboundLocalError:
         except UnboundLocalError:
             pass
             pass
 
 
+def compile_files(address, process, files, mode):
+    """Compiles the given files in the given mode."""
+    threads = []
+    mod_files = []
+    for filename in files:
+        if os.path.isfile(filename):
+            mod_filename = filename
+        elif os.path.isfile("%s/%s" % (get_code_folder_name(), filename)):
+            mod_filename = "%s/%s" % (get_code_folder_name(), filename)
+        elif os.path.isfile("%s/%s" % (BOOTSTRAP_FOLDER_NAME, filename)):
+            mod_filename = "%s/%s" % (BOOTSTRAP_FOLDER_NAME, filename)
+        else:
+            raise Exception("File not found: %s" % filename)
+        mod_files.append(mod_filename)
+
+    to_compile = to_recompile(address, mod_files)
+
+    for mod_filename in to_compile:
+        if mod_filename.endswith(".mvc"):
+            model_mode = "MO"
+            mod_files.remove(mod_filename)
+        else:
+            model_mode = mode
+        if PARALLEL_PUSH:
+            import threading
+            threads.append(
+                threading.Thread(
+                    target=compile_file,
+                    args=[address, mod_filename, mod_filename, model_mode, process]))
+            threads[-1].start()
+        else:
+            compile_file(address, mod_filename, mod_filename, model_mode, process)
+
+    if PARALLEL_PUSH:
+        for t in threads:
+            t.join()
+
+    if mode[-1] == "O":
+        # Fire up the linker
+        val = execute("link_and_load", [address, USERNAME] + mod_files, wait=True)
+        if val != 0:
+            raise Exception("Linking error")
+
 def run_file(files, parameters, mode, handle_output, optimization_level=None):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
        and handles their output."""
@@ -149,39 +200,8 @@ def run_file(files, parameters, mode, handle_output, optimization_level=None):
             modelverse_args.append('--kernel=%s' % optimization_level)
         proc = execute("run_local_modelverse", modelverse_args, wait=False)
 
-        threads = []
-        mod_files = []
-        for filename in files:
-            if os.path.isfile("%s/%s" % (get_code_folder_name(), filename)):
-                mod_filename = "%s/%s" % (get_code_folder_name(), filename)
-            elif os.path.isfile("%s/%s" % (BOOTSTRAP_FOLDER_NAME, filename)):
-                mod_filename = "%s/%s" % (BOOTSTRAP_FOLDER_NAME, filename)
-            else:
-                raise Exception("File not found: %s" % filename)
-            mod_files.append(mod_filename)
-
-        to_compile = to_recompile(address, mod_files)
-
-        for mod_filename in to_compile:
-            if PARALLEL_PUSH:
-                import threading
-                threads.append(
-                    threading.Thread(
-                        target=compile_file,
-                        args=[address, mod_filename, mod_filename, mode, proc]))
-                threads[-1].start()
-            else:
-                compile_file(address, mod_filename, mod_filename, mode, proc)
-
-        if PARALLEL_PUSH:
-            for t in threads:
-                t.join()
-
-        if mode[-1] == "O":
-            # Fire up the linker
-            val = execute("link_and_load", [address, USERNAME] + mod_files, wait=True)
-            if val != 0:
-                raise Exception("Linking error")
+        # Compile, push and link the source code files.
+        compile_files(address, proc, files, mode)
 
         # Send the request ...
         set_input_data(address, parameters)
@@ -232,8 +252,8 @@ def run_file_fixed_output_count(files, parameters, mode, output_count, optimizat
     results = []
     def handle_output(output):
         """Appends the given output to the list of results."""
+        results.append(output)
         if len(results) < output_count:
-            results.append(output)
             return True
         else:
             return False
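# With the change above, handle_output stores the final output before it stops requesting more,
# so exactly 'output_count' results come back. A usage sketch with a hypothetical file name and
# parameter list:
def _sketch_fixed_output_count():
    first, second = run_file_fixed_output_count(["my_test.alc"], [10, 20], "CO", 2)
    return first, second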
@@ -246,17 +266,72 @@ def run_file_single_output(files, parameters, mode, optimization_level=None):
        and then collects and returns a single output."""
     return run_file_fixed_output_count(files, parameters, mode, 1, optimization_level)[0]
 
+def mean(values):
+    """Computes the arithmetic mean of the given values."""
+    return float(sum(values)) / max(len(values), 1)
+
 def run_perf_test(files, parameters, optimization_level, n_iterations=1):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
        and then collects their output. This process is repeated n_iterations times. The
-       return value is the average of all outputs."""
-    result = 0.0
+       return value is the average of all outputs, along with the mean total run-time."""
+    test_runtimes = []
+    total_runtimes = []
     for _ in xrange(n_iterations):
-        result += float(
-            run_file_single_output(
-                files, parameters + [0], 'CO',
-                optimization_level)) / float(n_iterations)
-    return result
+        start_time = time.time()
+        test_time = run_file_single_output(
+            files, parameters, 'CO',
+            optimization_level)
+        end_time = time.time()
+        total_time = end_time - start_time
+        test_runtimes.append(test_time)
+        total_runtimes.append(total_time)
+    return mean(test_runtimes), mean(total_runtimes)
+
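# Usage sketch for run_perf_test: it now reports a pair (mean test-reported runtime, mean
# wall-clock runtime per iteration). The benchmark file, parameters and iteration count are
# hypothetical.
def _sketch_perf_run():
    runtimes = run_perf_test(
        ["my_benchmark.alc"], [1000], OPTIMIZATION_LEVEL_BASELINE_JIT, n_iterations=3)
    write_perf_to_file("my_benchmark", OPTIMIZATION_LEVEL_BASELINE_JIT, runtimes)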
+def get_expectation_checks(expected_values):
+    """Converts the given sequence of expected values into a sequence of predicates that tell
+       whether a received output is acceptable. Every predicate is paired with its expected value."""
+    def get_single_expectation_checks(expectation):
+        """Gets an expectation checker for a single expected value."""
+        if isinstance(expectation, set):
+            # We expect to receive a number of outputs equal to the size of the set, but their
+            # order does not matter.
+            for _ in xrange(len(expectation)):
+                yield lambda val: val in expectation
+        elif expectation is None:
+            # Skip output value
+            yield lambda _: True
+        else:
+            yield lambda val: val == expectation
+
+    for expectation in expected_values:
+        for checker in get_single_expectation_checks(expectation):
+            yield checker, expectation
+
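# Sketch of how the expectation format above behaves: a plain value must match exactly, a set
# matches len(set) outputs in any order, and None skips one output. The expected list and the
# received values are illustrative only.
def _sketch_expectation_checks():
    checks = list(get_expectation_checks([1, {2, 3}, None]))
    # Four checkers in total: one for 1, two for the set {2, 3}, one wildcard for None.
    results = [check(value) for (check, _), value in zip(checks, [1, 3, 2, "anything"])]
    return all(results)  # True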
+def run_correctness_test(files, parameters, expected, optimization_level):
+    """Compiles the given sequence of files, feeds them the given input, and compares their
+       output with the expected output. The return value is the total run-time of the test,
+       or None if the Modelverse terminated before all expected output was checked."""
+    checks = iter(list(get_expectation_checks(expected)))
+    next_check = [next(checks)]
+    def handle_output(output):
+        """Checks the given output against the expected output."""
+        check, expectation = next_check[0]
+        print("Got %s, expect %s" % (output, expectation))
+        assert check(output)
+
+        try:
+            next_check[0] = next(checks)
+            return True
+        except StopIteration:
+            return False
+
+    start_time = time.time()
+    try:
+        run_file(files, parameters, 'CO', handle_output, optimization_level)
+    except ModelverseTerminated:
+        return
+    end_time = time.time()
+    return end_time - start_time
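# Usage sketch for run_correctness_test with hypothetical file, input and expectations: the run
# passes if the program prints 42 first, then any value, then 1 and 2 in either order; the
# returned value is the wall-clock time of the run (or None if the Modelverse terminated early).
def _sketch_correctness_run():
    return run_correctness_test(
        ["my_test.alc"], [5], [42, None, {1, 2}], OPTIMIZATION_LEVEL_INTERPRETER)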
 
 
 def format_output(output):
     """Formats the output of `run_file_to_completion` as a string."""
@@ -270,25 +345,66 @@ def define_perf_test(target_class, test_function, optimization_level):
         'test_%s' % optimization_level.replace('-', '_').lower(),
         lambda self: test_function(self, optimization_level))
 
-def define_perf_tests(target_class, test_function):
+def define_perf_tests(target_class, test_function, optimization_levels=None):
     """Defines performance tests in the given class. Each test calls the given function."""
-    for optimization_level in ALL_OPTIMIZATION_LEVELS:
-        define_perf_test(target_class, test_function, optimization_level)
+    if optimization_levels is None:
+        optimization_levels = ALL_OPTIMIZATION_LEVELS
+    for opt_level in optimization_levels:
+        define_perf_test(target_class, test_function, opt_level)
+
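# Sketch of how define_perf_tests is typically used: it attaches one 'test_<level>' method per
# optimization level to a unittest class. The class, test body and benchmark file below are
# hypothetical.
import unittest

class SketchPerfTests(unittest.TestCase):
    pass

def _sketch_perf_test_body(self, optimization_level):
    # A hypothetical benchmark driven once per optimization level.
    write_perf_to_file(
        "sketch", optimization_level,
        run_perf_test(["my_benchmark.alc"], [1000], optimization_level))

define_perf_tests(
    SketchPerfTests, _sketch_perf_test_body,
    optimization_levels=[OPTIMIZATION_LEVEL_BASELINE_JIT, OPTIMIZATION_LEVEL_FAST_JIT])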
+def get_model_constructor(code):
+    """Compiles the given model code with the modelling grammar and returns its constructors."""
+    # Normalize indentation: convert four-space indents to tabs and strip the common leading indent.
+    code_fragments = code.split("\n")
+    code_fragments = [i for i in code_fragments if i.strip() != ""]
+    code_fragments = [i.replace("    ", "\t") for i in code_fragments]
+    initial_tabs = min([len(i) - len(i.lstrip("\t")) for i in code_fragments])
+    code_fragments = [i[initial_tabs:] for i in code_fragments]
+    code = "\n".join(code_fragments)
+
+    with open("__model.mvc", "w") as f:
+        f.write(code)
+        f.flush()
+
+    constructors = do_compile("__model.mvc", "interface/HUTN/grammars/modelling.g", "M") + ["exit"]
+
+    return constructors
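# Sketch of get_model_constructor on an indented, triple-quoted fragment: the shared leading
# indentation is stripped and four-space indents become tabs before the code is written to
# __model.mvc and compiled. The model text itself is hypothetical.
def _sketch_model_constructor():
    return get_model_constructor("""
        SimpleClassDiagrams Example {
            Class A {}
        }
        """)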
 
 
 DEFAULT_PERF_FILE_NAME = 'perf_data.txt'
 
-def write_perf_to_file(test_name, optimization_level, result, file_name=DEFAULT_PERF_FILE_NAME):
+TOTAL_TIME_QUANTITY = 'total-runtime'
+TEST_TIME_QUANTITY = 'test-runtime'
+
+def write_perf_entry_to_stream(
+        test_name, optimization_level, quantity,
+        result, output_stream):
+    """Writes a performance measurement entry to the given stream."""
+    output_stream.write('%s:%s:%s:%f\n' % (test_name, optimization_level, quantity, result))
+
+def write_perf_to_file(
+        test_name, optimization_level, runtimes, file_name=DEFAULT_PERF_FILE_NAME):
     """Writes performance data to a file."""
+    test_runtime, total_runtime = runtimes
+    with open(file_name, "a") as perf_file:
+        write_perf_entry_to_stream(
+            test_name, optimization_level, TEST_TIME_QUANTITY, test_runtime, perf_file)
+        write_perf_entry_to_stream(
+            test_name, optimization_level, TOTAL_TIME_QUANTITY, total_runtime, perf_file)
+
+def write_total_runtime_to_file(
+        test_name, optimization_level, total_runtime, file_name=DEFAULT_PERF_FILE_NAME):
+    """Writes a total runtime entry to a file."""
     with open(file_name, "a") as perf_file:
-        perf_file.write('%s:%s:%f\n' % (test_name, optimization_level, result))
+        write_perf_entry_to_stream(
+            test_name, optimization_level, TOTAL_TIME_QUANTITY, total_runtime, perf_file)
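# The perf file written above is plain text with one 'test:level:quantity:seconds' entry per
# line, e.g. "my_benchmark:baseline-jit:total-runtime:2.345000" (hypothetical values). A minimal
# sketch:
def _sketch_write_total_runtime():
    write_total_runtime_to_file("my_benchmark", OPTIMIZATION_LEVEL_BASELINE_JIT, 2.345)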
 
 
 def parse_perf_data(file_name):
     """Parses the performance data in the given file."""
-    results = {}
+    results = defaultdict(lambda: defaultdict(list))
     with open(file_name, 'r') as perf_file:
         for line in perf_file.readlines():
-            test_name, optimization_level, result = line.strip().split(':')
-            if optimization_level not in results:
-                results[optimization_level] = []
-            results[optimization_level].append((test_name, result))
-    return sorted(results.items(), key=operator.itemgetter(1))
+            test_name, optimization_level, quantity, result = line.strip().split(':')
+            results[quantity][optimization_level].append((test_name, float(result)))
+    return {
+        quantity: sorted(result_dict.items(), key=operator.itemgetter(0))
+        for quantity, result_dict in results.items()
+    }