Browse Source

Remove deprecated code + do away with __constraint and __model: now .code and .model, so they are mostly hidden

Yentl Van Tendeloo 8 years ago
parent
commit
51a3c0869d

+ 0 - 36
integration/utils.py

@@ -79,39 +79,3 @@ def start_mvc():
     address = "http://127.0.0.1:%s" % port
     proc = execute("run_local_modelverse", [str(port)], wait=False)
     return proc, address
-
-def get_constructor(code):
-    code_fragments = code.split("\n")
-    code_fragments = [i for i in code_fragments if i.strip() != ""]
-    code_fragments = [i.replace("    ", "\t") for i in code_fragments]
-    initial_tabs = min([len(i) - len(i.lstrip("\t")) for i in code_fragments])
-    code_fragments = [i[initial_tabs:] for i in code_fragments]
-    code_fragments.append("")
-    code = "\n".join(code_fragments)
-
-    with open("__constraint.alc", "w") as f:
-        f.write(code)
-        f.flush()
-
-    constructors = do_compile("__constraint.alc", "interface/HUTN/grammars/actionlanguage.g", "CS")
-
-    return constructors
-
-def get_model_constructor(code):
-    # First change multiple spaces to a tab
-    code_fragments = code.split("\n")
-    code_fragments = [i for i in code_fragments if i.strip() != ""]
-    code_fragments = [i.replace("    ", "\t") for i in code_fragments]
-    initial_tabs = min([len(i) - len(i.lstrip("\t")) for i in code_fragments])
-    code_fragments = [i[initial_tabs:] for i in code_fragments]
-    code_fragments.append("")
-    code = "\n".join(code_fragments)
-
-    with open("__model.mvc", "w") as f:
-        f.write(code)
-        f.flush()
-
-    return get_model_constructor_2("__model.mvc")
-
-def get_model_constructor_2(f):
-    return do_compile(f, "interface/HUTN/grammars/modelling.g", "M") + ["exit"]

+ 0 - 47
performance/code/dict_iterate.alc

@@ -1,47 +0,0 @@
-include "primitives.alh"
-
-Void function log_dict_keys(dict : Element, n : Integer):
-	Element keys
-	Element k
-	Integer i
-
-	i = 0
-	keys = dict_keys(dict)
-	while (read_nr_out(keys) > 0):
-		k = set_pop(keys)
-		if (i == n):
-			log(k)
-			i = 0
-
-		i = i + 1
-
-	return!
-
-Element function create_dict(n : Integer):
-	Integer i
-	Element dict
-
-	i = 0
-	dict = create_node()
-
-	while (i < n):
-		dict_add(dict, i, 0)
-		i = i + 1
-
-	return dict!
-
-Void function test_main():
-	Integer size
-	Integer log_skip
-	Integer trip_count
-	Integer i
-
-	size = input()
-	log_skip = input()
-	trip_count = input()
-	i = 0
-	while (i < trip_count):
-		log_dict_keys(create_dict(size), log_skip)
-		i = i + 1
-
-	return!

+ 0 - 11
performance/code/fibonacci.alc

@@ -1,11 +0,0 @@
-include "primitives.alh"
-
-Integer function fib(param : Integer):
-	if (param <= 2):
-		return 1!
-	else:
-		return fib(param - 1) + fib(param - 2)!
-
-Void function test_main():
-	fib(input())
-	return!

+ 0 - 86
performance/code/matrix.alc

@@ -1,86 +0,0 @@
-include "primitives.alh"
-include "random.alh"
-
-Float function v2f(i : Element):
-	return cast_s2f(cast_v2s(i))!
-
-Element function create_random_matrix(n : Integer):
-	Element m
-	Integer i
-	Integer j
-	Element t
-
-	// Construct the matrix first, with as many rows as there are variables
-	// Number of columns is 1 higher
-	i = 0
-	m = create_node()
-	while (i < n):
-		j = 0
-		t = create_node()
-		while (j < (n + 1)):
-			list_append(t, random())
-			j = j + 1
-		list_append(m, t)
-		i = i + 1
-
-	return m!
-
-Void function eliminateGaussJordan(m : Element):
-	Integer i
-	Integer j
-	Integer f
-	Integer g
-	Boolean searching
-	Element t
-	Float divisor
-
-	i = 0
-	j = 0
-
-	while (i < read_nr_out(m)):
-		// Make sure pivot m[i][j] != 0, swapping if necessary
-		while (v2f(m[i][j]) == 0.0):
-			// Is zero, so find row which is not zero
-			f = i + 1
-			searching = True
-			while (searching):
-				if (f >= read_nr_out(m)):
-					// No longer any rows left, so just increase column counter
-					searching = False
-					j = j + 1
-				else:
-					if (v2f(m[f][j]) == 0.0):
-						// Also zero, so continue
-						f = f + 1
-					else:
-						// Found non-zero, so swap row
-						t = v2f(m[f])
-						dict_overwrite(m, f, v2f(m[i]))
-						dict_overwrite(m, i, t)
-						searching = False
-			// If we have increased j, we will just start the loop again (possibly), as m[i][j] might be zero again
-
-		// Pivot in m[i][j] guaranteed to not be 0
-		// Now divide complete row by value of m[i][j] to make it equal 1
-		f = j
-		divisor = v2f(m[i][j])
-		while (f < read_nr_out(m[i])):
-			dict_overwrite(m[i], f, float_division(v2f(m[i][f]), divisor))
-			f = f + 1
-
-		// Eliminate all rows in the j-th column, except the i-th row
-		f = 0
-		while (f < read_nr_out(m)):
-			if (bool_not(f == i)):
-				g = j
-				divisor = v2f(m[f][j])
-				while (g < read_nr_out(m[f])):
-					dict_overwrite(m[f], g, v2f(m[f][g]) - (divisor * v2f(m[i][g])))
-					g = g + 1
-			f = f + 1
-
-		// Increase row and column
-		i = i + 1
-		j = j + 1
-
-	return !

+ 0 - 7
performance/code/matrix_create.alc

@@ -1,7 +0,0 @@
-include "primitives.alh"
-
-Element function create_random_matrix(n : Integer)
-
-Void function test_main():
-	create_random_matrix(input())
-	return!

+ 0 - 10
performance/code/matrix_gauss_jordan.alc

@@ -1,10 +0,0 @@
-include "primitives.alh"
-
-Element function create_random_matrix(n : Integer)
-Void function eliminateGaussJordan(m : Element)
-
-Void function test_main():
-	Element m
-	m = create_random_matrix(input())
-	eliminateGaussJordan(m)
-	return!

+ 0 - 17
performance/code/test_harness.alc

@@ -1,17 +0,0 @@
-include "primitives.alh"
-include "jit.alh"
-
-Void function test_main()
-
-Void function main():
-	Integer start_time
-	Integer end_time
-	start_time = time()
-	test_main()
-	end_time = time()
-	output(end_time - start_time)
-	
-	while (True):
-		output(input())
-	
-	return!

+ 0 - 233
performance/perf2tex.py

@@ -1,233 +0,0 @@
-"""Converts performance data files (as produced by utils.py) to LaTeX charts."""
-
-import argparse
-import colorsys
-import utils
-
-# Generated LaTeX is based on the accepted answer to
-# http://tex.stackexchange.com/questions/101320/grouped-bar-chart
-
-# pylint: disable=I0011,W0141
-
-COLOR_SCHEME_MIN_COLOR = (36, 255, 106)
-COLOR_SCHEME_MAX_COLOR = (216, 33, 0)
-
-LATEX_HEADER = r"""\documentclass[12pt,a4paper,onecolumn,openright]{report}
-\usepackage[landscape]{geometry}
-\usepackage{xcolor}
-\usepackage{pgfplots}
-\usepackage{tikz}
-\usepgfplotslibrary{units}
-
-% Define bar chart colors
-%"""
-
-LATEX_DOCUMENT_HEADER = r"""\begin{document}
-\begin{tikzpicture}"""
-
-LATEX_DOCUMENT_FOOTER = r"""\end{tikzpicture}
-\end{document}"""
-
-def encode_latex_string(value):
-    """Encodes the given string as a LaTeX string."""
-    # I guess this is good enough for now. This may need to be
-    # revisited if we encounter more complicated names.
-    return '{%s}' % value.replace('_', '\\_')
-
-def assemble_latex_chart(optimization_levels, color_defs, test_names, data):
-    """Assembles a LaTeX chart from the given components."""
-    lines = []
-    lines.append(LATEX_HEADER)
-    for color_name, (red, green, blue) in color_defs:
-        lines.append(r'\definecolor{%s}{HTML}{%02X%02X%02X}' % (color_name, red, green, blue))
-    lines.append(LATEX_DOCUMENT_HEADER)
-    lines.append(r"""
-    \begin{axis}[
-        width = 0.85*\textwidth,
-        height = 8cm,
-        major x tick style = transparent,
-        ybar=2*\pgflinewidth,
-        bar width=14pt,
-        ymajorgrids = true,
-        ylabel = {Run time},
-        symbolic x coords={%s},
-        xtick = data,
-        scaled y ticks = false,
-        enlarge x limits=0.25,
-        ymin=0,
-        y unit=s,
-        legend cell align=left,
-        legend style={
-                at={(1,1.05)},
-                anchor=south east,
-                column sep=1ex
-        }
-    ]""" % ','.join(map(encode_latex_string, test_names)))
-    for color_name, points in data:
-        lines.append(r"""
-        \addplot[style={%s,fill=%s,mark=none}]
-            coordinates {%s};""" % (
-                color_name, color_name,
-                ' '.join([('(%s,%s)' % (encode_latex_string(name), measurement))
-                          for name, measurement in points])))
-    lines.append(r"""
-        \legend{%s}""" % ','.join(map(encode_latex_string, optimization_levels)))
-    lines.append(r"""
-    \end{axis}""")
-    lines.append(LATEX_DOCUMENT_FOOTER)
-    return '\n'.join(lines)
-
-def create_latex_chart(perf_data):
-    """Creates a LaTeX chart for the given performance data."""
-    sorted_opt_levels = sort_by_runtime(perf_data)
-    color_scheme = generate_color_scheme(sorted_opt_levels)
-    opt_levels = []
-    color_defs = []
-    test_names = []
-    data = []
-    for i, optimization_level in enumerate(sorted_opt_levels):
-        measurements = perf_data[optimization_level]
-        color = color_scheme[optimization_level]
-        color_name = 'chartColor%d' % i
-        opt_levels.append(optimization_level)
-        color_defs.append((color_name, color))
-        data.append((color_name, measurements.items()))
-        for name, _ in measurements.items():
-            if name not in test_names:
-                test_names.append(name)
-
-    return assemble_latex_chart(opt_levels, color_defs, test_names, data)
-
-def get_mean_runtimes(perf_data):
-    """Computes the mean run-time of every optimization level in the given
-       performance data."""
-    return {
-        opt_level: utils.mean(perf_data[opt_level].values())
-        for opt_level in perf_data.keys()
-    }
-
-def get_baseline_optimization_level(perf_data):
-    """Gets a baseline optimization level from the given performance data.
-       This baseline optimization level is guaranteed to be for every test case.
-       If no baseline optimization level can be found, then None is returned."""
-    # First find the name of all test cases.
-    all_test_names = set()
-    for optimization_level, measurements in perf_data.items():
-        all_test_names.update(measurements.keys())
-
-    # Filter optimization levels which are used for every test case.
-    candidate_opt_levels = []
-    for optimization_level, measurements in perf_data.items():
-        if len(all_test_names) == len(measurements):
-            candidate_opt_levels.append(optimization_level)
-
-    if len(candidate_opt_levels) == 0:
-        # Looks like there is no baseline optimization level.
-        return None
-
-    # Pick the optimization level with the lowest total run-time as the baseline.
-    return min(candidate_opt_levels, key=lambda opt_level: sum(perf_data[opt_level].values()))
-
-def get_relative_measurements(perf_data, baseline_optimization_level):
-    """Computes a map of measurements that are relative to the given optimization level."""
-    results = {}
-    for optimization_level, measurements in perf_data.items():
-        results[optimization_level] = {}
-        for test_name, data_point in measurements.items():
-            results[optimization_level][test_name] = (
-                data_point / perf_data[baseline_optimization_level][test_name])
-
-    return results
-
-def perf_list_to_dict(perf_list):
-    """Converts performance data from a list representation to a dictionary representation."""
-    return {opt_level: dict(tests) for opt_level, tests in perf_list}
-
-def perf_dict_to_list(perf_dict):
-    """Converts performance data from a dictionary representation to a list representation."""
-    return [(opt_level, tests.items()) for opt_level, tests in perf_dict.items()]
-
-def interpolate(value_range, index, length):
-    """Uses an index and a length to interpolate in the given range."""
-    min_val, max_val = value_range
-    if length == 1:
-        return max_val
-    else:
-        return min_val + float(index) * (max_val - min_val) / float(length - 1)
-
-def sort_by_runtime(perf_data):
-    """Sorts the optimization levels by mean relative runtimes."""
-    baseline_opt_level = get_baseline_optimization_level(perf_data)
-    relative_perf = get_relative_measurements(perf_data, baseline_opt_level)
-    # Sort the optimization levels by their mean runtimes.
-    mean_runtimes = get_mean_runtimes(relative_perf)
-    return list(sorted(mean_runtimes.keys(), key=lambda opt_level: mean_runtimes[opt_level]))
-
-def generate_color_scheme(sorted_opt_levels):
-    """Assigns a color to every optimization level in the given performance data."""
-    # Assign colors to the optimization levels.
-    color_scheme = {}
-    min_hue, min_sat, min_val = colorsys.rgb_to_hsv(
-        *[c / float(255) for c in COLOR_SCHEME_MIN_COLOR])
-    max_hue, max_sat, max_val = colorsys.rgb_to_hsv(
-        *[c / float(255) for c in COLOR_SCHEME_MAX_COLOR])
-    for i, opt_level in enumerate(sorted_opt_levels):
-        hue = interpolate((min_hue, max_hue), i, len(sorted_opt_levels))
-        sat = interpolate((min_sat, max_sat), i, len(sorted_opt_levels))
-        val = interpolate((min_val, max_val), i, len(sorted_opt_levels))
-        color = [component * 255 for component in colorsys.hsv_to_rgb(hue, sat, val)]
-        color_scheme[opt_level] = color
-
-    return color_scheme
-
-def main():
-    arg_parser = argparse.ArgumentParser()
-    arg_parser.add_argument('input', help='The performance data file.')
-    arg_parser.add_argument(
-        '-q', '--quantity', type=str,
-        help="The quantity to build a bar chart for. Defaults to '%s'" % utils.TOTAL_TIME_QUANTITY,
-        default=utils.TOTAL_TIME_QUANTITY)
-    arg_parser.add_argument(
-        '-O', '--opt', type=str, nargs='*',
-        help="Filters on optimization levels.")
-    arg_parser.add_argument(
-        '-t', '--test', type=str, nargs='*',
-        help="Filters on tests.")
-    arg_parser.add_argument(
-        '-r', '--relative', action='store_const', const=True,
-        help="Produce bars that are relative to some baseline.", default=False)
-
-    args = arg_parser.parse_args()
-
-    perf_data = utils.parse_perf_data(args.input)[args.quantity]
-
-    if args.opt:
-        optimization_set = set(args.opt)
-        perf_data = [
-            (optimization_level, measurements)
-            for optimization_level, measurements in perf_data
-            if optimization_level in optimization_set]
-
-    if args.test:
-        test_set = set(args.test)
-        new_perf_data = []
-        for optimization_level, measurements in perf_data:
-            new_measurements = []
-            for test_name, data_point in measurements:
-                if test_name in test_set:
-                    new_measurements.append((test_name, data_point))
-
-            if len(new_measurements) > 0:
-                new_perf_data.append((optimization_level, new_measurements))
-        perf_data = new_perf_data
-
-    perf_data_dict = perf_list_to_dict(perf_data)
-
-    if args.relative:
-        baseline_opt_level = get_baseline_optimization_level(perf_data_dict)
-        perf_data_dict = get_relative_measurements(perf_data_dict, baseline_opt_level)
-
-    print(create_latex_chart(perf_data_dict))
-
-if __name__ == '__main__':
-    main()

+ 0 - 13
performance/test_dict_iterate.py

@@ -1,13 +0,0 @@
-import unittest
-import utils
-
-class TestDictIterate(unittest.TestCase):
-    def dict_iterate(self, optimization_level):
-        utils.write_perf_to_file(
-            'dict_iterate', optimization_level,
-            utils.run_perf_test(
-                ["test_harness.alc", "dict_iterate.alc", "primitives.alc", "jit.alc"],
-                [50, 10, 100],
-                optimization_level))
-
-utils.define_perf_tests(TestDictIterate, TestDictIterate.dict_iterate)

+ 0 - 13
performance/test_fibonacci.py

@@ -1,13 +0,0 @@
-import unittest
-import utils
-
-class TestFibonacci(unittest.TestCase):
-    def fibonacci(self, optimization_level):
-        utils.write_perf_to_file(
-            'fibonacci', optimization_level,
-            utils.run_perf_test(
-                ["test_harness.alc", "fibonacci.alc", "primitives.alc", "jit.alc"],
-                [20],
-                optimization_level))
-
-utils.define_perf_tests(TestFibonacci, TestFibonacci.fibonacci)

+ 0 - 16
performance/test_matrix_create.py

@@ -1,16 +0,0 @@
-import unittest
-import utils
-
-
-class TestMatrixCreate(unittest.TestCase):
-    def create_matrix(self, optimization_level):
-        utils.write_perf_to_file(
-            'matrix_create', optimization_level,
-            utils.run_perf_test(
-                ["test_harness.alc", "matrix.alc",
-                 "matrix_create.alc", "primitives.alc",
-                 "random.alc", "jit.alc"],
-                [100],
-                optimization_level))
-
-utils.define_perf_tests(TestMatrixCreate, TestMatrixCreate.create_matrix)

+ 0 - 16
performance/test_matrix_gauss_jordan.py

@@ -1,16 +0,0 @@
-import unittest
-import utils
-
-
-class TestMatrixGaussJordan(unittest.TestCase):
-    def matrix_gauss_jordan(self, optimization_level):
-        utils.write_perf_to_file(
-            'matrix_gauss_jordan', optimization_level,
-            utils.run_perf_test(
-                ["test_harness.alc", "matrix.alc",
-                 "matrix_gauss_jordan.alc", "primitives.alc",
-                 "random.alc", "jit.alc"],
-                [25],
-                optimization_level))
-
-utils.define_perf_tests(TestMatrixGaussJordan, TestMatrixGaussJordan.matrix_gauss_jordan)

+ 0 - 119
performance/test_mvc_model_overwrite.py

@@ -1,119 +0,0 @@
-import unittest
-import utils
-
-all_files = [
-    "core/mini_modify.alc",
-    "core/core_formalism.mvc",
-    "core/core_algorithm.alc",
-    "primitives.alc",
-    "object_operations.alc",
-    "conformance_scd.alc",
-    "library.alc",
-    "transform.alc",
-    "model_management.alc",
-    "ramify.alc",
-    "metamodels.alc",
-    "random.alc",
-    "constructors.alc",
-    "modelling.alc",
-    "compilation_manager.alc",
-]
-
-
-class TestMvCModelOverwrite(unittest.TestCase):
-    def model_overwrite(self, optimization_level):
-        utils.write_total_runtime_to_file(
-            'mvc_model_overwrite', optimization_level,
-            utils.run_correctness_test(all_files, [
-                "root",
-                "root",
-                "root",
-                "model_add",
-                "SimpleClassDiagrams",
-                "Empty",
-                "exit",
-                "model_list_full",
-                "model_modify",
-                "Empty",
-                "instantiate",
-                "Class",
-                "A",
-                "exit",
-                "model_list_full",
-                "model_overwrite",
-                "Empty",
-                "instantiate_node",
-                "",
-                "Class",
-                "B",
-                "instantiate_node",
-                "",
-                "Class",
-                "C",
-                "exit",
-                "model_list_full",
-                "model_modify",
-                "Empty",
-                "list",
-                "exit",
-            ], [
-                "Desired username for admin user?",
-                "Desired password for admin user?",
-                "Please repeat the password",
-                "Passwords match!",
-                "Welcome to the Model Management Interface v2.0!",
-                "Use the 'help' command for a list of possible commands",
-                "Ready for command...",
-                "Creating new model!",
-                "Model type?",
-                "Model name?",
-                "Waiting for model constructors...",
-                "Model upload success!",
-                "Ready for command...",
-                set([
-                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
-                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
-                    "  200  root nobody   Empty : SimpleClassDiagrams",
-                    "  200  root admin   core : CoreFormalism"
-                ]),
-                "Ready for command...",
-                "Which model do you want to modify?",
-                "Model loaded, ready for commands!",
-                "Use 'help' command for a list of possible commands",
-                "Please give your command.",
-                "Type to instantiate?",
-                "Name of new element?",
-                "Instantiation successful!",
-                "Please give your command.",
-                "Ready for command...",
-                set([
-                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
-                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
-                    "  200  root nobody   Empty : SimpleClassDiagrams",
-                    "  200  root admin   core : CoreFormalism"
-                ]),
-                "Ready for command...",
-                "Which model to overwrite?",
-                "Waiting for model constructors...",
-                "Model overwrite success!",
-                "Ready for command...",
-                set([
-                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
-                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
-                    "  200  root nobody   Empty : SimpleClassDiagrams",
-                    "  200  root admin   core : CoreFormalism"
-                ]),
-                "Ready for command...",
-                "Which model do you want to modify?",
-                "Model loaded, ready for commands!",
-                "Use 'help' command for a list of possible commands",
-                "Please give your command.",
-                "List of all elements:",
-                set(["  B : Class", "  C : Class"]),
-                "Please give your command.",
-                "Ready for command...",
-            ], optimization_level))
-
-
-utils.define_perf_tests(TestMvCModelOverwrite,
-                        TestMvCModelOverwrite.model_overwrite)

+ 0 - 105
performance/test_mvc_print_upload.py

@@ -1,105 +0,0 @@
-import unittest
-import utils
-
-all_files = [
-    "core/mini_modify.alc",
-    "core/core_formalism.mvc",
-    "core/core_algorithm.alc",
-    "primitives.alc",
-    "object_operations.alc",
-    "conformance_scd.alc",
-    "library.alc",
-    "transform.alc",
-    "model_management.alc",
-    "ramify.alc",
-    "metamodels.alc",
-    "random.alc",
-    "constructors.alc",
-    "modelling.alc",
-    "compilation_manager.alc",
-]
-
-
-class TestMvCPrintUpload(unittest.TestCase):
-    def print_upload(self, optimization_level):
-        utils.write_total_runtime_to_file(
-            'mvc_print_upload', optimization_level,
-            utils.run_correctness_test(all_files, [
-                "root",
-                "root",
-                "root",
-                "model_add",
-                "SimpleClassDiagrams",
-                "PetriNets",
-            ] + utils.get_model_constructor(
-                open("integration/code/pn_runtime.mvc", "r").read()
-            ) + [
-                "model_list_full",
-                "transformation_add_MT_language",
-                "PetriNets",
-                "",
-                "PetriNets_RAM",
-                "model_list_full",
-                "transformation_add_MT",
-                "PetriNets_RAM",
-                "PetriNets",
-                "",
-                "",
-                "PetriNets_Print",
-            ] + utils.get_model_constructor(
-                open("integration/code/pn_print.mvc", "r").read()
-            ) + [
-                "transformation_list_full",
-            ], [
-                "Desired username for admin user?",
-                "Desired password for admin user?",
-                "Please repeat the password",
-                "Passwords match!",
-                "Welcome to the Model Management Interface v2.0!",
-                "Use the 'help' command for a list of possible commands",
-                "Ready for command...",
-                "Creating new model!",
-                "Model type?",
-                "Model name?",
-                "Waiting for model constructors...",
-                "Model upload success!",
-                "Ready for command...",
-                set([
-                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
-                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
-                    "  200  root nobody   PetriNets : SimpleClassDiagrams",
-                    "  200  root admin   core : CoreFormalism"
-                ]),
-                "Ready for command...",
-                "Formalisms to include (terminate with empty string)?",
-                "Name of the RAMified transformation metamodel?",
-                "Ready for command...",
-                set([
-                    "  221  root admin   SimpleClassDiagrams : SimpleClassDiagrams",
-                    "  221  root admin   CoreFormalism : SimpleClassDiagrams",
-                    "  200  root nobody   PetriNets : SimpleClassDiagrams",
-                    "  200  root nobody   __merged_PetriNets_RAM : SimpleClassDiagrams",
-                    "  200  root nobody   PetriNets_RAM : SimpleClassDiagrams",
-                    "  200  root admin   core : CoreFormalism"
-                ]),
-                "Ready for command...",
-                "RAMified metamodel to use?",
-                "Supported metamodels:",
-                set([
-                    "  PetriNets",
-                ]),
-                "",
-                "Which ones do you want to use as source (empty string to finish)?",
-                "Model added as source",
-                "Which ones do you want to use as target (empty string to finish)?",
-                "Name of new transformation?",
-                "Waiting for model constructors...",
-                "Ready for command...",
-                set([
-                    "  200  root nobody   [ModelTransformation] PetriNets_Print : PetriNets_RAM"
-                ]),
-                "Ready for command...",
-            ], optimization_level))
-
-
-utils.define_perf_tests(TestMvCPrintUpload, TestMvCPrintUpload.print_upload)

+ 0 - 338
performance/test_mvc_simulate.py

@@ -1,338 +0,0 @@
-import unittest
-import utils
-
-all_files = [
-    "core/mini_modify.alc",
-    "core/core_formalism.mvc",
-    "core/core_algorithm.alc",
-    "primitives.alc",
-    "object_operations.alc",
-    "conformance_scd.alc",
-    "library.alc",
-    "transform.alc",
-    "model_management.alc",
-    "ramify.alc",
-    "metamodels.alc",
-    "random.alc",
-    "constructors.alc",
-    "modelling.alc",
-    "compilation_manager.alc",
-]
-
-
-class TestMvCSimulate(unittest.TestCase):
-    def transform_add_MT_pn_simulate(self, optimization_level):
-        utils.write_total_runtime_to_file(
-            'mvc_simulate', optimization_level,
-            utils.run_correctness_test(all_files,
-            [ "root", "root", "root",
-                "model_add",
-                    "SimpleClassDiagrams",
-                    "PetriNets",
-                    ] + utils.get_model_constructor(open("integration/code/pn_design.mvc", "r").read()) + [
-                "model_add",
-                    "SimpleClassDiagrams",
-                    "PetriNets_Runtime",
-                    ] + utils.get_model_constructor(open("integration/code/pn_runtime.mvc", "r").read()) + [
-                "model_add",
-                    "PetriNets",
-                    "my_pn",
-                    ] + utils.get_model_constructor(open("integration/code/pn_design_model.mvc", "r").read()) + [
-                "model_list",
-                "transformation_add_MT_language",
-                    "PetriNets_Runtime",
-                    "PetriNets",
-                    "",
-                    "PetriNets_RAM",
-                "model_list",
-                "model_modify",
-                    "__merged_PetriNets_RAM",
-                        "instantiate",
-                            "Association",
-                            "D2R_PlaceLink",
-                            "PetriNets/Place",
-                            "PetriNets_Runtime/Place",
-                        "instantiate",
-                            "Association",
-                            "D2R_TransitionLink",
-                            "PetriNets/Transition",
-                            "PetriNets_Runtime/Transition",
-                        "instantiate",
-                            "Association",
-                            "R2D_PlaceLink",
-                            "PetriNets_Runtime/Place",
-                            "PetriNets/Place",
-                        "instantiate",
-                            "Association",
-                            "R2D_TransitionLink",
-                            "PetriNets_Runtime/Transition",
-                            "PetriNets/Transition",
-                        "exit",
-                "transformation_RAMify",
-                    "__merged_PetriNets_RAM",
-                    "PetriNets_RAM",
-                "transformation_add_MT",
-                    "PetriNets_RAM",
-                    "PetriNets",
-                    "",
-                    "PetriNets_Runtime",
-                    "",
-                    "pn_design_to_runtime",
-                    ] + utils.get_model_constructor(open("integration/code/pn_design_to_runtime.mvc", "r").read()) + [
-                "transformation_add_MT",
-                    "PetriNets_RAM",
-                    "PetriNets_Runtime",
-                    "",
-                    "PetriNets",
-                    "",
-                    "pn_runtime_to_design",
-                    ] + utils.get_model_constructor(open("integration/code/pn_runtime_to_design.mvc", "r").read()) + [
-                "transformation_add_MT",
-                    "PetriNets_RAM",
-                    "PetriNets_Runtime",
-                    "",
-                    "PetriNets_Runtime",
-                    "",
-                    "pn_step",
-                    ] + utils.get_model_constructor(open("integration/code/pn_simulate.mvc", "r").read()) + [
-                "transformation_add_MT",
-                    "PetriNets_RAM",
-                    "PetriNets",
-                    "",
-                    "",
-                    "pn_print",
-                    ] + utils.get_model_constructor(open("integration/code/pn_print.mvc", "r").read()) + [
-                "model_list",
-                "transformation_list",
-                "transformation_execute",
-                "pn_print",
-                "my_pn",
-                "transformation_execute",
-                "pn_design_to_runtime",
-                "my_pn",
-                "my_pn_runtime",
-                "transformation_execute",
-                "pn_step",
-                "my_pn_runtime",
-                "my_pn_runtime",
-                "transformation_execute",
-                "pn_runtime_to_design",
-                "my_pn_runtime",
-                "my_pn",
-                "transformation_execute",
-                "pn_print",
-                "my_pn",
-            ],
-            [   # bootup phase
-                "Desired username for admin user?",
-                "Desired password for admin user?",
-                "Please repeat the password",
-                "Passwords match!",
-                "Welcome to the Model Management Interface v2.0!",
-                "Use the 'help' command for a list of possible commands",
-                "Ready for command...",
-                # model_add
-                "Creating new model!",
-                "Model type?",
-                "Model name?",
-                "Waiting for model constructors...",
-                "Model upload success!",
-                "Ready for command...",
-                # model_add
-                "Creating new model!",
-                "Model type?",
-                "Model name?",
-                "Waiting for model constructors...",
-                "Model upload success!",
-                "Ready for command...",
-                # model_add
-                "Creating new model!",
-                "Model type?",
-                "Model name?",
-                "Waiting for model constructors...",
-                "Model upload success!",
-                "Ready for command...",
-                # model_list
-                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
-                     "  CoreFormalism : SimpleClassDiagrams",
-                     "  PetriNets : SimpleClassDiagrams",
-                     "  my_pn : PetriNets",
-                     "  PetriNets_Runtime : SimpleClassDiagrams",
-                     "  core : CoreFormalism"]),
-                "Ready for command...",
-                # transformation_add_MT_language
-                "Formalisms to include (terminate with empty string)?",
-                "Name of the RAMified transformation metamodel?",
-                "Ready for command...",
-                # model_list
-                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
-                     "  CoreFormalism : SimpleClassDiagrams",
-                     "  PetriNets_Runtime : SimpleClassDiagrams",
-                     "  PetriNets : SimpleClassDiagrams",
-                     "  __merged_PetriNets_RAM : SimpleClassDiagrams",
-                     "  PetriNets_RAM : SimpleClassDiagrams",
-                     "  my_pn : PetriNets",
-                     "  core : CoreFormalism"]),
-                "Ready for command...",
-                # model_modify
-                "Which model do you want to modify?",
-                "Model loaded, ready for commands!",
-                "Use 'help' command for a list of possible commands",
-                "Please give your command.",
-                # instantiate 1
-                "Type to instantiate?",
-                "Name of new element?",
-                "Source name?",
-                "Destination name?",
-                "Instantiation successful!",
-                "Please give your command.",
-                # instantiate 2
-                "Type to instantiate?",
-                "Name of new element?",
-                "Source name?",
-                "Destination name?",
-                "Instantiation successful!",
-                "Please give your command.",
-                # instantiate 3
-                "Type to instantiate?",
-                "Name of new element?",
-                "Source name?",
-                "Destination name?",
-                "Instantiation successful!",
-                "Please give your command.",
-                # instantiate 4
-                "Type to instantiate?",
-                "Name of new element?",
-                "Source name?",
-                "Destination name?",
-                "Instantiation successful!",
-                "Please give your command.",
-                "Ready for command...",
-                # transformation_RAMify
-                "Which metamodel do you want to RAMify?",
-                "Where do you want to store the RAMified metamodel?",
-                "Ready for command...",
-                # transformation_add_MT
-                "RAMified metamodel to use?",
-                "Supported metamodels:",
-                set(["  PetriNets",
-                     "  PetriNets_Runtime",
-                    ]),
-                "",
-                "Which ones do you want to use as source (empty string to finish)?",
-                "Model added as source",
-                "Which ones do you want to use as target (empty string to finish)?",
-                "Model added as target",
-                "Name of new transformation?",
-                "Waiting for model constructors...",
-                "Ready for command...",
-                # transformation_add_MT
-                "RAMified metamodel to use?",
-                "Supported metamodels:",
-                set(["  PetriNets",
-                     "  PetriNets_Runtime",
-                    ]),
-                "",
-                "Which ones do you want to use as source (empty string to finish)?",
-                "Model added as source",
-                "Which ones do you want to use as target (empty string to finish)?",
-                "Model added as target",
-                "Name of new transformation?",
-                "Waiting for model constructors...",
-                "Ready for command...",
-                # transformation_add_MT
-                "RAMified metamodel to use?",
-                "Supported metamodels:",
-                set(["  PetriNets",
-                     "  PetriNets_Runtime",
-                    ]),
-                "",
-                "Which ones do you want to use as source (empty string to finish)?",
-                "Model added as source",
-                "Which ones do you want to use as target (empty string to finish)?",
-                "Model added as target",
-                "Name of new transformation?",
-                "Waiting for model constructors...",
-                "Ready for command...",
-                # transformation_add_MT
-                "RAMified metamodel to use?",
-                "Supported metamodels:",
-                set(["  PetriNets",
-                     "  PetriNets_Runtime",
-                    ]),
-                "",
-                "Which ones do you want to use as source (empty string to finish)?",
-                "Model added as source",
-                "Which ones do you want to use as target (empty string to finish)?",
-                "Name of new transformation?",
-                "Waiting for model constructors...",
-                "Ready for command...",
-                # model_list
-                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
-                     "  CoreFormalism : SimpleClassDiagrams",
-                     "  PetriNets_Runtime : SimpleClassDiagrams",
-                     "  PetriNets : SimpleClassDiagrams",
-                     "  pn_print : PetriNets_RAM",
-                     "  pn_design_to_runtime : PetriNets_RAM",
-                     "  pn_runtime_to_design : PetriNets_RAM",
-                     "  pn_step : PetriNets_RAM",
-                     "  __merged_PetriNets_RAM : SimpleClassDiagrams",
-                     "  PetriNets_RAM : SimpleClassDiagrams",
-                     "  my_pn : PetriNets",
-                     "  core : CoreFormalism"]),
-                "Ready for command...",
-                # transformation_list
-                set(["[ModelTransformation] pn_print : PetriNets_RAM",
-                     "[ModelTransformation] pn_design_to_runtime : PetriNets_RAM",
-                     "[ModelTransformation] pn_runtime_to_design : PetriNets_RAM",
-                     "[ModelTransformation] pn_step : PetriNets_RAM"]),
-                "Ready for command...",
-                # transformation_execute (pn_print)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets",
-                set(['"p1" --> 1',
-                     '"p2" --> 2',
-                     '"p3" --> 3',
-                    ]),
-                "Transformation executed with result: True",
-                "Ready for command...",
-                # transformation_execute (pn_design_to_runtime)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets",
-                "Which model to create for target element PetriNets_Runtime",
-                "Transformation executed with result: True",
-                "Ready for command...",
-                # transformation_execute (pn_step)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets_Runtime",
-                "Which model to create for target element PetriNets_Runtime",
-                "Transformation executed with result: True",
-                "Ready for command...",
-                # transformation_execute (pn_runtime_to_design)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets_Runtime",
-                "Which model to create for target element PetriNets",
-                "Transformation executed with result: True",
-                "Ready for command...",
-                # transformation_execute (pn_print)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets",
-                set(['"p1" --> 0',
-                     '"p2" --> 1',
-                     '"p3" --> 5',
-                    ]),
-                "Transformation executed with result: True",
-                "Ready for command...",
-            ],
-            optimization_level))
-
-
-utils.define_perf_tests(
-    TestMvCSimulate,
-    TestMvCSimulate.transform_add_MT_pn_simulate,
-    optimization_levels=[
-        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS,
-        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS,
-        utils.OPTIMIZATION_LEVEL_BASELINE_JIT,
-        utils.OPTIMIZATION_LEVEL_FAST_JIT
-    ])

+ 0 - 344
performance/test_mvc_simulate_larger.py

@@ -1,344 +0,0 @@
-import unittest
-import utils
-
-all_files = [
-    "core/mini_modify.alc",
-    "core/core_formalism.mvc",
-    "core/core_algorithm.alc",
-    "primitives.alc",
-    "object_operations.alc",
-    "conformance_scd.alc",
-    "library.alc",
-    "transform.alc",
-    "model_management.alc",
-    "ramify.alc",
-    "metamodels.alc",
-    "random.alc",
-    "constructors.alc",
-    "modelling.alc",
-    "compilation_manager.alc",
-]
-
-
-class TestMvCSimulateLarger(unittest.TestCase):
-    def transform_add_MT_pn_simulate_larger(self, optimization_level):
-        def step_and_print():
-            return [
-                # transformation_execute (pn_step)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets_Runtime",
-                "Which model to create for target element PetriNets_Runtime",
-                "Transformation executed with result: True",
-                "Ready for command...",
-                # transformation_execute (pn_runtime_to_design)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets_Runtime",
-                "Which model to create for target element PetriNets",
-                "Transformation executed with result: True",
-                "Ready for command...",
-                # transformation_execute (pn_print)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets",
-                None,
-                None,
-                None,
-                "Transformation executed with result: True",
-                "Ready for command...",
-            ]
-
-        utils.write_total_runtime_to_file(
-            'mvc_large', optimization_level,
-            utils.run_correctness_test(all_files,
-            ["root", "root", "root",
-                "model_add",
-                    "SimpleClassDiagrams",
-                    "PetriNets",
-                    ] + utils.get_model_constructor(open("integration/code/pn_design.mvc", "r").read()) + [
-                "model_add",
-                    "SimpleClassDiagrams",
-                    "PetriNets_Runtime",
-                    ] + utils.get_model_constructor(open("integration/code/pn_runtime.mvc", "r").read()) + [
-                "model_add",
-                    "PetriNets",
-                    "my_pn",
-                    ] + utils.get_model_constructor(open("integration/code/pn_design_model_larger.mvc", "r").read()) + [
-                "model_list",
-                "transformation_add_MT_language",
-                    "PetriNets_Runtime",
-                    "PetriNets",
-                    "",
-                    "PetriNets_RAM",
-                "model_list",
-                "model_modify",
-                    "__merged_PetriNets_RAM",
-                        "instantiate",
-                            "Association",
-                            "D2R_PlaceLink",
-                            "PetriNets/Place",
-                            "PetriNets_Runtime/Place",
-                        "instantiate",
-                            "Association",
-                            "D2R_TransitionLink",
-                            "PetriNets/Transition",
-                            "PetriNets_Runtime/Transition",
-                        "instantiate",
-                            "Association",
-                            "R2D_PlaceLink",
-                            "PetriNets_Runtime/Place",
-                            "PetriNets/Place",
-                        "instantiate",
-                            "Association",
-                            "R2D_TransitionLink",
-                            "PetriNets_Runtime/Transition",
-                            "PetriNets/Transition",
-                        "exit",
-                "transformation_RAMify",
-                    "__merged_PetriNets_RAM",
-                    "PetriNets_RAM",
-                "transformation_add_MT",
-                    "PetriNets_RAM",
-                    "PetriNets",
-                    "",
-                    "PetriNets_Runtime",
-                    "",
-                    "pn_design_to_runtime",
-                    ] + utils.get_model_constructor(open("integration/code/pn_design_to_runtime.mvc", "r").read()) + [
-                "transformation_add_MT",
-                    "PetriNets_RAM",
-                    "PetriNets_Runtime",
-                    "",
-                    "PetriNets",
-                    "",
-                    "pn_runtime_to_design",
-                    ] + utils.get_model_constructor(open("integration/code/pn_runtime_to_design.mvc", "r").read()) + [
-                "transformation_add_MT",
-                    "PetriNets_RAM",
-                    "PetriNets_Runtime",
-                    "",
-                    "PetriNets_Runtime",
-                    "",
-                    "pn_step",
-                    ] + utils.get_model_constructor(open("integration/code/pn_simulate.mvc", "r").read()) + [
-                "transformation_add_MT",
-                    "PetriNets_RAM",
-                    "PetriNets",
-                    "",
-                    "",
-                    "pn_print",
-                    ] + utils.get_model_constructor(open("integration/code/pn_print.mvc", "r").read()) + [
-                "model_list",
-                "transformation_list",
-                "transformation_execute",
-                "pn_print",
-                "my_pn",
-                "transformation_execute",
-                "pn_design_to_runtime",
-                "my_pn",
-                "my_pn_runtime", ] + [
-                    "transformation_execute",
-                    "pn_step",
-                    "my_pn_runtime",
-                    "my_pn_runtime",
-                    "transformation_execute",
-                    "pn_runtime_to_design",
-                    "my_pn_runtime",
-                    "my_pn",
-                    "transformation_execute",
-                    "pn_print",
-                    "my_pn",
-                    ] * 10 + [
-            ],
-            [   # bootup phase
-                "Desired username for admin user?",
-                "Desired password for admin user?",
-                "Please repeat the password",
-                "Passwords match!",
-                "Welcome to the Model Management Interface v2.0!",
-                "Use the 'help' command for a list of possible commands",
-                "Ready for command...",
-                # model_add
-                "Creating new model!",
-                "Model type?",
-                "Model name?",
-                "Waiting for model constructors...",
-                "Model upload success!",
-                "Ready for command...",
-                # model_add
-                "Creating new model!",
-                "Model type?",
-                "Model name?",
-                "Waiting for model constructors...",
-                "Model upload success!",
-                "Ready for command...",
-                # model_add
-                "Creating new model!",
-                "Model type?",
-                "Model name?",
-                "Waiting for model constructors...",
-                "Model upload success!",
-                "Ready for command...",
-                # model_list
-                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
-                     "  CoreFormalism : SimpleClassDiagrams",
-                     "  PetriNets : SimpleClassDiagrams",
-                     "  my_pn : PetriNets",
-                     "  PetriNets_Runtime : SimpleClassDiagrams",
-                     "  core : CoreFormalism"]),
-                "Ready for command...",
-                # transformation_add_MT_language
-                "Formalisms to include (terminate with empty string)?",
-                "Name of the RAMified transformation metamodel?",
-                "Ready for command...",
-                # model_list
-                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
-                     "  CoreFormalism : SimpleClassDiagrams",
-                     "  PetriNets_Runtime : SimpleClassDiagrams",
-                     "  PetriNets : SimpleClassDiagrams",
-                     "  __merged_PetriNets_RAM : SimpleClassDiagrams",
-                     "  PetriNets_RAM : SimpleClassDiagrams",
-                     "  my_pn : PetriNets",
-                     "  core : CoreFormalism"]),
-                "Ready for command...",
-                # model_modify
-                "Which model do you want to modify?",
-                "Model loaded, ready for commands!",
-                "Use 'help' command for a list of possible commands",
-                "Please give your command.",
-                # instantiate 1
-                "Type to instantiate?",
-                "Name of new element?",
-                "Source name?",
-                "Destination name?",
-                "Instantiation successful!",
-                "Please give your command.",
-                # instantiate 2
-                "Type to instantiate?",
-                "Name of new element?",
-                "Source name?",
-                "Destination name?",
-                "Instantiation successful!",
-                "Please give your command.",
-                # instantiate 3
-                "Type to instantiate?",
-                "Name of new element?",
-                "Source name?",
-                "Destination name?",
-                "Instantiation successful!",
-                "Please give your command.",
-                # instantiate 4
-                "Type to instantiate?",
-                "Name of new element?",
-                "Source name?",
-                "Destination name?",
-                "Instantiation successful!",
-                "Please give your command.",
-                "Ready for command...",
-                # transformation_RAMify
-                "Which metamodel do you want to RAMify?",
-                "Where do you want to store the RAMified metamodel?",
-                "Ready for command...",
-                # transformation_add_MT
-                "RAMified metamodel to use?",
-                "Supported metamodels:",
-                set(["  PetriNets",
-                     "  PetriNets_Runtime",
-                    ]),
-                "",
-                "Which ones do you want to use as source (empty string to finish)?",
-                "Model added as source",
-                "Which ones do you want to use as target (empty string to finish)?",
-                "Model added as target",
-                "Name of new transformation?",
-                "Waiting for model constructors...",
-                "Ready for command...",
-                # transformation_add_MT
-                "RAMified metamodel to use?",
-                "Supported metamodels:",
-                set(["  PetriNets",
-                     "  PetriNets_Runtime",
-                    ]),
-                "",
-                "Which ones do you want to use as source (empty string to finish)?",
-                "Model added as source",
-                "Which ones do you want to use as target (empty string to finish)?",
-                "Model added as target",
-                "Name of new transformation?",
-                "Waiting for model constructors...",
-                "Ready for command...",
-                # transformation_add_MT
-                "RAMified metamodel to use?",
-                "Supported metamodels:",
-                set(["  PetriNets",
-                     "  PetriNets_Runtime",
-                    ]),
-                "",
-                "Which ones do you want to use as source (empty string to finish)?",
-                "Model added as source",
-                "Which ones do you want to use as target (empty string to finish)?",
-                "Model added as target",
-                "Name of new transformation?",
-                "Waiting for model constructors...",
-                "Ready for command...",
-                # transformation_add_MT
-                "RAMified metamodel to use?",
-                "Supported metamodels:",
-                set(["  PetriNets",
-                     "  PetriNets_Runtime",
-                    ]),
-                "",
-                "Which ones do you want to use as source (empty string to finish)?",
-                "Model added as source",
-                "Which ones do you want to use as target (empty string to finish)?",
-                "Name of new transformation?",
-                "Waiting for model constructors...",
-                "Ready for command...",
-                # model_list
-                set(["  SimpleClassDiagrams : SimpleClassDiagrams",
-                     "  CoreFormalism : SimpleClassDiagrams",
-                     "  PetriNets_Runtime : SimpleClassDiagrams",
-                     "  PetriNets : SimpleClassDiagrams",
-                     "  pn_print : PetriNets_RAM",
-                     "  pn_design_to_runtime : PetriNets_RAM",
-                     "  pn_runtime_to_design : PetriNets_RAM",
-                     "  pn_step : PetriNets_RAM",
-                     "  __merged_PetriNets_RAM : SimpleClassDiagrams",
-                     "  PetriNets_RAM : SimpleClassDiagrams",
-                     "  my_pn : PetriNets",
-                     "  core : CoreFormalism"]),
-                "Ready for command...",
-                # transformation_list
-                set(["[ModelTransformation] pn_print : PetriNets_RAM",
-                     "[ModelTransformation] pn_design_to_runtime : PetriNets_RAM",
-                     "[ModelTransformation] pn_runtime_to_design : PetriNets_RAM",
-                     "[ModelTransformation] pn_step : PetriNets_RAM"]),
-                "Ready for command...",
-                # transformation_execute (pn_print)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets",
-                set(['"lock_available" --> 1',
-                     '"critical_section_1" --> 0',
-                     '"critical_section_2" --> 0',
-                    ]),
-                "Transformation executed with result: True",
-                "Ready for command...",
-                # transformation_execute (pn_design_to_runtime)
-                "Which transformation do you want to execute?",
-                "Which model to bind for source element PetriNets",
-                "Which model to create for target element PetriNets_Runtime",
-                "Transformation executed with result: True",
-                "Ready for command...",
-                ] + \
-                    step_and_print() * 10 +
-                [],
-            optimization_level))
-
-
-utils.define_perf_tests(
-    TestMvCSimulateLarger,
-    TestMvCSimulateLarger.transform_add_MT_pn_simulate_larger,
-    optimization_levels=[
-        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS,
-        utils.OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS,
-        utils.OPTIMIZATION_LEVEL_FAST_JIT,
-        utils.OPTIMIZATION_LEVEL_BASELINE_JIT
-    ])

+ 0 - 410
performance/utils.py

@@ -1,410 +0,0 @@
-import unittest
-import sys
-import os
-
-import sys
-import time
-import json
-import urllib
-import urllib2
-import subprocess
-import signal
-import random
-import operator
-
-from collections import defaultdict
-
-sys.path.append("interface/HUTN")
-sys.path.append("scripts")
-from hutn_compiler.compiler import main as do_compile
-from check_objects import to_recompile
-
-USERNAME = "test_task"
-PARALLEL_PUSH = True
-
-BOOTSTRAP_FOLDER_NAME = "bootstrap"
-CURRENT_FOLDER_NAME = "performance"
-
-PORTS = set()
-
-OPTIMIZATION_LEVEL_LEGACY_INTERPRETER = "legacy-interpreter"
-OPTIMIZATION_LEVEL_INTERPRETER = "interpreter"
-OPTIMIZATION_LEVEL_BYTECODE_INTERPRETER = "bytecode-interpreter"
-OPTIMIZATION_LEVEL_BASELINE_JIT = "baseline-jit"
-OPTIMIZATION_LEVEL_BASELINE_JIT_NO_THUNKS = "baseline-jit,no-thunks"
-OPTIMIZATION_LEVEL_FAST_JIT = "fast-jit"
-OPTIMIZATION_LEVEL_FAST_JIT_NO_NOPS = "fast-jit,no-insert-nops"
-OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LARGE_FUNCTIONS = "adaptive-jit-favor-large-functions"
-OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_FUNCTIONS = "adaptive-jit-favor-small-functions"
-OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS = "adaptive-jit-favor-loops"
-OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS = "adaptive-jit-favor-small-loops"
-ALL_OPTIMIZATION_LEVELS = [
-    OPTIMIZATION_LEVEL_LEGACY_INTERPRETER,
-    OPTIMIZATION_LEVEL_INTERPRETER,
-    OPTIMIZATION_LEVEL_BYTECODE_INTERPRETER,
-    OPTIMIZATION_LEVEL_BASELINE_JIT,
-    OPTIMIZATION_LEVEL_BASELINE_JIT_NO_THUNKS,
-    OPTIMIZATION_LEVEL_FAST_JIT,
-    OPTIMIZATION_LEVEL_FAST_JIT_NO_NOPS,
-    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LARGE_FUNCTIONS,
-    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_FUNCTIONS,
-    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS,
-    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS
-]
-
-class ModelverseTerminated(Exception):
-    """An exception that tells the task that the Modelverse has terminated."""
-    pass
-
-def get_code_folder_name():
-    """Gets the name of the code folder."""
-    return '%s/code' % CURRENT_FOLDER_NAME
-
-def get_free_port():
-    """Gets a unique new port."""
-    while 1:
-        port = random.randint(10000, 20000)
-        # Check if this port is in the set of ports.
-        if port not in PORTS:
-            # We have found a unique port. Add it to the set and return.
-            PORTS.add(port)
-            return port
-
-def execute(scriptname, parameters=None, wait=False):
-    """Runs a script."""
-    if os.name not in ["nt", "posix"]:
-        # Stop now, as we would have no clue on how to kill its subtree
-        raise Exception("Unknown OS version: " + str(os.name))
-
-    command = [sys.executable, "scripts/%s.py" % scriptname] + (
-        [] if parameters is None else parameters)
-
-    if wait:
-        return subprocess.call(command, shell=False)
-    else:
-        return subprocess.Popen(command, shell=False)
-
-def kill(process):
-    """Kills the given process."""
-    if os.name == "nt":
-        subprocess.call(["taskkill", "/F", "/T", "/PID", "%i" % process.pid])
-    elif os.name == "posix":
-        subprocess.call(["pkill", "-P", "%i" % process.pid])
-
-def set_input_data(address, data):
-    """Sets the Modelverse program's input data."""
-    if data is not None:
-        urllib2.urlopen(
-            urllib2.Request(
-                address,
-                urllib.urlencode(
-                    {"op": "set_input", "data": json.dumps(data), "taskname": USERNAME})),
-            timeout=10).read()
-    else:
-        return []
-
-def compile_file(address, mod_filename, filename, mode, proc):
-    """Compiles the given file."""
-    # Load in the file required
-    try:
-        timeout_val = 240
-        taskname = str(random.random())
-        while 1:
-            proc2 = execute(
-                "compile", [address, mod_filename, taskname, filename, mode], wait=False)
-
-            if proc.returncode is not None:
-                # Modelverse has already terminated, which isn't a good sign!
-                raise Exception("Modelverse died!")
-
-            while proc2.returncode is None:
-                time.sleep(0.01)
-                proc2.poll()
-                timeout_val -= 0.01
-                if timeout_val < 0:
-                    kill(proc2)
-                    print("Compilation timeout expired!")
-                    return False
-
-            if proc2.returncode != 2:
-                break
-
-        # Make sure everything stopped correctly
-        assert proc2.returncode == 0
-        if proc2.returncode != 0:
-            return False
-    except:
-        raise
-    finally:
-        try:
-            kill(proc2)
-        except UnboundLocalError:
-            pass
-
-def compile_files(address, process, files, mode):
-    """Compiles the given files in the given mode."""
-    threads = []
-    mod_files = []
-    for filename in files:
-        if os.path.isfile(filename):
-            mod_filename = filename
-        elif os.path.isfile("%s/%s" % (get_code_folder_name(), filename)):
-            mod_filename = "%s/%s" % (get_code_folder_name(), filename)
-        elif os.path.isfile("%s/%s" % (BOOTSTRAP_FOLDER_NAME, filename)):
-            mod_filename = "%s/%s" % (BOOTSTRAP_FOLDER_NAME, filename)
-        else:
-            raise Exception("File not found: %s" % filename)
-        mod_files.append(mod_filename)
-
-    to_compile = to_recompile(address, mod_files)
-
-    for mod_filename in to_compile:
-        if mod_filename.endswith(".mvc"):
-            model_mode = "MO"
-            mod_files.remove(mod_filename)
-        else:
-            model_mode = mode
-        if PARALLEL_PUSH:
-            import threading
-            threads.append(
-                threading.Thread(
-                    target=compile_file,
-                    args=[address, mod_filename, mod_filename, model_mode, process]))
-            threads[-1].start()
-        else:
-            compile_file(address, mod_filename, mod_filename, model_mode, process)
-
-    if PARALLEL_PUSH:
-        for t in threads:
-            t.join()
-
-    if mode[-1] == "O":
-        # Fire up the linker
-        val = execute("link_and_load", [address, USERNAME] + mod_files, wait=True)
-        if val != 0:
-            raise Exception("Linking error")
-
-def run_file(files, parameters, mode, handle_output, optimization_level=None):
-    """Compiles the given sequence of files, feeds them the given input in the given mode,
-       and handles their output."""
-    # Resolve file
-    import os.path
-
-    time.sleep(0.01)
-    port = get_free_port()
-    address = "http://127.0.0.1:%i" % port
-    try:
-        # Run Modelverse server
-        modelverse_args = [str(port)]
-        if optimization_level is not None:
-            modelverse_args.append('--kernel=%s' % optimization_level)
-        proc = execute("run_local_modelverse", modelverse_args, wait=False)
-
-        # Compile, push and link the source code files.
-        compile_files(address, proc, files, mode)
-
-        # Send the request ...
-        set_input_data(address, parameters)
-
-        # ... and wait for replies
-        while 1:
-            val = urllib2.urlopen(
-                urllib2.Request(
-                    address,
-                    urllib.urlencode({"op": "get_output", "taskname": USERNAME})),
-                timeout=240).read()
-            val = json.loads(val)
-
-            if proc.returncode is not None:
-                # Modelverse has terminated. This may or may not be what we want.
-                raise ModelverseTerminated()
-
-            if not handle_output(val):
-                return
-
-        # All passed!
-        return
-    except:
-        raise
-    finally:
-        try:
-            kill(proc)
-        except UnboundLocalError:
-            pass
-
-def run_file_to_completion(files, parameters, mode):
-    """Compiles the given sequence of files, feeds them the given input in the given mode,
-       and then collects and returns output."""
-    results = []
-    def handle_output(output):
-        """Appends the given output to the list of results."""
-        results.append(output)
-        return True
-
-    try:
-        run_file(files, parameters, mode, handle_output)
-    except ModelverseTerminated:
-        return results
-
-def run_file_fixed_output_count(files, parameters, mode, output_count, optimization_level=None):
-    """Compiles the given sequence of files, feeds them the given input in the given mode,
-       and then collects and returns a fixed number of outputs."""
-    results = []
-    def handle_output(output):
-        """Appends the given output to the list of results."""
-        results.append(output)
-        if len(results) < output_count:
-            return True
-        else:
-            return False
-
-    run_file(files, parameters, mode, handle_output, optimization_level)
-    return results
-
-def run_file_single_output(files, parameters, mode, optimization_level=None):
-    """Compiles the given sequence of files, feeds them the given input in the given mode,
-       and then collects and returns a single output."""
-    return run_file_fixed_output_count(files, parameters, mode, 1, optimization_level)[0]
-
-def mean(values):
-    """Computes the arithmetic mean of the given values."""
-    return float(sum(values)) / max(len(values), 1)
-
-def run_perf_test(files, parameters, optimization_level, n_iterations=1):
-    """Compiles the given sequence of files, feeds them the given input in the given mode,
-       and then collects their output. This process is repeated n_iterations times. The
-       return value is the average of all outputs, along with the mean total run-time."""
-    test_runtimes = []
-    total_runtimes = []
-    for _ in xrange(n_iterations):
-        start_time = time.time()
-        test_time = run_file_single_output(
-            files, parameters, 'CO',
-            optimization_level)
-        end_time = time.time()
-        total_time = end_time - start_time
-        test_runtimes.append(test_time)
-        total_runtimes.append(total_time)
-    return mean(test_runtimes), mean(total_runtimes)
-
-def get_expectation_checks(expected_values):
-    """Converts the given sequence of expected values to a sequence of functions which tell
-       if an input is allowed. Every function is accompanied by an expected value."""
-    def get_single_expectation_checks(expectation):
-        """Gets an expectation checker for a single expected value."""
-        if isinstance(expectation, set):
-            # We expect to receive a number of outputs equal to the size of the set, but their
-            # order does not matter.
-            for _ in xrange(len(expectation)):
-                yield lambda val: val in expectation
-        elif expectation is None:
-            # Skip output value
-            yield lambda _: True
-        else:
-            yield lambda val: val == expectation
-
-    for expectation in expected_values:
-        for checker in get_single_expectation_checks(expectation):
-            yield checker, expectation
-
-def run_correctness_test(files, parameters, expected, optimization_level):
-    """Compiles the given sequence of files, feeds them the given input in the given mode,
-       and then compares the output with the expected output. The return value is the total
-       run-time of the test."""
-    checks = iter(list(get_expectation_checks(expected)))
-    next_check = [next(checks)]
-    def handle_output(output):
-        """Checks the given output against the expected output."""
-        check, expectation = next_check[0]
-        print("Got %s, expect %s" % (output, expectation))
-        assert check(output)
-
-        try:
-            next_check[0] = next(checks)
-            return True
-        except StopIteration:
-            return False
-
-    start_time = time.time()
-    try:
-        run_file(files, parameters, 'CO', handle_output, optimization_level)
-    except ModelverseTerminated:
-        return
-    end_time = time.time()
-    return end_time - start_time
-
-def format_output(output):
-    """Formats the output of `run_file_to_completion` as a string."""
-    return '\n'.join(output)
-
-def define_perf_test(target_class, test_function, optimization_level):
-    """Defines a performance test in the given class. The performance test calls the given function
-       at the given optimization level."""
-    setattr(
-        target_class,
-        'test_%s' % optimization_level.replace('-', '_').lower(),
-        lambda self: test_function(self, optimization_level))
-
-def define_perf_tests(target_class, test_function, optimization_levels=None):
-    """Defines performance tests in the given class. Each test calls the given function."""
-    if optimization_levels is None:
-        optimization_levels = ALL_OPTIMIZATION_LEVELS
-    for opt_level in optimization_levels:
-        define_perf_test(target_class, test_function, opt_level)
-
-def get_model_constructor(code):
-    # First change multiple spaces to a tab
-    code_fragments = code.split("\n")
-    code_fragments = [i for i in code_fragments if i.strip() != ""]
-    code_fragments = [i.replace("    ", "\t") for i in code_fragments]
-    initial_tabs = min([len(i) - len(i.lstrip("\t")) for i in code_fragments])
-    code_fragments = [i[initial_tabs:] for i in code_fragments]
-    code = "\n".join(code_fragments)
-
-    with open("__model.mvc", "w") as f:
-        f.write(code)
-        f.flush()
-
-    constructors = do_compile("__model.mvc", "interface/HUTN/grammars/modelling.g", "M") + ["exit"]
-
-    return constructors
-
-DEFAULT_PERF_FILE_NAME = 'perf_data.txt'
-
-TOTAL_TIME_QUANTITY = 'total-runtime'
-TEST_TIME_QUANTITY = 'test-runtime'
-
-def write_perf_entry_to_stream(
-        test_name, optimization_level, quantity,
-        result, output_stream):
-    """Writes a performance measurement entry to the given stream."""
-    output_stream.write('%s:%s:%s:%f\n' % (test_name, optimization_level, quantity, result))
-
-def write_perf_to_file(
-        test_name, optimization_level, runtimes, file_name=DEFAULT_PERF_FILE_NAME):
-    """Writes performance data to a file."""
-    test_runtime, total_runtime = runtimes
-    with open(file_name, "a") as perf_file:
-        write_perf_entry_to_stream(
-            test_name, optimization_level, TEST_TIME_QUANTITY, test_runtime, perf_file)
-        write_perf_entry_to_stream(
-            test_name, optimization_level, TOTAL_TIME_QUANTITY, total_runtime, perf_file)
-
-def write_total_runtime_to_file(
-        test_name, optimization_level, total_runtime, file_name=DEFAULT_PERF_FILE_NAME):
-    """Writes a total runtime entry to a file."""
-    with open(file_name, "a") as perf_file:
-        write_perf_entry_to_stream(
-            test_name, optimization_level, TOTAL_TIME_QUANTITY, total_runtime, perf_file)
-
-def parse_perf_data(file_name):
-    """Parses the performance data in the given file."""
-    results = defaultdict(lambda: defaultdict(list))
-    with open(file_name, 'r') as perf_file:
-        for line in perf_file.readlines():
-            test_name, optimization_level, quantity, result = line.strip().split(':')
-            results[quantity][optimization_level].append((test_name, float(result)))
-    return {
-        quantity: sorted(result_dict.items(), key=operator.itemgetter(0))
-        for quantity, result_dict in results.items()
-    }

+ 4 - 4
wrappers/modelverse.py

@@ -124,11 +124,11 @@ def _compile_AL(code):
     code_fragments.append("")
     code = "\n".join(code_fragments)
 
-    with open("__constraint.alc", "w") as f:
+    with open(".code.alc", "w") as f:
         f.write(code)
         f.flush()
 
-    return do_compile("__constraint.alc", COMPILER_PATH + "/grammars/actionlanguage.g", "CS")
+    return do_compile(".code.alc", COMPILER_PATH + "/grammars/actionlanguage.g", "CS")
 
 def _compile_model(code):
     # Compile a model and send the compiled graph
@@ -141,11 +141,11 @@ def _compile_model(code):
     code_fragments.append("")
     code = "\n".join(code_fragments)
 
-    with open("__model.mvc", "w") as f:
+    with open(".model.mvc", "w") as f:
         f.write(code)
         f.flush()
 
-    return do_compile("__model.mvc", COMPILER_PATH + "/grammars/modelling.g", "M") + ["exit"]
+    return do_compile(".model.mvc", COMPILER_PATH + "/grammars/modelling.g", "M") + ["exit"]
 
 def _output(expected=None):
     try: