"""Shared helpers for the Modelverse performance test suite."""

import unittest
import sys
import os
import time
import json
import urllib
import urllib2
import subprocess
import signal
import random
import operator

from collections import defaultdict

sys.path.append("interface/HUTN")
sys.path.append("scripts")
from hutn_compiler.compiler import main as do_compile
from check_objects import to_recompile

USERNAME = "test_task"
PARALLEL_PUSH = True

BOOTSTRAP_FOLDER_NAME = "bootstrap"
CURRENT_FOLDER_NAME = "performance"

# Ports that get_free_port() has already handed out.
PORTS = set()

OPTIMIZATION_LEVEL_LEGACY_INTERPRETER = "legacy-interpreter"
OPTIMIZATION_LEVEL_INTERPRETER = "interpreter"
OPTIMIZATION_LEVEL_BYTECODE_INTERPRETER = "bytecode-interpreter"
OPTIMIZATION_LEVEL_BASELINE_JIT = "baseline-jit"
OPTIMIZATION_LEVEL_BASELINE_JIT_NO_THUNKS = "baseline-jit,no-thunks"
OPTIMIZATION_LEVEL_FAST_JIT = "fast-jit"
OPTIMIZATION_LEVEL_FAST_JIT_NO_NOPS = "fast-jit,no-insert-nops"
OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LARGE_FUNCTIONS = "adaptive-jit-favor-large-functions"
OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_FUNCTIONS = "adaptive-jit-favor-small-functions"
OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS = "adaptive-jit-favor-loops"
OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS = "adaptive-jit-favor-small-loops"
ALL_OPTIMIZATION_LEVELS = [
    OPTIMIZATION_LEVEL_LEGACY_INTERPRETER,
    OPTIMIZATION_LEVEL_INTERPRETER,
    OPTIMIZATION_LEVEL_BYTECODE_INTERPRETER,
    OPTIMIZATION_LEVEL_BASELINE_JIT,
    OPTIMIZATION_LEVEL_BASELINE_JIT_NO_THUNKS,
    OPTIMIZATION_LEVEL_FAST_JIT,
    OPTIMIZATION_LEVEL_FAST_JIT_NO_NOPS,
    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LARGE_FUNCTIONS,
    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_FUNCTIONS,
    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_LOOPS,
    OPTIMIZATION_LEVEL_ADAPTIVE_JIT_FAVOR_SMALL_LOOPS
]

class ModelverseTerminated(Exception):
    """An exception that tells the task that the Modelverse has terminated."""
    pass

def get_code_folder_name():
    """Gets the name of the code folder."""
    return '%s/code' % CURRENT_FOLDER_NAME

def get_free_port():
    """Gets a unique new port."""
    while True:
        port = random.randint(10000, 20000)
        # Check if this port is in the set of ports.
        if port not in PORTS:
            # We have found a unique port. Add it to the set and return.
            PORTS.add(port)
            return port

def execute(scriptname, parameters=None, wait=False):
    """Runs a script from the scripts/ folder."""
    if os.name not in ["nt", "posix"]:
        # Stop now, as we would have no clue on how to kill its subtree.
        raise Exception("Unknown OS version: " + str(os.name))

    command = [sys.executable, "scripts/%s.py" % scriptname] + (
        [] if parameters is None else parameters)

    if wait:
        return subprocess.call(command, shell=False)
    else:
        return subprocess.Popen(command, shell=False)
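
# For reference, this module drives everything through execute(): run_file
# (below) starts the server with execute("run_local_modelverse", [str(port)]),
# and compile_file invokes the compiler with execute("compile", [...]); both
# resolve to scripts/<name>.py.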

def kill(process):
    """Kills the given process and its subtree."""
    if os.name == "nt":
        # /T terminates the whole process tree.
        subprocess.call(["taskkill", "/F", "/T", "/PID", "%i" % process.pid])
    elif os.name == "posix":
        # pkill -P matches on the parent PID, i.e. it signals the children.
        subprocess.call(["pkill", "-P", "%i" % process.pid])

def set_input_data(address, data):
    """Sets the Modelverse program's input data."""
    if data is None:
        return []

    return urllib2.urlopen(
        urllib2.Request(
            address,
            urllib.urlencode(
                {"op": "set_input", "data": json.dumps(data), "taskname": USERNAME})),
        timeout=10).read()

def compile_file(address, mod_filename, filename, mode, proc):
    """Compiles the given file, retrying until the compiler succeeds or the
    timeout expires. Returns True on success and False on failure."""
    try:
        timeout_val = 240
        taskname = str(random.random())
        while True:
            proc2 = execute(
                "compile", [address, mod_filename, taskname, filename, mode], wait=False)

            proc.poll()
            if proc.returncode is not None:
                # Modelverse has already terminated, which isn't a good sign!
                raise Exception("Modelverse died!")

            # Poll the compiler until it finishes or the timeout expires.
            while proc2.returncode is None:
                time.sleep(0.01)
                proc2.poll()
                timeout_val -= 0.01
                if timeout_val < 0:
                    kill(proc2)
                    print("Compilation timeout expired!")
                    return False

            # Retry for as long as the compiler exits with code 2.
            if proc2.returncode != 2:
                break

        # Make sure everything stopped correctly: any other nonzero exit code
        # is a hard failure.
        return proc2.returncode == 0
    finally:
        try:
            kill(proc2)
        except UnboundLocalError:
            # proc2 was never created, so there is nothing to kill.
            pass

def compile_files(address, process, files, mode):
    """Compiles the given files in the given mode."""
    threads = []
    mod_files = []
    for filename in files:
        # Resolve each file against the working directory, the code folder
        # and the bootstrap folder, in that order.
        if os.path.isfile(filename):
            mod_filename = filename
        elif os.path.isfile("%s/%s" % (get_code_folder_name(), filename)):
            mod_filename = "%s/%s" % (get_code_folder_name(), filename)
        elif os.path.isfile("%s/%s" % (BOOTSTRAP_FOLDER_NAME, filename)):
            mod_filename = "%s/%s" % (BOOTSTRAP_FOLDER_NAME, filename)
        else:
            raise Exception("File not found: %s" % filename)
        mod_files.append(mod_filename)

    # Ask the server which of these files actually have to be recompiled.
    to_compile = to_recompile(address, mod_files)

    for mod_filename in to_compile:
        if mod_filename.endswith(".mvc"):
            # Model files are compiled in model ("MO") mode and are excluded
            # from the link step below.
            model_mode = "MO"
            mod_files.remove(mod_filename)
        else:
            model_mode = mode
        if PARALLEL_PUSH:
            import threading
            threads.append(
                threading.Thread(
                    target=compile_file,
                    args=[address, mod_filename, mod_filename, model_mode, process]))
            threads[-1].start()
        else:
            compile_file(address, mod_filename, mod_filename, model_mode, process)

    if PARALLEL_PUSH:
        for t in threads:
            t.join()

    if mode[-1] == "O":
        # Fire up the linker.
        val = execute("link_and_load", [address, USERNAME] + mod_files, wait=True)
        if val != 0:
            raise Exception("Linking error")
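
# Usage sketch (file names are hypothetical): push two source files to the
# server that `proc` is running, then link them for the test task.
#
#   compile_files("http://127.0.0.1:8000", proc,
#                 ["primitives.alc", "my_task.alc"], "CO")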

def run_file(files, parameters, mode, handle_output, optimization_level=None):
    """Compiles the given sequence of files, feeds them the given input in the given
    mode, and handles their output."""
    time.sleep(0.01)
    port = get_free_port()
    address = "http://127.0.0.1:%i" % port
    try:
        # Run the Modelverse server.
        modelverse_args = [str(port)]
        if optimization_level is not None:
            modelverse_args.append('--kernel=%s' % optimization_level)
        proc = execute("run_local_modelverse", modelverse_args, wait=False)

        # Compile, push and link the source code files.
        compile_files(address, proc, files, mode)

        # Send the request ...
        set_input_data(address, parameters)

        # ... and wait for replies.
        while True:
            val = urllib2.urlopen(
                urllib2.Request(
                    address,
                    urllib.urlencode({"op": "get_output", "taskname": USERNAME})),
                timeout=240).read()
            val = json.loads(val)

            proc.poll()
            if proc.returncode is not None:
                # Modelverse has terminated. This may or may not be what we want.
                raise ModelverseTerminated()

            # Keep polling until the handler says it has seen enough.
            if not handle_output(val):
                return
    finally:
        try:
            kill(proc)
        except UnboundLocalError:
            # proc was never created, so there is nothing to kill.
            pass
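
# Usage sketch (file, input and handler are hypothetical): run a task and
# stop polling after the first three outputs.
#
#   outputs = []
#   def take_three(value):
#       outputs.append(value)
#       return len(outputs) < 3
#
#   run_file(["my_task.alc"], [42], "CO", take_three,
#            OPTIMIZATION_LEVEL_INTERPRETER)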

def run_file_to_completion(files, parameters, mode):
    """Compiles the given sequence of files, feeds them the given input in the given
    mode, and then collects and returns all output until the Modelverse terminates."""
    results = []
    def handle_output(output):
        """Appends the given output to the list of results."""
        results.append(output)
        return True

    try:
        run_file(files, parameters, mode, handle_output)
    except ModelverseTerminated:
        pass
    return results

def run_file_fixed_output_count(files, parameters, mode, output_count, optimization_level=None):
    """Compiles the given sequence of files, feeds them the given input in the given
    mode, and then collects and returns a fixed number of outputs."""
    results = []
    def handle_output(output):
        """Appends the given output to the list of results."""
        results.append(output)
        # Keep polling until the requested number of outputs has arrived.
        return len(results) < output_count

    run_file(files, parameters, mode, handle_output, optimization_level)
    return results

def run_file_single_output(files, parameters, mode, optimization_level=None):
    """Compiles the given sequence of files, feeds them the given input in the given
    mode, and then collects and returns a single output."""
    return run_file_fixed_output_count(files, parameters, mode, 1, optimization_level)[0]

def mean(values):
    """Computes the arithmetic mean of the given values; an empty sequence has mean zero."""
    return float(sum(values)) / max(len(values), 1)

def run_perf_test(files, parameters, optimization_level, n_iterations=1):
    """Compiles the given sequence of files, feeds them the given input in 'CO' mode,
    and then collects their output. This process is repeated n_iterations times. The
    return value is the mean reported test run-time, along with the mean total
    (wall-clock) run-time."""
    test_runtimes = []
    total_runtimes = []
    for _ in xrange(n_iterations):
        start_time = time.time()
        # A performance test reports its own run-time as its single output.
        test_time = run_file_single_output(
            files, parameters, 'CO',
            optimization_level)
        end_time = time.time()
        total_time = end_time - start_time
        test_runtimes.append(test_time)
        total_runtimes.append(total_time)
    return mean(test_runtimes), mean(total_runtimes)
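
# Usage sketch (benchmark name and input are hypothetical): average a test
# over three runs under the baseline JIT and record the result with
# write_perf_to_file (defined below).
#
#   runtimes = run_perf_test(
#       ["my_benchmark.alc"], [100], OPTIMIZATION_LEVEL_BASELINE_JIT,
#       n_iterations=3)
#   write_perf_to_file('my_benchmark', OPTIMIZATION_LEVEL_BASELINE_JIT, runtimes)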

def get_expectation_checks(expected_values):
    """Converts the given sequence of expected values to a sequence of functions which
    tell if an input is allowed. Every function is accompanied by an expected value."""
    def get_single_expectation_checks(expectation):
        """Gets an expectation checker for a single expected value."""
        if isinstance(expectation, set):
            # We expect to receive a number of outputs equal to the size of the set,
            # but their order does not matter.
            for _ in xrange(len(expectation)):
                yield lambda val: val in expectation
        elif expectation is None:
            # Skip this output value.
            yield lambda _: True
        else:
            yield lambda val: val == expectation

    for expectation in expected_values:
        for checker in get_single_expectation_checks(expectation):
            yield checker, expectation
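
# For example, list(get_expectation_checks([1, {2, 3}, None])) produces four
# (checker, expectation) pairs: an exact match for 1, two membership checks
# against {2, 3}, and a wildcard for the None entry.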

def run_correctness_test(files, parameters, expected, optimization_level):
    """Compiles the given sequence of files, feeds them the given input in 'CO' mode,
    and then compares the output with the expected output. The return value is the
    total run-time of the test."""
    checks = get_expectation_checks(expected)
    # A one-element list lets handle_output rebind the current check
    # (Python 2 has no `nonlocal`).
    next_check = [next(checks)]
    def handle_output(output):
        """Checks the given output against the expected output."""
        check, expectation = next_check[0]
        print("Got %s, expect %s" % (output, expectation))
        assert check(output)

        try:
            next_check[0] = next(checks)
            return True
        except StopIteration:
            # Every expected value has been checked; stop polling.
            return False

    start_time = time.time()
    try:
        run_file(files, parameters, 'CO', handle_output, optimization_level)
    except ModelverseTerminated:
        # The Modelverse terminated before all checks ran; no run-time to report.
        return
    end_time = time.time()
    return end_time - start_time
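
# Usage sketch (file and expected outputs are hypothetical): the task must
# print 4, then any value, then 5 and 6 in either order.
#
#   run_correctness_test(
#       ["my_task.alc"], [2], [4, None, set([5, 6])],
#       OPTIMIZATION_LEVEL_INTERPRETER)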

def format_output(output):
    """Formats the output of `run_file_to_completion` as a string."""
    return '\n'.join(output)

def define_perf_test(target_class, test_function, optimization_level):
    """Defines a performance test in the given class. The performance test calls the
    given function at the given optimization level."""
    setattr(
        target_class,
        'test_%s' % optimization_level.replace('-', '_').lower(),
        lambda self: test_function(self, optimization_level))

def define_perf_tests(target_class, test_function, optimization_levels=None):
    """Defines performance tests in the given class, one per optimization level. Each
    test calls the given function. By default, all optimization levels are used."""
    if optimization_levels is None:
        optimization_levels = ALL_OPTIMIZATION_LEVELS
    for opt_level in optimization_levels:
        define_perf_test(target_class, test_function, opt_level)
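
# Usage sketch (class and benchmark are hypothetical): generate one
# test_<level> method per optimization level on a unittest.TestCase.
#
#   class TestMyBenchmark(unittest.TestCase):
#       pass
#
#   define_perf_tests(
#       TestMyBenchmark,
#       lambda self, opt_level: run_perf_test(["my_benchmark.alc"], [100], opt_level))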

def get_model_constructor(code):
    """Compiles the given inline model code and returns its constructor list."""
    # Normalize indentation: drop blank lines, turn each four-space run into a
    # tab, and strip the common leading indentation.
    code_fragments = code.split("\n")
    code_fragments = [i for i in code_fragments if i.strip() != ""]
    code_fragments = [i.replace("    ", "\t") for i in code_fragments]
    initial_tabs = min([len(i) - len(i.lstrip("\t")) for i in code_fragments])
    code_fragments = [i[initial_tabs:] for i in code_fragments]
    code = "\n".join(code_fragments)

    # Write the normalized model to a scratch file and compile it with the
    # modelling grammar.
    with open("__model.mvc", "w") as f:
        f.write(code)
        f.flush()

    constructors = do_compile("__model.mvc", "interface/HUTN/grammars/modelling.g", "M") + ["exit"]

    return constructors

DEFAULT_PERF_FILE_NAME = 'perf_data.txt'

TOTAL_TIME_QUANTITY = 'total-runtime'
TEST_TIME_QUANTITY = 'test-runtime'

def write_perf_entry_to_stream(
        test_name, optimization_level, quantity,
        result, output_stream):
    """Writes a performance measurement entry to the given stream as a
    colon-separated test_name:optimization_level:quantity:result line."""
    output_stream.write('%s:%s:%s:%f\n' % (test_name, optimization_level, quantity, result))

def write_perf_to_file(
        test_name, optimization_level, runtimes, file_name=DEFAULT_PERF_FILE_NAME):
    """Appends a (test run-time, total run-time) pair to a performance data file."""
    test_runtime, total_runtime = runtimes
    with open(file_name, "a") as perf_file:
        write_perf_entry_to_stream(
            test_name, optimization_level, TEST_TIME_QUANTITY, test_runtime, perf_file)
        write_perf_entry_to_stream(
            test_name, optimization_level, TOTAL_TIME_QUANTITY, total_runtime, perf_file)

def write_total_runtime_to_file(
        test_name, optimization_level, total_runtime, file_name=DEFAULT_PERF_FILE_NAME):
    """Appends a total runtime entry to a performance data file."""
    with open(file_name, "a") as perf_file:
        write_perf_entry_to_stream(
            test_name, optimization_level, TOTAL_TIME_QUANTITY, total_runtime, perf_file)

def parse_perf_data(file_name):
    """Parses the performance data in the given file. The result maps each quantity
    to a list of (optimization level, [(test name, result)]) pairs, sorted by
    optimization level."""
    results = defaultdict(lambda: defaultdict(list))
    with open(file_name, 'r') as perf_file:
        for line in perf_file:
            test_name, optimization_level, quantity, result = line.strip().split(':')
            results[quantity][optimization_level].append((test_name, float(result)))
    return {
        quantity: sorted(result_dict.items(), key=operator.itemgetter(0))
        for quantity, result_dict in results.items()
    }
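
# Round-trip sketch (hypothetical values): after
#
#   write_perf_to_file('my_test', 'baseline-jit', (0.5, 1.2))
#
# the data file contains
#
#   my_test:baseline-jit:test-runtime:0.500000
#   my_test:baseline-jit:total-runtime:1.200000
#
# and parse_perf_data(DEFAULT_PERF_FILE_NAME) returns a dict equivalent to
#
#   {'test-runtime': [('baseline-jit', [('my_test', 0.5)])],
#    'total-runtime': [('baseline-jit', [('my_test', 1.2)])]}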