@@ -1,6 +1,7 @@
 import unittest
 import sys
 import os
+import tempfile
 
 import sys
 import time
@@ -184,7 +185,7 @@ def compile_files(address, process, files, mode):
     if val != 0:
         raise Exception("Linking error")
 
-def run_file(files, parameters, mode, handle_output, optimization_level=None):
+def run_file(files, parameters, mode, handle_output, optimization_level=None, jit_timing_log=None):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
     and handles their output."""
     # Resolve file
@@ -198,6 +199,10 @@ def run_file(files, parameters, mode, handle_output, optimization_level=None):
     modelverse_args = [str(port)]
     if optimization_level is not None:
         modelverse_args.append('--kernel=%s' % optimization_level)
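+    # Tell the Modelverse to write its JIT compilation timings to this log file.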
+    if jit_timing_log is not None:
+        modelverse_args.append('--jit-timing-log=%s' % jit_timing_log)
+
     proc = execute("run_local_modelverse", modelverse_args, wait=False)
 
     # Compile, push and link the source code files.
@@ -246,7 +251,8 @@ def run_file_to_completion(files, parameters, mode):
     except ModelverseTerminated:
         return results
 
-def run_file_fixed_output_count(files, parameters, mode, output_count, optimization_level=None):
+def run_file_fixed_output_count(
+        files, parameters, mode, output_count, optimization_level=None, jit_timing_log=None):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
     and then collects and returns a fixed number of outputs."""
     results = []
@@ -258,36 +264,62 @@ def run_file_fixed_output_count(files, parameters, mode, output_count, optimizat
         else:
             return False
 
-    run_file(files, parameters, mode, handle_output, optimization_level)
+    run_file(files, parameters, mode, handle_output, optimization_level, jit_timing_log)
     return results
 
-def run_file_single_output(files, parameters, mode, optimization_level=None):
+def run_file_single_output(files, parameters, mode, optimization_level=None, jit_timing_log=None):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
     and then collects and returns a single output."""
-    return run_file_fixed_output_count(files, parameters, mode, 1, optimization_level)[0]
+    return run_file_fixed_output_count(
+        files, parameters, mode, 1, optimization_level, jit_timing_log)[0]
 
 def mean(values):
     """Computes the arithmetic mean of the given values."""
     return float(sum(values)) / max(len(values), 1)
 
+def parse_jit_timing_log(log_file):
+    """Parses the JIT timing log entries from the given file."""
+    results = []
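+    # Each log line is expected to end in '<name>: <seconds>'; the name is
+    # taken to be the last space-separated token before the final colon.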
+    for line in log_file.readlines():
+        first, _, data = line.strip().rpartition(':')
+        _, _, name = first.strip().rpartition(' ')
+        results.append((name, float(data)))
+    return results
+
 def run_perf_test(files, parameters, optimization_level, n_iterations=1):
     """Compiles the given sequence of files, feeds them the given input in the given mode,
     and then collects their output. This process is repeated n_iterations times. The
-    return value is the average of all outputs, along with the mean total run-time."""
+    return value is the average of all outputs, along with the mean total run-time
+    and the mean JIT compile time."""
     test_runtimes = []
     total_runtimes = []
+    compile_times = []
     for _ in xrange(n_iterations):
-        start_time = time.time()
-        test_time = run_file_single_output(
-            files, parameters, 'CO',
-            optimization_level)
-        end_time = time.time()
-        total_time = end_time - start_time
-        test_runtimes.append(test_time)
-        total_runtimes.append(total_time)
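+        # Create a unique scratch path for this run's JIT timing log.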
+        timing_log = tempfile.mktemp()
+        try:
+            start_time = time.time()
+            test_time = run_file_single_output(
+                files, parameters, 'CO',
+                optimization_level, timing_log)
+            end_time = time.time()
+            total_time = end_time - start_time
+            test_runtimes.append(test_time)
+            total_runtimes.append(total_time)
+            with open(timing_log, 'r') as log_file:
+                parsed_times = parse_jit_timing_log(log_file)
+
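+            # The total JIT compile time is the sum of the logged entries.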
+            compile_times.append(sum([data for _, data in parsed_times]))
+        finally:
+            if os.path.exists(timing_log):
+                os.remove(timing_log)
     return {
         TEST_TIME_QUANTITY: mean(test_runtimes),
-        TOTAL_TIME_QUANTITY: mean(total_runtimes)
+        TOTAL_TIME_QUANTITY: mean(total_runtimes),
+        COMPILE_TIME_QUANTITY: mean(compile_times)
     }
 
 def get_expectation_checks(expected_values):
@@ -376,6 +408,8 @@ DEFAULT_PERF_FILE_NAME = 'perf_data.txt'
 
 TOTAL_TIME_QUANTITY = 'total-runtime'
 TEST_TIME_QUANTITY = 'test-runtime'
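+# Key under which run_perf_test reports the mean JIT compilation time.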
+COMPILE_TIME_QUANTITY = 'compile-time'
 
 def write_perf_entry_to_stream(
         test_name, optimization_level, quantity,