@@ -1,5 +1,7 @@
 """Converts performance data files (as produced by utils.py) to LaTeX charts."""
 
+import colorsys
+import operator
 import utils
 
 # Generated LaTeX is based on the accepted answer to
@@ -7,15 +9,11 @@ import utils
 
 # pylint: disable=I0011,W0141
 
-LATEX_COLORS = [
-    ('chartBlue', 0x4F81BD),
-    ('chartRed', 0xC0504D),
-    ('chartGreen', 0x9BBB59),
-    ('chartPurple', 0x9F4C7C),
-    ('chartDarkYellow', 0xCCCC00)
-]
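+# Endpoints of the chart color gradient, as RGB triples: the first color is
+# assigned to the fastest optimization level and the second to the slowest
+# (see generate_color_scheme below).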
+COLOR_SCHEME_MIN_COLOR = (36, 255, 106)
+COLOR_SCHEME_MAX_COLOR = (216, 33, 0)
 
 LATEX_HEADER = r"""\documentclass[12pt,a4paper,onecolumn,openright]{report}
+\usepackage[landscape]{geometry}
 \usepackage{xcolor}
 \usepackage{pgfplots}
 \usepackage{tikz}
@@ -40,8 +38,8 @@ def assemble_latex_chart(optimization_levels, color_defs, test_names, data):
     """Assembles a LaTeX chart from the given components."""
     lines = []
     lines.append(LATEX_HEADER)
-    for color in color_defs:
-        lines.append(r'\definecolor{%s}{HTML}{%X}' % color)
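+    # Each color definition becomes an xcolor \definecolor command; the %02X
+    # conversions keep every RGB channel two hex digits, as the HTML color
+    # model expects.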
+    for color_name, (red, green, blue) in color_defs:
+        lines.append(r'\definecolor{%s}{HTML}{%02X%02X%02X}' % (color_name, red, green, blue))
     lines.append(LATEX_DOCUMENT_HEADER)
     lines.append(r"""
 \begin{axis}[
@@ -81,23 +79,97 @@ def assemble_latex_chart(optimization_levels, color_defs, test_names, data):
 
 def create_latex_chart(perf_data):
     """Creates a LaTeX chart for the given performance data."""
-    unused_colors = LATEX_COLORS[:]
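+    # perf_data is expected to be a sequence of (optimization_level, measurements)
+    # pairs, where measurements is itself a sequence of (test_name, runtime)
+    # pairs, as produced by utils.py.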
+    perf_data_dict = {opt_level: dict(tests) for opt_level, tests in perf_data}
+    sorted_opt_levels = sort_by_runtime(perf_data_dict)
+    color_scheme = generate_color_scheme(sorted_opt_levels)
     opt_levels = []
     color_defs = []
     test_names = []
     data = []
-    for optimization_level, measurements in perf_data:
-        color = unused_colors.pop(0)
-        color_name, _ = color
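+    # The optimization levels arrive sorted fastest-to-slowest, so the color
+    # names chartColor0, chartColor1, ... follow the same order as the scheme.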
+    for i, optimization_level in enumerate(sorted_opt_levels):
+        measurements = perf_data_dict[optimization_level]
+        color = color_scheme[optimization_level]
+        color_name = 'chartColor%d' % i
         opt_levels.append(optimization_level)
-        color_defs.append(color)
-        data.append((color_name, measurements))
-        for name, _ in measurements:
+        color_defs.append((color_name, color))
+        data.append((color_name, measurements.items()))
+        for name, _ in measurements.items():
             if name not in test_names:
                 test_names.append(name)
 
     return assemble_latex_chart(opt_levels, color_defs, test_names, data)
 
+def get_mean_runtimes(perf_data):
+    """Computes the mean run-time of every optimization level in the given
+    performance data."""
+    return {
+        opt_level: utils.mean(perf_data[opt_level].values())
+        for opt_level in perf_data.keys()
+    }
+
+def get_baseline_optimization_level(perf_data):
+    """Gets a baseline optimization level from the given performance data.
+    This baseline optimization level is guaranteed to have a measurement for
+    every test case. If no baseline optimization level can be found, then
+    None is returned."""
+    # First find the names of all test cases.
+    all_test_names = set()
+    for measurements in perf_data.values():
+        all_test_names.update(measurements.keys())
+
+    # Keep only the optimization levels that are used for every test case.
+    candidate_opt_levels = []
+    for optimization_level, measurements in perf_data.items():
+        if len(all_test_names) == len(measurements):
+            candidate_opt_levels.append(optimization_level)
+
+    if len(candidate_opt_levels) == 0:
+        # Looks like there is no baseline optimization level.
+        return None
+
+    # Pick the optimization level with the lowest total run-time as the baseline.
+    return min(candidate_opt_levels, key=lambda opt_level: sum(perf_data[opt_level].values()))
+
+def get_relative_measurements(perf_data, baseline_optimization_level):
+    """Computes a map of measurements that are relative to the given optimization level."""
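+    # A relative value of 1.0 means "as fast as the baseline"; values below 1.0
+    # are faster and values above 1.0 are slower.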
+    results = {}
+    for optimization_level, measurements in perf_data.items():
+        results[optimization_level] = {}
+        for test_name, data_point in measurements.items():
+            results[optimization_level][test_name] = (
+                data_point / perf_data[baseline_optimization_level][test_name])
+
+    return results
+
+def interpolate(value_range, index, length):
+    """Uses an index and a length to linearly interpolate in the given range."""
+    min_val, max_val = value_range
+    if length <= 1:
+        # Guard against dividing by zero when there is only a single element.
+        return min_val
+    return min_val + float(index) * (max_val - min_val) / float(length - 1)
+
+def sort_by_runtime(perf_data):
+    """Sorts the optimization levels by mean relative runtimes, fastest first."""
+    # Assumes that a baseline optimization level exists; see
+    # get_baseline_optimization_level.
+    baseline_opt_level = get_baseline_optimization_level(perf_data)
+    relative_perf = get_relative_measurements(perf_data, baseline_opt_level)
+    # Sort the optimization levels by their mean relative runtimes; normalizing
+    # against the baseline keeps long-running test cases from dominating the
+    # ordering.
+    mean_runtimes = get_mean_runtimes(relative_perf)
+    return [opt_level for opt_level, _ in
+            sorted(mean_runtimes.items(), key=operator.itemgetter(1))]
+
+def generate_color_scheme(sorted_opt_levels):
+    """Assigns a color to every optimization level in the given sorted list."""
+    # Interpolate between the two endpoint colors in HSV space, so that the hue
+    # shifts gradually from the first endpoint (fastest level) to the second
+    # (slowest level).
+    color_scheme = {}
+    min_hue, min_sat, min_val = colorsys.rgb_to_hsv(
+        *[c / float(255) for c in COLOR_SCHEME_MIN_COLOR])
+    max_hue, max_sat, max_val = colorsys.rgb_to_hsv(
+        *[c / float(255) for c in COLOR_SCHEME_MAX_COLOR])
+    for i, opt_level in enumerate(sorted_opt_levels):
+        hue = interpolate((min_hue, max_hue), i, len(sorted_opt_levels))
+        sat = interpolate((min_sat, max_sat), i, len(sorted_opt_levels))
+        val = interpolate((min_val, max_val), i, len(sorted_opt_levels))
+        # Round to integer 0-255 RGB components so that assemble_latex_chart can
+        # format them as a hexadecimal HTML color.
+        color = tuple(int(round(component * 255))
+                      for component in colorsys.hsv_to_rgb(hue, sat, val))
+        color_scheme[opt_level] = color
+
+    return color_scheme
+
 if __name__ == '__main__':
     print(
         create_latex_chart(